drm/i915: move modesetting core code under display/
author	Jani Nikula <jani.nikula@intel.com>
	Thu, 13 Jun 2019 08:44:16 +0000 (11:44 +0300)
committer	Jani Nikula <jani.nikula@intel.com>
	Mon, 17 Jun 2019 08:48:32 +0000 (11:48 +0300)
Now that we have a new subdirectory for display code, continue by moving
modesetting core code.

display/intel_frontbuffer.h sticks out like a sore thumb; otherwise this
is, again, a surprisingly clean operation.

v2:
- don't move intel_sideband.[ch] (Ville)
- use tabs for Makefile file lists and sort them

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190613084416.6794-3-jani.nikula@intel.com
132 files changed:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/Makefile.header-test
drivers/gpu/drm/i915/display/Makefile.header-test
drivers/gpu/drm/i915/display/intel_acpi.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_acpi.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_atomic.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_atomic.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_atomic_plane.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_atomic_plane.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_audio.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_audio.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_bios.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_bios.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_bw.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_bw.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_cdclk.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_cdclk.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_color.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_color.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_combo_phy.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_combo_phy.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_connector.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_connector.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_display.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_display.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_display_power.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_display_power.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dpio_phy.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dpio_phy.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dpll_mgr.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dpll_mgr.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_fbc.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_fbc.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_fbdev.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_fbdev.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_fifo_underrun.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_fifo_underrun.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_frontbuffer.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_frontbuffer.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_hdcp.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_hdcp.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_hotplug.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_hotplug.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_lpe_audio.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_lpe_audio.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_opregion.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_opregion.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_overlay.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_overlay.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_pipe_crc.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_pipe_crc.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_psr.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_psr.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_quirks.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_quirks.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_sprite.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_sprite.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_vbt_defs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_acpi.c [deleted file]
drivers/gpu/drm/i915/intel_acpi.h [deleted file]
drivers/gpu/drm/i915/intel_atomic.c [deleted file]
drivers/gpu/drm/i915/intel_atomic.h [deleted file]
drivers/gpu/drm/i915/intel_atomic_plane.c [deleted file]
drivers/gpu/drm/i915/intel_atomic_plane.h [deleted file]
drivers/gpu/drm/i915/intel_audio.c [deleted file]
drivers/gpu/drm/i915/intel_audio.h [deleted file]
drivers/gpu/drm/i915/intel_bios.c [deleted file]
drivers/gpu/drm/i915/intel_bios.h [deleted file]
drivers/gpu/drm/i915/intel_bw.c [deleted file]
drivers/gpu/drm/i915/intel_bw.h [deleted file]
drivers/gpu/drm/i915/intel_cdclk.c [deleted file]
drivers/gpu/drm/i915/intel_cdclk.h [deleted file]
drivers/gpu/drm/i915/intel_color.c [deleted file]
drivers/gpu/drm/i915/intel_color.h [deleted file]
drivers/gpu/drm/i915/intel_combo_phy.c [deleted file]
drivers/gpu/drm/i915/intel_combo_phy.h [deleted file]
drivers/gpu/drm/i915/intel_connector.c [deleted file]
drivers/gpu/drm/i915/intel_connector.h [deleted file]
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c [deleted file]
drivers/gpu/drm/i915/intel_display.h [deleted file]
drivers/gpu/drm/i915/intel_display_power.c [deleted file]
drivers/gpu/drm/i915/intel_display_power.h [deleted file]
drivers/gpu/drm/i915/intel_dpio_phy.c [deleted file]
drivers/gpu/drm/i915/intel_dpio_phy.h [deleted file]
drivers/gpu/drm/i915/intel_dpll_mgr.c [deleted file]
drivers/gpu/drm/i915/intel_dpll_mgr.h [deleted file]
drivers/gpu/drm/i915/intel_fbc.c [deleted file]
drivers/gpu/drm/i915/intel_fbc.h [deleted file]
drivers/gpu/drm/i915/intel_fbdev.c [deleted file]
drivers/gpu/drm/i915/intel_fbdev.h [deleted file]
drivers/gpu/drm/i915/intel_fifo_underrun.c [deleted file]
drivers/gpu/drm/i915/intel_fifo_underrun.h [deleted file]
drivers/gpu/drm/i915/intel_frontbuffer.c [deleted file]
drivers/gpu/drm/i915/intel_frontbuffer.h [deleted file]
drivers/gpu/drm/i915/intel_hdcp.c [deleted file]
drivers/gpu/drm/i915/intel_hdcp.h [deleted file]
drivers/gpu/drm/i915/intel_hotplug.c [deleted file]
drivers/gpu/drm/i915/intel_hotplug.h [deleted file]
drivers/gpu/drm/i915/intel_lpe_audio.c [deleted file]
drivers/gpu/drm/i915/intel_lpe_audio.h [deleted file]
drivers/gpu/drm/i915/intel_opregion.c [deleted file]
drivers/gpu/drm/i915/intel_opregion.h [deleted file]
drivers/gpu/drm/i915/intel_overlay.c [deleted file]
drivers/gpu/drm/i915/intel_overlay.h [deleted file]
drivers/gpu/drm/i915/intel_pipe_crc.c [deleted file]
drivers/gpu/drm/i915/intel_pipe_crc.h [deleted file]
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c [deleted file]
drivers/gpu/drm/i915/intel_psr.h [deleted file]
drivers/gpu/drm/i915/intel_quirks.c [deleted file]
drivers/gpu/drm/i915/intel_quirks.h [deleted file]
drivers/gpu/drm/i915/intel_runtime_pm.h
drivers/gpu/drm/i915/intel_sprite.c [deleted file]
drivers/gpu/drm/i915/intel_sprite.h [deleted file]
drivers/gpu/drm/i915/intel_vbt_defs.h [deleted file]

index 649f286887b7d8b3698057b1f900e08ea6e61e73..91355c2ea8a509fb29e57859d4ed29d182dfd447 100644 (file)
@@ -51,8 +51,9 @@ i915-y += i915_drv.o \
          intel_device_info.o \
          intel_pm.o \
          intel_runtime_pm.o \
-         intel_wakeref.o \
-         intel_uncore.o
+         intel_sideband.o \
+         intel_uncore.o \
+         intel_wakeref.o
 
 # core library code
 i915-y += \
@@ -63,7 +64,7 @@ i915-y += \
        i915_user_extensions.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # "Graphics Technology" (aka we talk to the gpu)
@@ -149,34 +150,38 @@ i915-y += intel_renderstate_gen6.o \
          intel_renderstate_gen9.o
 
 # modesetting core code
-i915-y += intel_audio.o \
-         intel_atomic.o \
-         intel_atomic_plane.o \
-         intel_bios.o \
-         intel_bw.o \
-         intel_cdclk.o \
-         intel_color.o \
-         intel_combo_phy.o \
-         intel_connector.o \
-         intel_display.o \
-         intel_display_power.o \
-         intel_dpio_phy.o \
-         intel_dpll_mgr.o \
-         intel_fbc.o \
-         intel_fifo_underrun.o \
-         intel_frontbuffer.o \
-         intel_hdcp.o \
-         intel_hotplug.o \
-         intel_overlay.o \
-         intel_psr.o \
-         intel_quirks.o \
-         intel_sideband.o \
-         intel_sprite.o
-i915-$(CONFIG_ACPI)            += intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_FBDEV_EMULATION)     += intel_fbdev.o
+obj-y += display/
+i915-y += \
+       display/intel_atomic.o \
+       display/intel_atomic_plane.o \
+       display/intel_audio.o \
+       display/intel_bios.o \
+       display/intel_bw.o \
+       display/intel_cdclk.o \
+       display/intel_color.o \
+       display/intel_combo_phy.o \
+       display/intel_connector.o \
+       display/intel_display.o \
+       display/intel_display_power.o \
+       display/intel_dpio_phy.o \
+       display/intel_dpll_mgr.o \
+       display/intel_fbc.o \
+       display/intel_fifo_underrun.o \
+       display/intel_frontbuffer.o \
+       display/intel_hdcp.o \
+       display/intel_hotplug.o \
+       display/intel_lpe_audio.o \
+       display/intel_overlay.o \
+       display/intel_psr.o \
+       display/intel_quirks.o \
+       display/intel_sprite.o
+i915-$(CONFIG_ACPI) += \
+       display/intel_acpi.o \
+       display/intel_opregion.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
+       display/intel_fbdev.o
 
 # modesetting output/encoder code
-obj-y += display/
 i915-y += \
        display/dvo_ch7017.o \
        display/dvo_ch7xxx.o \
@@ -242,8 +247,5 @@ i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
 endif
 
-# LPE Audio for VLV and CHT
-i915-y += intel_lpe_audio.o
-
 obj-$(CONFIG_DRM_I915) += i915.o
 obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
index 5a04858c9b7bfeb693f78c62120ca040a0232486..e6ba66f787f99217adfe5d82ddbf57ecd7951af6 100644 (file)
@@ -13,35 +13,11 @@ header_test := \
        i915_scheduler_types.h \
        i915_timeline_types.h \
        i915_utils.h \
-       intel_acpi.h \
-       intel_atomic.h \
-       intel_atomic_plane.h \
-       intel_audio.h \
-       intel_bios.h \
-       intel_cdclk.h \
-       intel_color.h \
-       intel_combo_phy.h \
-       intel_connector.h \
        intel_csr.h \
-       intel_display_power.h \
-       intel_dpio_phy.h \
-       intel_dpll_mgr.h \
        intel_drv.h \
-       intel_fbc.h \
-       intel_fbdev.h \
-       intel_fifo_underrun.h \
-       intel_frontbuffer.h \
-       intel_hdcp.h \
-       intel_hotplug.h \
-       intel_lpe_audio.h \
-       intel_overlay.h \
-       intel_pipe_crc.h \
        intel_pm.h \
-       intel_psr.h \
-       intel_quirks.h \
        intel_runtime_pm.h \
        intel_sideband.h \
-       intel_sprite.h \
        intel_uncore.h \
        intel_wakeref.h
 
index 61e06cbb4b324cb94cc27b536d92a37cda2e9497..fc7d4e5bd2c694e444a30701b833dafe3b911c6f 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright © 2019 Intel Corporation
 
 # Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
+header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
 
 quiet_cmd_header_test = HDRTEST $@
       cmd_header_test = echo "\#include \"$(<F)\"" > $@
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
new file mode 100644 (file)
index 0000000..3456d33
--- /dev/null
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+#include "i915_drv.h"
+#include "intel_acpi.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static const guid_t intel_dsm_guid =
+       GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
+                 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
+
+static char *intel_dsm_port_name(u8 id)
+{
+       switch (id) {
+       case 0:
+               return "Reserved";
+       case 1:
+               return "Analog VGA";
+       case 2:
+               return "LVDS";
+       case 3:
+               return "Reserved";
+       case 4:
+               return "HDMI/DVI_B";
+       case 5:
+               return "HDMI/DVI_C";
+       case 6:
+               return "HDMI/DVI_D";
+       case 7:
+               return "DisplayPort_A";
+       case 8:
+               return "DisplayPort_B";
+       case 9:
+               return "DisplayPort_C";
+       case 0xa:
+               return "DisplayPort_D";
+       case 0xb:
+       case 0xc:
+       case 0xd:
+               return "Reserved";
+       case 0xe:
+               return "WiDi";
+       default:
+               return "bad type";
+       }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+       switch (type) {
+       case 0:
+               return "unknown";
+       case 1:
+               return "No MUX, iGPU only";
+       case 2:
+               return "No MUX, dGPU only";
+       case 3:
+               return "MUXed between iGPU and dGPU";
+       default:
+               return "bad type";
+       }
+}
+
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
+{
+       int i;
+       union acpi_object *pkg, *connector_count;
+
+       pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
+                       INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
+                       NULL, ACPI_TYPE_PACKAGE);
+       if (!pkg) {
+               DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
+               return;
+       }
+
+       connector_count = &pkg->package.elements[0];
+       DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+                 (unsigned long long)connector_count->integer.value);
+       for (i = 1; i < pkg->package.count; i++) {
+               union acpi_object *obj = &pkg->package.elements[i];
+               union acpi_object *connector_id = &obj->package.elements[0];
+               union acpi_object *info = &obj->package.elements[1];
+               DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+                         (unsigned long long)connector_id->integer.value);
+               DRM_DEBUG_DRIVER("  port id: %s\n",
+                      intel_dsm_port_name(info->buffer.pointer[0]));
+               DRM_DEBUG_DRIVER("  display mux info: %s\n",
+                      intel_dsm_mux_type(info->buffer.pointer[1]));
+               DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
+                      intel_dsm_mux_type(info->buffer.pointer[2]));
+               DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
+                      intel_dsm_mux_type(info->buffer.pointer[3]));
+       }
+
+       ACPI_FREE(pkg);
+}
+
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+       acpi_handle dhandle;
+
+       dhandle = ACPI_HANDLE(&pdev->dev);
+       if (!dhandle)
+               return NULL;
+
+       if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
+                           1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
+               DRM_DEBUG_KMS("no _DSM method for intel device\n");
+               return NULL;
+       }
+
+       intel_dsm_platform_mux_info(dhandle);
+
+       return dhandle;
+}
+
+static bool intel_dsm_detect(void)
+{
+       acpi_handle dhandle = NULL;
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+       struct pci_dev *pdev = NULL;
+       int vga_count = 0;
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               vga_count++;
+               dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
+       }
+
+       if (vga_count == 2 && dhandle) {
+               acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
+               DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
+                                acpi_method_name);
+               return true;
+       }
+
+       return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+       if (!intel_dsm_detect())
+               return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
new file mode 100644 (file)
index 0000000..1c576b3
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ACPI_H__
+#define __INTEL_ACPI_H__
+
+#ifdef CONFIG_ACPI
+void intel_register_dsm_handler(void);
+void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
+#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
new file mode 100644 (file)
index 0000000..6b985e8
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic modeset support
+ *
+ * The functions here implement the state management and hardware programming
+ * dispatch required by the atomic modeset infrastructure.
+ * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_sprite.h"
+
+/**
+ * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
+ * @connector: Connector to get the property for.
+ * @state: Connector state to retrieve the property from.
+ * @property: Property to retrieve.
+ * @val: Return value for the property.
+ *
+ * Returns the atomic property value for a digital connector.
+ */
+int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
+                                               const struct drm_connector_state *state,
+                                               struct drm_property *property,
+                                               u64 *val)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_digital_connector_state *intel_conn_state =
+               to_intel_digital_connector_state(state);
+
+       if (property == dev_priv->force_audio_property)
+               *val = intel_conn_state->force_audio;
+       else if (property == dev_priv->broadcast_rgb_property)
+               *val = intel_conn_state->broadcast_rgb;
+       else {
+               DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+                                property->base.id, property->name);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
+ * @connector: Connector to set the property for.
+ * @state: Connector state to set the property on.
+ * @property: Property to set.
+ * @val: New value for the property.
+ *
+ * Sets the atomic property value for a digital connector.
+ */
+int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
+                                               struct drm_connector_state *state,
+                                               struct drm_property *property,
+                                               u64 val)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_digital_connector_state *intel_conn_state =
+               to_intel_digital_connector_state(state);
+
+       if (property == dev_priv->force_audio_property) {
+               intel_conn_state->force_audio = val;
+               return 0;
+       }
+
+       if (property == dev_priv->broadcast_rgb_property) {
+               intel_conn_state->broadcast_rgb = val;
+               return 0;
+       }
+
+       DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+                        property->base.id, property->name);
+       return -EINVAL;
+}
+
+static bool blob_equal(const struct drm_property_blob *a,
+                      const struct drm_property_blob *b)
+{
+       if (a && b)
+               return a->length == b->length &&
+                       !memcmp(a->data, b->data, a->length);
+
+       return !a == !b;
+}
+
+int intel_digital_connector_atomic_check(struct drm_connector *conn,
+                                        struct drm_connector_state *new_state)
+{
+       struct intel_digital_connector_state *new_conn_state =
+               to_intel_digital_connector_state(new_state);
+       struct drm_connector_state *old_state =
+               drm_atomic_get_old_connector_state(new_state->state, conn);
+       struct intel_digital_connector_state *old_conn_state =
+               to_intel_digital_connector_state(old_state);
+       struct drm_crtc_state *crtc_state;
+
+       intel_hdcp_atomic_check(conn, old_state, new_state);
+
+       if (!new_state->crtc)
+               return 0;
+
+       crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
+
+       /*
+        * These properties are handled by fastset, and might not end
+        * up in a modeset.
+        */
+       if (new_conn_state->force_audio != old_conn_state->force_audio ||
+           new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
+           new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
+           new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+           new_conn_state->base.content_type != old_conn_state->base.content_type ||
+           new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
+           !blob_equal(new_conn_state->base.hdr_output_metadata,
+                       old_conn_state->base.hdr_output_metadata))
+               crtc_state->mode_changed = true;
+
+       return 0;
+}
+
+/**
+ * intel_digital_connector_duplicate_state - duplicate connector state
+ * @connector: digital connector
+ *
+ * Allocates and returns a copy of the connector state (both common and
+ * digital connector specific) for the specified connector.
+ *
+ * Returns: The newly allocated connector state, or NULL on failure.
+ */
+struct drm_connector_state *
+intel_digital_connector_duplicate_state(struct drm_connector *connector)
+{
+       struct intel_digital_connector_state *state;
+
+       state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
+       return &state->base;
+}
+
+/**
+ * intel_crtc_duplicate_state - duplicate crtc state
+ * @crtc: drm crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * Intel-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+intel_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+       struct intel_crtc_state *crtc_state;
+
+       crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+       if (!crtc_state)
+               return NULL;
+
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+
+       crtc_state->update_pipe = false;
+       crtc_state->disable_lp_wm = false;
+       crtc_state->disable_cxsr = false;
+       crtc_state->update_wm_pre = false;
+       crtc_state->update_wm_post = false;
+       crtc_state->fb_changed = false;
+       crtc_state->fifo_changed = false;
+       crtc_state->wm.need_postvbl_update = false;
+       crtc_state->fb_bits = 0;
+       crtc_state->update_planes = 0;
+
+       return &crtc_state->base;
+}
+
+/**
+ * intel_crtc_destroy_state - destroy crtc state
+ * @crtc: drm crtc
+ * @state: the state to destroy
+ *
+ * Destroys the crtc state (both common and Intel-specific) for the
+ * specified crtc.
+ */
+void
+intel_crtc_destroy_state(struct drm_crtc *crtc,
+                        struct drm_crtc_state *state)
+{
+       drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
+
+static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+                                     int num_scalers_need, struct intel_crtc *intel_crtc,
+                                     const char *name, int idx,
+                                     struct intel_plane_state *plane_state,
+                                     int *scaler_id)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       int j;
+       u32 mode;
+
+       if (*scaler_id < 0) {
+               /* find a free scaler */
+               for (j = 0; j < intel_crtc->num_scalers; j++) {
+                       if (scaler_state->scalers[j].in_use)
+                               continue;
+
+                       *scaler_id = j;
+                       scaler_state->scalers[*scaler_id].in_use = 1;
+                       break;
+               }
+       }
+
+       if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+               return;
+
+       /* set scaler mode */
+       if (plane_state && plane_state->base.fb &&
+           plane_state->base.fb->format->is_yuv &&
+           plane_state->base.fb->format->num_planes > 1) {
+               struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+               if (IS_GEN(dev_priv, 9) &&
+                   !IS_GEMINILAKE(dev_priv)) {
+                       mode = SKL_PS_SCALER_MODE_NV12;
+               } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+                       /*
+                        * On gen11+'s HDR planes we only use the scaler for
+                        * scaling. They have a dedicated chroma upsampler, so
+                        * we don't need the scaler to upsample the UV plane.
+                        */
+                       mode = PS_SCALER_MODE_NORMAL;
+               } else {
+                       mode = PS_SCALER_MODE_PLANAR;
+
+                       if (plane_state->linked_plane)
+                               mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+               }
+       } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+               mode = PS_SCALER_MODE_NORMAL;
+       } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+               /*
+                * when only 1 scaler is in use on a pipe with 2 scalers
+                * scaler 0 operates in high quality (HQ) mode.
+                * In this case use scaler 0 to take advantage of HQ mode
+                */
+               scaler_state->scalers[*scaler_id].in_use = 0;
+               *scaler_id = 0;
+               scaler_state->scalers[0].in_use = 1;
+               mode = SKL_PS_SCALER_MODE_HQ;
+       } else {
+               mode = SKL_PS_SCALER_MODE_DYN;
+       }
+
+       DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+                     intel_crtc->pipe, *scaler_id, name, idx);
+       scaler_state->scalers[*scaler_id].mode = mode;
+}
+
+/**
+ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+ * @dev_priv: i915 device
+ * @intel_crtc: intel crtc
+ * @crtc_state: incoming crtc_state to validate and setup scalers
+ *
+ * This function sets up scalers based on staged scaling requests for
+ * a @crtc and its planes. It is called from crtc level check path. If request
+ * is a supportable request, it attaches scalers to requested planes and crtc.
+ *
+ * This function takes into account the current scaler(s) in use by any planes
+ * not being part of this atomic state
+ *
+ *  Returns:
+ *         0 - scalers were set up successfully
+ *         error code - otherwise
+ */
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+                              struct intel_crtc *intel_crtc,
+                              struct intel_crtc_state *crtc_state)
+{
+       struct drm_plane *plane = NULL;
+       struct intel_plane *intel_plane;
+       struct intel_plane_state *plane_state = NULL;
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       struct drm_atomic_state *drm_state = crtc_state->base.state;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
+       int num_scalers_need;
+       int i;
+
+       num_scalers_need = hweight32(scaler_state->scaler_users);
+
+       /*
+        * High level flow:
+        * - staged scaler requests are already in scaler_state->scaler_users
+        * - check whether staged scaling requests can be supported
+        * - add planes using scalers that aren't in current transaction
+        * - assign scalers to requested users
+        * - as part of plane commit, scalers will be committed
+        *   (i.e., either attached or detached) to respective planes in hw
+        * - as part of crtc_commit, scaler will be either attached or detached
+        *   to crtc in hw
+        */
+
+       /* fail if required scalers > available scalers */
+       if (num_scalers_need > intel_crtc->num_scalers){
+               DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
+                       num_scalers_need, intel_crtc->num_scalers);
+               return -EINVAL;
+       }
+
+       /* walkthrough scaler_users bits and start assigning scalers */
+       for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+               int *scaler_id;
+               const char *name;
+               int idx;
+
+               /* skip if scaler not required */
+               if (!(scaler_state->scaler_users & (1 << i)))
+                       continue;
+
+               if (i == SKL_CRTC_INDEX) {
+                       name = "CRTC";
+                       idx = intel_crtc->base.base.id;
+
+                       /* panel fitter case: assign as a crtc scaler */
+                       scaler_id = &scaler_state->scaler_id;
+               } else {
+                       name = "PLANE";
+
+                       /* plane scaler case: assign as a plane scaler */
+                       /* find the plane that set the bit as scaler_user */
+                       plane = drm_state->planes[i].ptr;
+
+                       /*
+                        * to enable/disable hq mode, add planes that are using scaler
+                        * into this transaction
+                        */
+                       if (!plane) {
+                               struct drm_plane_state *state;
+                               plane = drm_plane_from_index(&dev_priv->drm, i);
+                               state = drm_atomic_get_plane_state(drm_state, plane);
+                               if (IS_ERR(state)) {
+                                       DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
+                                               plane->base.id);
+                                       return PTR_ERR(state);
+                               }
+
+                               /*
+                                * the plane is added after plane checks are run,
+                                * but since this plane is unchanged just do the
+                                * minimum required validation.
+                                */
+                               crtc_state->base.planes_changed = true;
+                       }
+
+                       intel_plane = to_intel_plane(plane);
+                       idx = plane->base.id;
+
+                       /* plane on different crtc cannot be a scaler user of this crtc */
+                       if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+                               continue;
+
+                       plane_state = intel_atomic_get_new_plane_state(intel_state,
+                                                                      intel_plane);
+                       scaler_id = &plane_state->scaler_id;
+               }
+
+               intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+                                         intel_crtc, name, idx,
+                                         plane_state, scaler_id);
+       }
+
+       return 0;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+       struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+       if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+               kfree(state);
+               return NULL;
+       }
+
+       return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(s);
+       drm_atomic_state_default_clear(&state->base);
+       state->dpll_set = state->modeset = false;
+}
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+                           struct intel_crtc *crtc)
+{
+       struct drm_crtc_state *crtc_state;
+       crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+       if (IS_ERR(crtc_state))
+               return ERR_CAST(crtc_state);
+
+       return to_intel_crtc_state(crtc_state);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
new file mode 100644 (file)
index 0000000..1c8507d
--- /dev/null
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_H__
+#define __INTEL_ATOMIC_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_device;
+struct drm_i915_private;
+struct drm_property;
+struct intel_crtc;
+struct intel_crtc_state;
+
+int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
+                                               const struct drm_connector_state *state,
+                                               struct drm_property *property,
+                                               u64 *val);
+int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
+                                               struct drm_connector_state *state,
+                                               struct drm_property *property,
+                                               u64 val);
+int intel_digital_connector_atomic_check(struct drm_connector *conn,
+                                        struct drm_connector_state *new_state);
+struct drm_connector_state *
+intel_digital_connector_duplicate_state(struct drm_connector *connector);
+
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+                              struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *state);
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+                           struct intel_crtc *crtc);
+
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+                              struct intel_crtc *intel_crtc,
+                              struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
new file mode 100644 (file)
index 0000000..30bd4e7
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic plane helpers
+ *
+ * The functions here are used by the atomic plane helper functions to
+ * implement legacy plane updates (i.e., drm_plane->update_plane() and
+ * drm_plane->disable_plane()).  This allows plane updates to use the
+ * atomic state infrastructure and perform plane updates as separate
+ * prepare/check/commit/cleanup steps.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic_plane.h"
+#include "intel_drv.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
+
+struct intel_plane *intel_plane_alloc(void)
+{
+       struct intel_plane_state *plane_state;
+       struct intel_plane *plane;
+
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+
+       plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+       if (!plane_state) {
+               kfree(plane);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
+       plane_state->scaler_id = -1;
+
+       return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+       intel_plane_destroy_state(&plane->base, plane->base.state);
+       kfree(plane);
+}
+
+/**
+ * intel_plane_duplicate_state - duplicate plane state
+ * @plane: drm plane
+ *
+ * Allocates and returns a copy of the plane state (both common and
+ * Intel-specific) for the specified plane.
+ *
+ * Returns: The newly allocated plane state, or NULL on failure.
+ */
+struct drm_plane_state *
+intel_plane_duplicate_state(struct drm_plane *plane)
+{
+       struct drm_plane_state *state;
+       struct intel_plane_state *intel_state;
+
+       intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
+
+       if (!intel_state)
+               return NULL;
+
+       state = &intel_state->base;
+
+       __drm_atomic_helper_plane_duplicate_state(plane, state);
+
+       intel_state->vma = NULL;
+       intel_state->flags = 0;
+
+       return state;
+}
+
+/**
+ * intel_plane_destroy_state - destroy plane state
+ * @plane: drm plane
+ * @state: state object to destroy
+ *
+ * Destroys the plane state (both common and Intel-specific) for the
+ * specified plane.
+ */
+void
+intel_plane_destroy_state(struct drm_plane *plane,
+                         struct drm_plane_state *state)
+{
+       WARN_ON(to_intel_plane_state(state)->vma);
+
+       drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+                                  const struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int cpp;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       cpp = fb->format->cpp[0];
+
+       /*
+        * Based on HSD#:1408715493
+        * NV12 cpp == 4, P010 cpp == 8
+        *
+        * FIXME what is the logic behind this?
+        */
+       if (fb->format->is_yuv && fb->format->num_planes > 1)
+               cpp *= 4;
+
+       return cpp * crtc_state->pixel_rate;
+}
+
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+                                       struct intel_crtc_state *new_crtc_state,
+                                       const struct intel_plane_state *old_plane_state,
+                                       struct intel_plane_state *new_plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
+       int ret;
+
+       new_crtc_state->active_planes &= ~BIT(plane->id);
+       new_crtc_state->nv12_planes &= ~BIT(plane->id);
+       new_crtc_state->c8_planes &= ~BIT(plane->id);
+       new_crtc_state->data_rate[plane->id] = 0;
+       new_plane_state->base.visible = false;
+
+       if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
+               return 0;
+
+       ret = plane->check_plane(new_crtc_state, new_plane_state);
+       if (ret)
+               return ret;
+
+       /* FIXME pre-g4x don't work like this */
+       if (new_plane_state->base.visible)
+               new_crtc_state->active_planes |= BIT(plane->id);
+
+       if (new_plane_state->base.visible &&
+           is_planar_yuv_format(new_plane_state->base.fb->format->format))
+               new_crtc_state->nv12_planes |= BIT(plane->id);
+
+       if (new_plane_state->base.visible &&
+           new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+               new_crtc_state->c8_planes |= BIT(plane->id);
+
+       if (new_plane_state->base.visible || old_plane_state->base.visible)
+               new_crtc_state->update_planes |= BIT(plane->id);
+
+       new_crtc_state->data_rate[plane->id] =
+               intel_plane_data_rate(new_crtc_state, new_plane_state);
+
+       return intel_plane_atomic_calc_changes(old_crtc_state,
+                                              &new_crtc_state->base,
+                                              old_plane_state,
+                                              &new_plane_state->base);
+}
+
+static int intel_plane_atomic_check(struct drm_plane *plane,
+                                   struct drm_plane_state *new_plane_state)
+{
+       struct drm_atomic_state *state = new_plane_state->state;
+       const struct drm_plane_state *old_plane_state =
+               drm_atomic_get_old_plane_state(state, plane);
+       struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+       const struct drm_crtc_state *old_crtc_state;
+       struct drm_crtc_state *new_crtc_state;
+
+       new_plane_state->visible = false;
+       if (!crtc)
+               return 0;
+
+       old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+       new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+       return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
+                                                  to_intel_crtc_state(new_crtc_state),
+                                                  to_intel_plane_state(old_plane_state),
+                                                  to_intel_plane_state(new_plane_state));
+}
+
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+                        struct intel_crtc *crtc,
+                        struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+                        struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+                        unsigned int *update_mask)
+{
+       struct intel_crtc_state *crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       struct intel_plane_state *plane_state;
+       struct intel_plane *plane;
+       int i;
+
+       if (*update_mask == 0)
+               return NULL;
+
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               enum plane_id plane_id = plane->id;
+
+               if (crtc->pipe != plane->pipe ||
+                   !(*update_mask & BIT(plane_id)))
+                       continue;
+
+               if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+                                               entries_y,
+                                               I915_MAX_PLANES, plane_id) ||
+                   skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+                                               entries_uv,
+                                               I915_MAX_PLANES, plane_id))
+                       continue;
+
+               *update_mask &= ~BIT(plane_id);
+               entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+               entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+               return plane;
+       }
+
+       /* should never happen */
+       WARN_ON(1);
+
+       return NULL;
+}
+
+void intel_update_plane(struct intel_plane *plane,
+                       const struct intel_crtc_state *crtc_state,
+                       const struct intel_plane_state *plane_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       trace_intel_update_plane(&plane->base, crtc);
+       plane->update_plane(plane, crtc_state, plane_state);
+}
+
+void intel_update_slave(struct intel_plane *plane,
+                       const struct intel_crtc_state *crtc_state,
+                       const struct intel_plane_state *plane_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       trace_intel_update_plane(&plane->base, crtc);
+       plane->update_slave(plane, crtc_state, plane_state);
+}
+
+void intel_disable_plane(struct intel_plane *plane,
+                        const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       trace_intel_disable_plane(&plane->base, crtc);
+       plane->disable_plane(plane, crtc_state);
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+                              struct intel_crtc *crtc)
+{
+       struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+       struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+       u32 update_mask = new_crtc_state->update_planes;
+       struct intel_plane *plane;
+
+       memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+              sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+       memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+              sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+       while ((plane = skl_next_plane_to_commit(state, crtc,
+                                                entries_y, entries_uv,
+                                                &update_mask))) {
+               struct intel_plane_state *new_plane_state =
+                       intel_atomic_get_new_plane_state(state, plane);
+
+               if (new_plane_state->base.visible) {
+                       intel_update_plane(plane, new_crtc_state, new_plane_state);
+               } else if (new_plane_state->slave) {
+                       struct intel_plane *master =
+                               new_plane_state->linked_plane;
+
+                       /*
+                        * We update the slave plane from this function because
+                        * programming it from the master plane's update_plane
+                        * callback runs into issues when the Y plane is
+                        * reassigned, disabled or used by a different plane.
+                        *
+                        * The slave plane is updated with the master plane's
+                        * plane_state.
+                        */
+                       new_plane_state =
+                               intel_atomic_get_new_plane_state(state, master);
+
+                       intel_update_slave(plane, new_crtc_state, new_plane_state);
+               } else {
+                       intel_disable_plane(plane, new_crtc_state);
+               }
+       }
+}
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+                               struct intel_crtc *crtc)
+{
+       struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       u32 update_mask = new_crtc_state->update_planes;
+       struct intel_plane_state *new_plane_state;
+       struct intel_plane *plane;
+       int i;
+
+       for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+               if (crtc->pipe != plane->pipe ||
+                   !(update_mask & BIT(plane->id)))
+                       continue;
+
+               if (new_plane_state->base.visible)
+                       intel_update_plane(plane, new_crtc_state, new_plane_state);
+               else
+                       intel_disable_plane(plane, new_crtc_state);
+       }
+}
+
+const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+       .prepare_fb = intel_prepare_plane_fb,
+       .cleanup_fb = intel_cleanup_plane_fb,
+       .atomic_check = intel_plane_atomic_check,
+};
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
new file mode 100644 (file)
index 0000000..1437a87
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_PLANE_H__
+#define __INTEL_ATOMIC_PLANE_H__
+
+#include <linux/types.h>
+
+struct drm_crtc_state;
+struct drm_plane;
+struct drm_property;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct intel_plane_state;
+
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+                                  const struct intel_plane_state *plane_state);
+void intel_update_plane(struct intel_plane *plane,
+                       const struct intel_crtc_state *crtc_state,
+                       const struct intel_plane_state *plane_state);
+void intel_update_slave(struct intel_plane *plane,
+                       const struct intel_crtc_state *crtc_state,
+                       const struct intel_plane_state *plane_state);
+void intel_disable_plane(struct intel_plane *plane,
+                        const struct intel_crtc_state *crtc_state);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+                              struct drm_plane_state *state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+                              struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+                               struct intel_crtc *crtc);
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+                                       struct intel_crtc_state *crtc_state,
+                                       const struct intel_plane_state *old_plane_state,
+                                       struct intel_plane_state *intel_state);
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+                                   struct drm_crtc_state *crtc_state,
+                                   const struct intel_plane_state *old_plane_state,
+                                   struct drm_plane_state *plane_state);
+
+#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
new file mode 100644 (file)
index 0000000..840daff
--- /dev/null
@@ -0,0 +1,1104 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/component.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_edid.h>
+#include <drm/i915_component.h>
+
+#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_drv.h"
+#include "intel_lpe_audio.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
+ *
+ * The codec and controller sequences could be done either in parallel or in
+ * series, but generally the ELDV/PD change in the codec sequence indicates to
+ * the audio driver that the controller sequence should start. Indeed, most of
+ * the co-operation between the graphics and audio drivers is handled via
+ * audio related registers. (The notable exception is power management, which
+ * is not covered here.)
+ *
+ * The struct &i915_audio_component is used for interaction between the
+ * graphics and audio drivers. The struct &i915_audio_component_ops @ops in it
+ * is defined in the graphics driver and called by the audio driver, and the
+ * struct &i915_audio_component_audio_ops @audio_ops is called from the i915
+ * driver.
+ */
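
The two-sided ops arrangement described above can be pictured with a small standalone model: one function-pointer table implemented by the graphics side and called by the audio side, and one pointing the other way. The struct layouts and names below are illustrative only, not the real struct i915_audio_component; it is a sketch of the pattern, not driver code.

        /* Build standalone: cc -o acomp_model acomp_model.c && ./acomp_model */
        #include <stdio.h>

        /* Hypothetical mirror of the two-sided interface: "gfx_ops" is
         * implemented by the graphics side and called by the audio side;
         * "audio_ops" is implemented by the audio side and called by the
         * graphics side. Names and layout are made up for illustration. */
        struct gfx_ops {
                int (*get_cdclk_freq)(void);                 /* audio -> graphics */
        };

        struct audio_ops {
                void (*pin_eld_notify)(int port, int pipe);  /* graphics -> audio */
        };

        static int model_get_cdclk_freq(void) { return 652800; /* kHz, made up */ }

        static void model_pin_eld_notify(int port, int pipe)
        {
                printf("audio driver: ELD changed on port %d, pipe %d\n", port, pipe);
        }

        int main(void)
        {
                struct gfx_ops gfx = { .get_cdclk_freq = model_get_cdclk_freq };
                struct audio_ops aud = { .pin_eld_notify = model_pin_eld_notify };

                /* Graphics driver finishes a modeset and notifies the audio
                 * driver; pipe = -1 conventionally means "not MST". */
                aud.pin_eld_notify(0, -1);

                /* Audio driver asks the graphics driver for the current CDCLK. */
                printf("audio driver sees cdclk = %d kHz\n", gfx.get_cdclk_freq());
                return 0;
        }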
+
+/* DP N/M table */
+#define LC_810M        810000
+#define LC_540M        540000
+#define LC_270M        270000
+#define LC_162M        162000
+
+struct dp_aud_n_m {
+       int sample_rate;
+       int clock;
+       u16 m;
+       u16 n;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+       { 32000, LC_162M, 1024, 10125 },
+       { 44100, LC_162M, 784, 5625 },
+       { 48000, LC_162M, 512, 3375 },
+       { 64000, LC_162M, 2048, 10125 },
+       { 88200, LC_162M, 1568, 5625 },
+       { 96000, LC_162M, 1024, 3375 },
+       { 128000, LC_162M, 4096, 10125 },
+       { 176400, LC_162M, 3136, 5625 },
+       { 192000, LC_162M, 2048, 3375 },
+       { 32000, LC_270M, 1024, 16875 },
+       { 44100, LC_270M, 784, 9375 },
+       { 48000, LC_270M, 512, 5625 },
+       { 64000, LC_270M, 2048, 16875 },
+       { 88200, LC_270M, 1568, 9375 },
+       { 96000, LC_270M, 1024, 5625 },
+       { 128000, LC_270M, 4096, 16875 },
+       { 176400, LC_270M, 3136, 9375 },
+       { 192000, LC_270M, 2048, 5625 },
+       { 32000, LC_540M, 1024, 33750 },
+       { 44100, LC_540M, 784, 18750 },
+       { 48000, LC_540M, 512, 11250 },
+       { 64000, LC_540M, 2048, 33750 },
+       { 88200, LC_540M, 1568, 18750 },
+       { 96000, LC_540M, 1024, 11250 },
+       { 128000, LC_540M, 4096, 33750 },
+       { 176400, LC_540M, 3136, 18750 },
+       { 192000, LC_540M, 2048, 11250 },
+       { 32000, LC_810M, 1024, 50625 },
+       { 44100, LC_810M, 784, 28125 },
+       { 48000, LC_810M, 512, 16875 },
+       { 64000, LC_810M, 2048, 50625 },
+       { 88200, LC_810M, 1568, 28125 },
+       { 96000, LC_810M, 1024, 16875 },
+       { 128000, LC_810M, 4096, 50625 },
+       { 176400, LC_810M, 3136, 28125 },
+       { 192000, LC_810M, 2048, 16875 },
+};
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+               if (rate == dp_aud_n_m[i].sample_rate &&
+                   crtc_state->port_clock == dp_aud_n_m[i].clock)
+                       return &dp_aud_n_m[i];
+       }
+
+       return NULL;
+}
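
The fixed values in the table above follow the DP relation Maud / Naud = 512 * fs / f_LS_Clk, with the link symbol clock in Hz and the table's clock column in kHz. The following standalone check of a few entries is an illustration of that relation, not driver code.

        /* Sanity-check a few fixed Maud/Naud pairs against
         * m / n == 512 * fs / (clock_khz * 1000), cross-multiplied to stay
         * in integer arithmetic.  Illustrative only. */
        #include <stdint.h>
        #include <stdio.h>

        struct aud_n_m { int sample_rate; int clock_khz; uint16_t m; uint16_t n; };

        static const struct aud_n_m samples[] = {
                {  32000, 162000, 1024, 10125 },
                {  48000, 270000,  512,  5625 },
                {  96000, 540000, 1024, 11250 },
                { 192000, 810000, 2048, 16875 },
        };

        int main(void)
        {
                for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                        const struct aud_n_m *e = &samples[i];
                        uint64_t lhs = 512ULL * e->sample_rate * e->n;
                        uint64_t rhs = (uint64_t)e->m * e->clock_khz * 1000;

                        printf("%6d Hz @ %6d kHz: %s\n", e->sample_rate, e->clock_khz,
                               lhs == rhs ? "m/n matches 512*fs/f_LS" : "MISMATCH");
                }
                return 0;
        }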
+
+static const struct {
+       int clock;
+       u32 config;
+} hdmi_audio_clock[] = {
+       { 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+       { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+       { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+       { 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+       { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+       { 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+       { 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+       { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+       { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+       { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* HDMI N/CTS table */
+#define TMDS_297M 297000
+#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
+static const struct {
+       int sample_rate;
+       int clock;
+       int n;
+       int cts;
+} hdmi_aud_ncts[] = {
+       { 32000, TMDS_296M, 5824, 421875 },
+       { 32000, TMDS_297M, 3072, 222750 },
+       { 32000, TMDS_593M, 5824, 843750 },
+       { 32000, TMDS_594M, 3072, 445500 },
+       { 44100, TMDS_296M, 4459, 234375 },
+       { 44100, TMDS_297M, 4704, 247500 },
+       { 44100, TMDS_593M, 8918, 937500 },
+       { 44100, TMDS_594M, 9408, 990000 },
+       { 88200, TMDS_296M, 8918, 234375 },
+       { 88200, TMDS_297M, 9408, 247500 },
+       { 88200, TMDS_593M, 17836, 937500 },
+       { 88200, TMDS_594M, 18816, 990000 },
+       { 176400, TMDS_296M, 17836, 234375 },
+       { 176400, TMDS_297M, 18816, 247500 },
+       { 176400, TMDS_593M, 35672, 937500 },
+       { 176400, TMDS_594M, 37632, 990000 },
+       { 48000, TMDS_296M, 5824, 281250 },
+       { 48000, TMDS_297M, 5120, 247500 },
+       { 48000, TMDS_593M, 5824, 562500 },
+       { 48000, TMDS_594M, 6144, 594000 },
+       { 96000, TMDS_296M, 11648, 281250 },
+       { 96000, TMDS_297M, 10240, 247500 },
+       { 96000, TMDS_593M, 11648, 562500 },
+       { 96000, TMDS_594M, 12288, 594000 },
+       { 192000, TMDS_296M, 23296, 281250 },
+       { 192000, TMDS_297M, 20480, 247500 },
+       { 192000, TMDS_593M, 23296, 562500 },
+       { 192000, TMDS_594M, 24576, 594000 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
+{
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+               if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+               DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+                             adjusted_mode->crtc_clock);
+               i = 1;
+       }
+
+       DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+                     hdmi_audio_clock[i].clock,
+                     hdmi_audio_clock[i].config);
+
+       return hdmi_audio_clock[i].config;
+}
+
+static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
+                                  int rate)
+{
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
+               if (rate == hdmi_aud_ncts[i].sample_rate &&
+                   adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
+                       return hdmi_aud_ncts[i].n;
+               }
+       }
+       return 0;
+}
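
An HDMI sink regenerates the audio clock from the N/CTS pair as fs = f_TMDS * N / (128 * CTS), which is what the table above encodes per sample rate and TMDS clock. The standalone sketch below recovers the sample rate for a few entries; note that the 1/1.001 clocks (TMDS_296M, TMDS_593M) are rounded in the table, so those entries recover the rate only approximately. This is an illustration of the relation, not driver code.

        /* Recover the audio sample rate from a few (N, CTS) pairs using
         * fs = f_TMDS * N / (128 * CTS).  TMDS clocks are in kHz as in the
         * table above. */
        #include <stdint.h>
        #include <stdio.h>

        struct ncts { int sample_rate; int tmds_khz; int n; int cts; };

        static const struct ncts samples[] = {
                { 48000, 297000, 5120, 247500 },
                { 44100, 594000, 9408, 990000 },
                { 32000, 296703, 5824, 421875 },  /* rounded 297000/1.001 clock */
        };

        int main(void)
        {
                for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                        const struct ncts *e = &samples[i];
                        double tmds_hz = (double)e->tmds_khz * 1000.0;
                        double fs = tmds_hz * e->n / (128.0 * e->cts);

                        printf("N=%5d CTS=%6d @ %6d kHz -> fs ~ %.1f Hz (table: %d)\n",
                               e->n, e->cts, e->tmds_khz, fs, e->sample_rate);
                }
                return 0;
        }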
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+                              i915_reg_t reg_eldv, u32 bits_eldv,
+                              i915_reg_t reg_elda, u32 bits_elda,
+                              i915_reg_t reg_edid)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
+       const u8 *eld = connector->eld;
+       u32 tmp;
+       int i;
+
+       tmp = I915_READ(reg_eldv);
+       tmp &= bits_eldv;
+
+       if (!tmp)
+               return false;
+
+       tmp = I915_READ(reg_elda);
+       tmp &= ~bits_elda;
+       I915_WRITE(reg_elda, tmp);
+
+       for (i = 0; i < drm_eld_size(eld) / 4; i++)
+               if (I915_READ(reg_edid) != *((const u32 *)eld + i))
+                       return false;
+
+       return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder,
+                                   const struct intel_crtc_state *old_crtc_state,
+                                   const struct drm_connector_state *old_conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 eldv, tmp;
+
+       DRM_DEBUG_KMS("Disable audio codec\n");
+
+       tmp = I915_READ(G4X_AUD_VID_DID);
+       if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+               eldv = G4X_ELDV_DEVCL_DEVBLC;
+       else
+               eldv = G4X_ELDV_DEVCTG;
+
+       /* Invalidate ELD */
+       tmp = I915_READ(G4X_AUD_CNTL_ST);
+       tmp &= ~eldv;
+       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct intel_encoder *encoder,
+                                  const struct intel_crtc_state *crtc_state,
+                                  const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_connector *connector = conn_state->connector;
+       const u8 *eld = connector->eld;
+       u32 eldv;
+       u32 tmp;
+       int len, i;
+
+       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
+
+       tmp = I915_READ(G4X_AUD_VID_DID);
+       if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+               eldv = G4X_ELDV_DEVCL_DEVBLC;
+       else
+               eldv = G4X_ELDV_DEVCTG;
+
+       if (intel_eld_uptodate(connector,
+                              G4X_AUD_CNTL_ST, eldv,
+                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+                              G4X_HDMIW_HDMIEDID))
+               return;
+
+       tmp = I915_READ(G4X_AUD_CNTL_ST);
+       tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+       len = (tmp >> 9) & 0x1f;                /* ELD buffer size */
+       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+       len = min(drm_eld_size(eld) / 4, len);
+       DRM_DEBUG_DRIVER("ELD size %d\n", len);
+       for (i = 0; i < len; i++)
+               I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
+
+       tmp = I915_READ(G4X_AUD_CNTL_ST);
+       tmp |= eldv;
+       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void
+hsw_dp_audio_config_update(struct intel_encoder *encoder,
+                          const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct i915_audio_component *acomp = dev_priv->audio_component;
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       enum port port = encoder->port;
+       const struct dp_aud_n_m *nm;
+       int rate;
+       u32 tmp;
+
+       rate = acomp ? acomp->aud_sample_rate[port] : 0;
+       nm = audio_config_dp_get_n_m(crtc_state, rate);
+       if (nm)
+               DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+       else
+               DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+
+       tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+       tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+       if (nm) {
+               tmp &= ~AUD_CONFIG_N_MASK;
+               tmp |= AUD_CONFIG_N(nm->n);
+               tmp |= AUD_CONFIG_N_PROG_ENABLE;
+       }
+
+       I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+
+       tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+       tmp &= ~AUD_CONFIG_M_MASK;
+       tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+       tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+       if (nm) {
+               tmp |= nm->m;
+               tmp |= AUD_M_CTS_M_VALUE_INDEX;
+               tmp |= AUD_M_CTS_M_PROG_ENABLE;
+       }
+
+       I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+}
+
+static void
+hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
+                            const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct i915_audio_component *acomp = dev_priv->audio_component;
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       enum port port = encoder->port;
+       int n, rate;
+       u32 tmp;
+
+       rate = acomp ? acomp->aud_sample_rate[port] : 0;
+
+       tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+       tmp |= audio_config_hdmi_pixel_clock(crtc_state);
+
+       n = audio_config_hdmi_get_n(crtc_state, rate);
+       if (n != 0) {
+               DRM_DEBUG_KMS("using N %d\n", n);
+
+               tmp &= ~AUD_CONFIG_N_MASK;
+               tmp |= AUD_CONFIG_N(n);
+               tmp |= AUD_CONFIG_N_PROG_ENABLE;
+       } else {
+               DRM_DEBUG_KMS("using automatic N\n");
+       }
+
+       I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+
+       /*
+        * Disable the "Enable CTS or M Prog bit" and let the
+        * hardware calculate the value.
+        */
+       tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+       tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+       tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+       I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *crtc_state)
+{
+       if (intel_crtc_has_dp_encoder(crtc_state))
+               hsw_dp_audio_config_update(encoder, crtc_state);
+       else
+               hsw_hdmi_audio_config_update(encoder, crtc_state);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder,
+                                   const struct intel_crtc_state *old_crtc_state,
+                                   const struct drm_connector_state *old_conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+       u32 tmp;
+
+       DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
+                     transcoder_name(cpu_transcoder));
+
+       mutex_lock(&dev_priv->av_mutex);
+
+       /* Disable timestamps */
+       tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp |= AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+       tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+       if (intel_crtc_has_dp_encoder(old_crtc_state))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+
+       /* Invalidate ELD */
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
+       tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+       mutex_unlock(&dev_priv->av_mutex);
+}
+
+static void hsw_audio_codec_enable(struct intel_encoder *encoder,
+                                  const struct intel_crtc_state *crtc_state,
+                                  const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_connector *connector = conn_state->connector;
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       const u8 *eld = connector->eld;
+       u32 tmp;
+       int len, i;
+
+       DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
+                     transcoder_name(cpu_transcoder), drm_eld_size(eld));
+
+       mutex_lock(&dev_priv->av_mutex);
+
+       /* Enable audio presence detect, invalidate ELD */
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+       tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+       /*
+        * FIXME: We're supposed to wait for vblank here, but we have vblanks
+        * disabled during the mode set. The proper fix would be to push the
+        * rest of the setup into a vblank work item, queued here, but the
+        * infrastructure is not there yet.
+        */
+
+       /* Reset ELD write address */
+       tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+       tmp &= ~IBX_ELD_ADDRESS_MASK;
+       I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+
+       /* Up to 84 bytes of hw ELD buffer */
+       len = min(drm_eld_size(eld), 84);
+       for (i = 0; i < len / 4; i++)
+               I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
+
+       /* ELD valid */
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp |= AUDIO_ELD_VALID(cpu_transcoder);
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+       /* Enable timestamps */
+       hsw_audio_config_update(encoder, crtc_state);
+
+       mutex_unlock(&dev_priv->av_mutex);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+                                   const struct intel_crtc_state *old_crtc_state,
+                                   const struct drm_connector_state *old_conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       enum port port = encoder->port;
+       u32 tmp, eldv;
+       i915_reg_t aud_config, aud_cntrl_st2;
+
+       DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+                     port_name(port), pipe_name(pipe));
+
+       if (WARN_ON(port == PORT_A))
+               return;
+
+       if (HAS_PCH_IBX(dev_priv)) {
+               aud_config = IBX_AUD_CFG(pipe);
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               aud_config = VLV_AUD_CFG(pipe);
+               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+       } else {
+               aud_config = CPT_AUD_CFG(pipe);
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+       }
+
+       /* Disable timestamps */
+       tmp = I915_READ(aud_config);
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp |= AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+       tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+       if (intel_crtc_has_dp_encoder(old_crtc_state))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       I915_WRITE(aud_config, tmp);
+
+       eldv = IBX_ELD_VALID(port);
+
+       /* Invalidate ELD */
+       tmp = I915_READ(aud_cntrl_st2);
+       tmp &= ~eldv;
+       I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+                                  const struct intel_crtc_state *crtc_state,
+                                  const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_connector *connector = conn_state->connector;
+       enum pipe pipe = crtc->pipe;
+       enum port port = encoder->port;
+       const u8 *eld = connector->eld;
+       u32 tmp, eldv;
+       int len, i;
+       i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
+
+       DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+                     port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+       if (WARN_ON(port == PORT_A))
+               return;
+
+       /*
+        * FIXME: We're supposed to wait for vblank here, but we have vblanks
+        * disabled during the mode set. The proper fix would be to push the
+        * rest of the setup into a vblank work item, queued here, but the
+        * infrastructure is not there yet.
+        */
+
+       if (HAS_PCH_IBX(dev_priv)) {
+               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+               aud_config = IBX_AUD_CFG(pipe);
+               aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+       } else if (IS_VALLEYVIEW(dev_priv) ||
+                  IS_CHERRYVIEW(dev_priv)) {
+               hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+               aud_config = VLV_AUD_CFG(pipe);
+               aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+       } else {
+               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+               aud_config = CPT_AUD_CFG(pipe);
+               aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+       }
+
+       eldv = IBX_ELD_VALID(port);
+
+       /* Invalidate ELD */
+       tmp = I915_READ(aud_cntrl_st2);
+       tmp &= ~eldv;
+       I915_WRITE(aud_cntrl_st2, tmp);
+
+       /* Reset ELD write address */
+       tmp = I915_READ(aud_cntl_st);
+       tmp &= ~IBX_ELD_ADDRESS_MASK;
+       I915_WRITE(aud_cntl_st, tmp);
+
+       /* Up to 84 bytes of hw ELD buffer */
+       len = min(drm_eld_size(eld), 84);
+       for (i = 0; i < len / 4; i++)
+               I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
+
+       /* ELD valid */
+       tmp = I915_READ(aud_cntrl_st2);
+       tmp |= eldv;
+       I915_WRITE(aud_cntrl_st2, tmp);
+
+       /* Enable timestamps */
+       tmp = I915_READ(aud_config);
+       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+       if (intel_crtc_has_dp_encoder(crtc_state))
+               tmp |= AUD_CONFIG_N_VALUE_INDEX;
+       else
+               tmp |= audio_config_hdmi_pixel_clock(crtc_state);
+       I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after completed link training.
+ */
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *crtc_state,
+                             const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct i915_audio_component *acomp = dev_priv->audio_component;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_connector *connector = conn_state->connector;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+       enum port port = encoder->port;
+       enum pipe pipe = crtc->pipe;
+
+       /* FIXME precompute the ELD in .compute_config() */
+       if (!connector->eld[0])
+               DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
+                             connector->base.id, connector->name);
+
+       DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+                        connector->base.id,
+                        connector->name,
+                        connector->encoder->base.id,
+                        connector->encoder->name);
+
+       connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+       if (dev_priv->display.audio_codec_enable)
+               dev_priv->display.audio_codec_enable(encoder,
+                                                    crtc_state,
+                                                    conn_state);
+
+       mutex_lock(&dev_priv->av_mutex);
+       encoder->audio_connector = connector;
+
+       /* referenced in the audio callbacks */
+       dev_priv->av_enc_map[pipe] = encoder;
+       mutex_unlock(&dev_priv->av_mutex);
+
+       if (acomp && acomp->base.audio_ops &&
+           acomp->base.audio_ops->pin_eld_notify) {
+               /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+               if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+                       pipe = -1;
+               acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
+                                                (int) port, (int) pipe);
+       }
+
+       intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
+                              crtc_state->port_clock,
+                              intel_crtc_has_dp_encoder(crtc_state));
+}
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ * @old_crtc_state: pointer to the old crtc state.
+ * @old_conn_state: pointer to the old connector state.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *old_crtc_state,
+                              const struct drm_connector_state *old_conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct i915_audio_component *acomp = dev_priv->audio_component;
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       enum port port = encoder->port;
+       enum pipe pipe = crtc->pipe;
+
+       if (dev_priv->display.audio_codec_disable)
+               dev_priv->display.audio_codec_disable(encoder,
+                                                     old_crtc_state,
+                                                     old_conn_state);
+
+       mutex_lock(&dev_priv->av_mutex);
+       encoder->audio_connector = NULL;
+       dev_priv->av_enc_map[pipe] = NULL;
+       mutex_unlock(&dev_priv->av_mutex);
+
+       if (acomp && acomp->base.audio_ops &&
+           acomp->base.audio_ops->pin_eld_notify) {
+               /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+               if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+                       pipe = -1;
+               acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
+                                                (int) port, (int) pipe);
+       }
+
+       intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
+}
+
+/**
+ * intel_init_audio_hooks - Set up chip specific audio hooks
+ * @dev_priv: device private
+ */
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
+{
+       if (IS_G4X(dev_priv)) {
+               dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+       } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
+               dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+               dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+       }
+}
+
+static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+                                 bool enable)
+{
+       struct drm_modeset_acquire_ctx ctx;
+       struct drm_atomic_state *state;
+       int ret;
+
+       drm_modeset_acquire_init(&ctx, 0);
+       state = drm_atomic_state_alloc(&dev_priv->drm);
+       if (WARN_ON(!state))
+               return;
+
+       state->acquire_ctx = &ctx;
+
+retry:
+       to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
+       to_intel_atomic_state(state)->cdclk.force_min_cdclk =
+               enable ? 2 * 96000 : 0;
+
+       /*
+        * Protects dev_priv->cdclk.force_min_cdclk.
+        * Take the lock here in case there are no active pipes,
+        * since it would otherwise not be taken during the commit.
+        */
+       ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+                              &ctx);
+       if (!ret)
+               ret = drm_atomic_commit(state);
+
+       if (ret == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       }
+
+       WARN_ON(ret);
+
+       drm_atomic_state_put(state);
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+}
+
+static unsigned long i915_audio_component_get_power(struct device *kdev)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       intel_wakeref_t ret;
+
+       /* Catch potential impedance mismatches before they occur! */
+       BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
+
+       ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+
+       /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
+       if (dev_priv->audio_power_refcount++ == 0)
+               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+                       glk_force_audio_cdclk(dev_priv, true);
+
+       return ret;
+}
+
+static void i915_audio_component_put_power(struct device *kdev,
+                                          unsigned long cookie)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+       /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
+       if (--dev_priv->audio_power_refcount == 0)
+               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+                       glk_force_audio_cdclk(dev_priv, false);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
+}
+
+static void i915_audio_component_codec_wake_override(struct device *kdev,
+                                                    bool enable)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       unsigned long cookie;
+       u32 tmp;
+
+       if (!IS_GEN(dev_priv, 9))
+               return;
+
+       cookie = i915_audio_component_get_power(kdev);
+
+       /*
+        * Enable/disable generating the codec wake signal, overriding the
+        * internal logic that generates the codec wake to the controller.
+        */
+       tmp = I915_READ(HSW_AUD_CHICKENBIT);
+       tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
+       I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+       usleep_range(1000, 1500);
+
+       if (enable) {
+               tmp = I915_READ(HSW_AUD_CHICKENBIT);
+               tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
+               I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+               usleep_range(1000, 1500);
+       }
+
+       i915_audio_component_put_power(kdev, cookie);
+}
+
+/* Get CDCLK in kHz  */
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+       if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+               return -ENODEV;
+
+       return dev_priv->cdclk.hw.cdclk;
+}
+
+/*
+ * Get the intel_encoder for the given port and pipe. Encoders are saved
+ * indexed by pipe.
+ * MST & (pipe >= 0): return av_enc_map[pipe] if its port matches
+ * MST & (pipe < 0): invalid
+ * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) returns
+ *   the intel_encoder with a matching port
+ * Non-MST & (pipe < 0): return the intel_encoder with a matching port
+ */
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+                                              int port, int pipe)
+{
+       struct intel_encoder *encoder;
+
+       /* MST */
+       if (pipe >= 0) {
+               if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+                       return NULL;
+
+               encoder = dev_priv->av_enc_map[pipe];
+               /*
+                * At boot the audio driver may not yet know whether
+                * the output is MST or not, so it polls all the port
+                * and pipe combinations.
+                */
+               if (encoder != NULL && encoder->port == port &&
+                   encoder->type == INTEL_OUTPUT_DP_MST)
+                       return encoder;
+       }
+
+       /* Non-MST */
+       if (pipe > 0)
+               return NULL;
+
+       for_each_pipe(dev_priv, pipe) {
+               encoder = dev_priv->av_enc_map[pipe];
+               if (encoder == NULL)
+                       continue;
+
+               if (encoder->type == INTEL_OUTPUT_DP_MST)
+                       continue;
+
+               if (port == encoder->port)
+                       return encoder;
+       }
+
+       return NULL;
+}
+
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+                                               int pipe, int rate)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       struct i915_audio_component *acomp = dev_priv->audio_component;
+       struct intel_encoder *encoder;
+       struct intel_crtc *crtc;
+       unsigned long cookie;
+       int err = 0;
+
+       if (!HAS_DDI(dev_priv))
+               return 0;
+
+       cookie = i915_audio_component_get_power(kdev);
+       mutex_lock(&dev_priv->av_mutex);
+
+       /* 1. get the pipe */
+       encoder = get_saved_enc(dev_priv, port, pipe);
+       if (!encoder || !encoder->base.crtc) {
+               DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+               err = -ENODEV;
+               goto unlock;
+       }
+
+       crtc = to_intel_crtc(encoder->base.crtc);
+
+       /* port must be valid now, otherwise the pipe will be invalid */
+       acomp->aud_sample_rate[port] = rate;
+
+       hsw_audio_config_update(encoder, crtc->config);
+
+ unlock:
+       mutex_unlock(&dev_priv->av_mutex);
+       i915_audio_component_put_power(kdev, cookie);
+       return err;
+}
+
+static int i915_audio_component_get_eld(struct device *kdev, int port,
+                                       int pipe, bool *enabled,
+                                       unsigned char *buf, int max_bytes)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       struct intel_encoder *intel_encoder;
+       const u8 *eld;
+       int ret = -EINVAL;
+
+       mutex_lock(&dev_priv->av_mutex);
+
+       intel_encoder = get_saved_enc(dev_priv, port, pipe);
+       if (!intel_encoder) {
+               DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+               mutex_unlock(&dev_priv->av_mutex);
+               return ret;
+       }
+
+       ret = 0;
+       *enabled = intel_encoder->audio_connector != NULL;
+       if (*enabled) {
+               eld = intel_encoder->audio_connector->eld;
+               ret = drm_eld_size(eld);
+               memcpy(buf, eld, min(max_bytes, ret));
+       }
+
+       mutex_unlock(&dev_priv->av_mutex);
+       return ret;
+}
+
+static const struct drm_audio_component_ops i915_audio_component_ops = {
+       .owner          = THIS_MODULE,
+       .get_power      = i915_audio_component_get_power,
+       .put_power      = i915_audio_component_put_power,
+       .codec_wake_override = i915_audio_component_codec_wake_override,
+       .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
+       .sync_audio_rate = i915_audio_component_sync_audio_rate,
+       .get_eld        = i915_audio_component_get_eld,
+};
+
+static int i915_audio_component_bind(struct device *i915_kdev,
+                                    struct device *hda_kdev, void *data)
+{
+       struct i915_audio_component *acomp = data;
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+       int i;
+
+       if (WARN_ON(acomp->base.ops || acomp->base.dev))
+               return -EEXIST;
+
+       if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+               return -ENOMEM;
+
+       drm_modeset_lock_all(&dev_priv->drm);
+       acomp->base.ops = &i915_audio_component_ops;
+       acomp->base.dev = i915_kdev;
+       BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
+       for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
+               acomp->aud_sample_rate[i] = 0;
+       dev_priv->audio_component = acomp;
+       drm_modeset_unlock_all(&dev_priv->drm);
+
+       return 0;
+}
+
+static void i915_audio_component_unbind(struct device *i915_kdev,
+                                       struct device *hda_kdev, void *data)
+{
+       struct i915_audio_component *acomp = data;
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+       drm_modeset_lock_all(&dev_priv->drm);
+       acomp->base.ops = NULL;
+       acomp->base.dev = NULL;
+       dev_priv->audio_component = NULL;
+       drm_modeset_unlock_all(&dev_priv->drm);
+
+       device_link_remove(hda_kdev, i915_kdev);
+}
+
+static const struct component_ops i915_audio_component_bind_ops = {
+       .bind   = i915_audio_component_bind,
+       .unbind = i915_audio_component_unbind,
+};
+
+/**
+ * i915_audio_component_init - initialize and register the audio component
+ * @dev_priv: i915 device instance
+ *
+ * This will register with the component framework a child component which
+ * will bind dynamically to the snd_hda_intel driver's corresponding master
+ * component when the latter is registered. During binding the child
+ * initializes an instance of struct i915_audio_component which it receives
+ * from the master. The master can then start to use the interface defined by
+ * this struct. Each side can break the binding at any point by deregistering
+ * its own component after which each side's component unbind callback is
+ * called.
+ *
+ * We ignore any error during registration and continue with reduced
+ * functionality (i.e. without HDMI audio).
+ */
+static void i915_audio_component_init(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       ret = component_add_typed(dev_priv->drm.dev,
+                                 &i915_audio_component_bind_ops,
+                                 I915_COMPONENT_AUDIO);
+       if (ret < 0) {
+               DRM_ERROR("failed to add audio component (%d)\n", ret);
+               /* continue with reduced functionality */
+               return;
+       }
+
+       dev_priv->audio_component_registered = true;
+}
+
+/**
+ * i915_audio_component_cleanup - deregister the audio component
+ * @dev_priv: i915 device instance
+ *
+ * Deregisters the audio component, breaking any existing binding to the
+ * corresponding snd_hda_intel driver's master component.
+ */
+static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+{
+       if (!dev_priv->audio_component_registered)
+               return;
+
+       component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
+       dev_priv->audio_component_registered = false;
+}
+
+/**
+ * intel_audio_init() - Initialize the audio driver, either using the
+ * component framework or the LPE audio bridge
+ * @dev_priv: the i915 drm device private data
+ *
+ */
+void intel_audio_init(struct drm_i915_private *dev_priv)
+{
+       if (intel_lpe_audio_init(dev_priv) < 0)
+               i915_audio_component_init(dev_priv);
+}
+
+/**
+ * intel_audio_deinit() - deinitialize the audio driver
+ * @dev_priv: the i915 drm device private data
+ *
+ */
+void intel_audio_deinit(struct drm_i915_private *dev_priv)
+{
+       if (dev_priv->lpe_audio.platdev != NULL)
+               intel_lpe_audio_teardown(dev_priv);
+       else
+               i915_audio_component_cleanup(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
new file mode 100644 (file)
index 0000000..a3657c7
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_H__
+#define __INTEL_AUDIO_H__
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *crtc_state,
+                             const struct drm_connector_state *conn_state);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *old_crtc_state,
+                              const struct drm_connector_state *old_conn_state);
+void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_deinit(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
new file mode 100644 (file)
index 0000000..270719f
--- /dev/null
@@ -0,0 +1,2253 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <drm/drm_dp_helper.h>
+#include <drm/i915_drm.h>
+
+#include "display/intel_gmbus.h"
+
+#include "i915_drv.h"
+
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
+
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
+ * BDB Header. The data blocks are concatenated after the BDB Header. The data
+ * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
+ * data. (Block 53, the MIPI Sequence Block is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
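
A minimal standalone sketch of walking the block layout described above (1-byte Block ID, 2-byte little-endian Block Size, then that many bytes of data) over a made-up buffer. The real find_section() below additionally starts after the BDB header and handles the MIPI Sequence Block's separate size field; this is an illustration of the layout, not the driver's parser.

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        static const uint8_t *find_block(const uint8_t *buf, size_t len, uint8_t id)
        {
                size_t i = 0;

                while (i + 3 <= len) {
                        uint8_t block_id = buf[i];
                        uint16_t block_size = buf[i + 1] | (buf[i + 2] << 8);

                        i += 3;
                        if (i + block_size > len)
                                return NULL;
                        if (block_id == id)
                                return &buf[i];
                        i += block_size;
                }
                return NULL;
        }

        int main(void)
        {
                /* Two fake blocks: ID 1 with 2 bytes of data, ID 40 with 1 byte. */
                static const uint8_t bdb[] = { 1, 2, 0, 0xaa, 0xbb,  40, 1, 0, 0x42 };
                const uint8_t *blk = find_block(bdb, sizeof(bdb), 40);

                if (blk)
                        printf("block 40 found, first data byte 0x%02x\n", blk[0]);
                return 0;
        }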
+
+#define        SLAVE_ADDR1     0x70
+#define        SLAVE_ADDR2     0x72
+
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+       /* The MIPI Sequence Block v3+ has a separate size field. */
+       if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+               return *((const u32 *)(block_base + 4));
+       else
+               return *((const u16 *)(block_base + 1));
+}
+
+/* Get BDB block size given a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+       return _get_blocksize(block_data - 3);
+}
+
+static const void *
+find_section(const void *_bdb, enum bdb_block_id section_id)
+{
+       const struct bdb_header *bdb = _bdb;
+       const u8 *base = _bdb;
+       int index = 0;
+       u32 total, current_size;
+       enum bdb_block_id current_id;
+
+       /* skip to first section */
+       index += bdb->header_size;
+       total = bdb->bdb_size;
+
+       /* walk the sections looking for section_id */
+       while (index + 3 < total) {
+               current_id = *(base + index);
+               current_size = _get_blocksize(base + index);
+               index += 3;
+
+               if (index + current_size > total)
+                       return NULL;
+
+               if (current_id == section_id)
+                       return base + index;
+
+               index += current_size;
+       }
+
+       return NULL;
+}
+
+static void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+                       const struct lvds_dvo_timing *dvo_timing)
+{
+       panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+               dvo_timing->hactive_lo;
+       panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+       panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+               ((dvo_timing->hsync_pulse_width_hi << 8) |
+                       dvo_timing->hsync_pulse_width_lo);
+       panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+       panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+               dvo_timing->vactive_lo;
+       panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+               ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo);
+       panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+               ((dvo_timing->vsync_pulse_width_hi << 4) |
+                       dvo_timing->vsync_pulse_width_lo);
+       panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+               ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+       panel_fixed_mode->clock = dvo_timing->clock * 10;
+       panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+       if (dvo_timing->hsync_positive)
+               panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+       else
+               panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+       if (dvo_timing->vsync_positive)
+               panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+       else
+               panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+       panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+               dvo_timing->himage_lo;
+       panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+               dvo_timing->vimage_lo;
+
+       /* Some VBTs have bogus h/vtotal values */
+       if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+               panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+       if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+               panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+       drm_mode_set_name(panel_fixed_mode);
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+                   const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+                   int index)
+{
+       /*
+        * The size of fp_timing varies between platforms, so calculate
+        * the offset of the DVO timing relative to the LVDS data entry
+        * to get the DVO timing entry.
+        */
+
+       int lfp_data_size =
+               lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+               lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+       int dvo_timing_offset =
+               lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+               lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+       char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+       return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
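
As a worked example of the offset arithmetic above (with made-up pointer-table values, not real VBT contents): if ptr[0].fp_timing_offset = 100, ptr[0].dvo_timing_offset = 146 and ptr[1].dvo_timing_offset = 196, then each LFP data entry is 196 - 146 = 50 bytes long and the DVO timing sits 146 - 100 = 46 bytes into it, so entry i's DVO timing starts at data + 50 * i + 46.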
+
+/*
+ * Get the lvds_fp_timing entry.
+ * May return NULL if the corresponding entry is invalid.
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+                  const struct bdb_lvds_lfp_data *data,
+                  const struct bdb_lvds_lfp_data_ptrs *ptrs,
+                  int index)
+{
+       size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+       u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+       size_t ofs;
+
+       if (index >= ARRAY_SIZE(ptrs->ptr))
+               return NULL;
+       ofs = ptrs->ptr[index].fp_timing_offset;
+       if (ofs < data_ofs ||
+           ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+               return NULL;
+       return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
+/* Try to find integrated panel data */
+static void
+parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+                    const struct bdb_header *bdb)
+{
+       const struct bdb_lvds_options *lvds_options;
+       const struct bdb_lvds_lfp_data *lvds_lfp_data;
+       const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+       const struct lvds_dvo_timing *panel_dvo_timing;
+       const struct lvds_fp_timing *fp_timing;
+       struct drm_display_mode *panel_fixed_mode;
+       int panel_type;
+       int drrs_mode;
+       int ret;
+
+       lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+       if (!lvds_options)
+               return;
+
+       dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
+
+       ret = intel_opregion_get_panel_type(dev_priv);
+       if (ret >= 0) {
+               WARN_ON(ret > 0xf);
+               panel_type = ret;
+               DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+       } else {
+               if (lvds_options->panel_type > 0xf) {
+                       DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
+                                     lvds_options->panel_type);
+                       return;
+               }
+               panel_type = lvds_options->panel_type;
+               DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+       }
+
+       dev_priv->vbt.panel_type = panel_type;
+
+       drrs_mode = (lvds_options->dps_panel_type_bits
+                               >> (panel_type * 2)) & MODE_MASK;
+       /*
+        * VBT has static DRRS = 0 and seamless DRRS = 2.
+        * The code below adjusts vbt.drrs_type to match
+        * enum drrs_support_type.
+        */
+       switch (drrs_mode) {
+       case 0:
+               dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+               DRM_DEBUG_KMS("DRRS supported mode is static\n");
+               break;
+       case 2:
+               dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+               DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+               break;
+       default:
+               dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+               DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+               break;
+       }
+
+       lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+       if (!lvds_lfp_data)
+               return;
+
+       lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+       if (!lvds_lfp_data_ptrs)
+               return;
+
+       panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+                                              lvds_lfp_data_ptrs,
+                                              panel_type);
+
+       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+       if (!panel_fixed_mode)
+               return;
+
+       fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+       dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+
+       DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+       drm_mode_debug_printmodeline(panel_fixed_mode);
+
+       fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+                                      lvds_lfp_data_ptrs,
+                                      panel_type);
+       if (fp_timing) {
+               /* check the resolution, just to be sure */
+               if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+                   fp_timing->y_res == panel_fixed_mode->vdisplay) {
+                       dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+                       DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+                                     dev_priv->vbt.bios_lvds_val);
+               }
+       }
+}
+
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv,
+                   const struct bdb_header *bdb)
+{
+       const struct bdb_lfp_backlight_data *backlight_data;
+       const struct lfp_backlight_data_entry *entry;
+       int panel_type = dev_priv->vbt.panel_type;
+
+       backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+       if (!backlight_data)
+               return;
+
+       if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+               DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+                             backlight_data->entry_size);
+               return;
+       }
+
+       entry = &backlight_data->data[panel_type];
+
+       dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+       if (!dev_priv->vbt.backlight.present) {
+               DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
+                             entry->type);
+               return;
+       }
+
+       dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+       if (bdb->version >= 191 &&
+           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+               const struct lfp_backlight_control_method *method;
+
+               method = &backlight_data->backlight_control[panel_type];
+               dev_priv->vbt.backlight.type = method->type;
+               dev_priv->vbt.backlight.controller = method->controller;
+       }
+
+       dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+       dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+       dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
+       DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+                     "active %s, min brightness %u, level %u, controller %u\n",
+                     dev_priv->vbt.backlight.pwm_freq_hz,
+                     dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+                     dev_priv->vbt.backlight.min_brightness,
+                     backlight_data->level[panel_type],
+                     dev_priv->vbt.backlight.controller);
+}
+
+/* Try to find sdvo panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+                     const struct bdb_header *bdb)
+{
+       const struct bdb_sdvo_panel_dtds *dtds;
+       struct drm_display_mode *panel_fixed_mode;
+       int index;
+
+       index = i915_modparams.vbt_sdvo_panel_type;
+       if (index == -2) {
+               DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+               return;
+       }
+
+       if (index == -1) {
+               const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+               sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+               if (!sdvo_lvds_options)
+                       return;
+
+               index = sdvo_lvds_options->panel_type;
+       }
+
+       dtds = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+       if (!dtds)
+               return;
+
+       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+       if (!panel_fixed_mode)
+               return;
+
+       fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
+
+       dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+       DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+       drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
+                                   bool alternate)
+{
+       switch (INTEL_GEN(dev_priv)) {
+       case 2:
+               return alternate ? 66667 : 48000;
+       case 3:
+       case 4:
+               return alternate ? 100000 : 96000;
+       default:
+               return alternate ? 100000 : 120000;
+       }
+}
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+                      const struct bdb_header *bdb)
+{
+       const struct bdb_general_features *general;
+
+       general = find_section(bdb, BDB_GENERAL_FEATURES);
+       if (!general)
+               return;
+
+       dev_priv->vbt.int_tv_support = general->int_tv_support;
+       /* int_crt_support can't be trusted on earlier platforms */
+       if (bdb->version >= 155 &&
+           (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
+               dev_priv->vbt.int_crt_support = general->int_crt_support;
+       dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
+       dev_priv->vbt.lvds_ssc_freq =
+               intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
+       dev_priv->vbt.display_clock_mode = general->display_clock_mode;
+       dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+       if (bdb->version >= 181) {
+               dev_priv->vbt.orientation = general->rotate_180 ?
+                       DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+                       DRM_MODE_PANEL_ORIENTATION_NORMAL;
+       } else {
+               dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+       }
+       DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+                     dev_priv->vbt.int_tv_support,
+                     dev_priv->vbt.int_crt_support,
+                     dev_priv->vbt.lvds_use_ssc,
+                     dev_priv->vbt.lvds_ssc_freq,
+                     dev_priv->vbt.display_clock_mode,
+                     dev_priv->vbt.fdi_rx_polarity_inverted);
+}
+
+static const struct child_device_config *
+child_device_ptr(const struct bdb_general_definitions *defs, int i)
+{
+       return (const void *) &defs->devices[i * defs->child_dev_size];
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
+{
+       struct sdvo_device_mapping *mapping;
+       const struct child_device_config *child;
+       int i, count = 0;
+
+       /*
+        * Only parse SDVO mappings on gens that could have SDVO. This isn't
+        * accurate and doesn't have to be, as long as it's not too strict.
+        */
+       if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
+               DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
+               return;
+       }
+
+       for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               if (child->slave_addr != SLAVE_ADDR1 &&
+                   child->slave_addr != SLAVE_ADDR2) {
+                       /*
+                        * If the slave address is neither 0x70 nor 0x72,
+                        * it is not a SDVO device. Skip it.
+                        */
+                       continue;
+               }
+               if (child->dvo_port != DEVICE_PORT_DVOB &&
+                   child->dvo_port != DEVICE_PORT_DVOC) {
+                       /* skip the incorrect SDVO port */
+                       DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+                       continue;
+               }
+               DRM_DEBUG_KMS("SDVO device with slave addr %2x found on %s port\n",
+                             child->slave_addr,
+                             (child->dvo_port == DEVICE_PORT_DVOB) ?
+                             "SDVOB" : "SDVOC");
+               mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
+               if (!mapping->initialized) {
+                       mapping->dvo_port = child->dvo_port;
+                       mapping->slave_addr = child->slave_addr;
+                       mapping->dvo_wiring = child->dvo_wiring;
+                       mapping->ddc_pin = child->ddc_pin;
+                       mapping->i2c_pin = child->i2c_pin;
+                       mapping->initialized = 1;
+                       DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+                                     mapping->dvo_port,
+                                     mapping->slave_addr,
+                                     mapping->dvo_wiring,
+                                     mapping->ddc_pin,
+                                     mapping->i2c_pin);
+               } else {
+                       DRM_DEBUG_KMS("Maybe one SDVO port is shared by two SDVO devices.\n");
+               }
+               if (child->slave2_addr) {
+                       /*
+                        * Maybe this is an SDVO device with multiple inputs,
+                        * and the mapping info is not added.
+                        */
+                       DRM_DEBUG_KMS("slave2_addr exists; maybe this is an SDVO device with multiple inputs\n");
+               }
+               count++;
+       }
+
+       if (!count) {
+               /* No SDVO device info found */
+               DRM_DEBUG_KMS("No SDVO device info found in VBT\n");
+       }
+}
+
+static void
+parse_driver_features(struct drm_i915_private *dev_priv,
+                     const struct bdb_header *bdb)
+{
+       const struct bdb_driver_features *driver;
+
+       driver = find_section(bdb, BDB_DRIVER_FEATURES);
+       if (!driver)
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 5) {
+               /*
+                * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+                * to mean "eDP". The VBT spec doesn't agree with that
+                * interpretation, but real world VBTs seem to.
+                */
+               if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+                       dev_priv->vbt.int_lvds_support = 0;
+       } else {
+               /*
+                * FIXME it's not clear which BDB version has the LVDS config
+                * bits defined. Revision history in the VBT spec says:
+                * "0.92 | Add two definitions for VBT value of LVDS Active
+                *  Config (00b and 11b values defined) | 06/13/2005"
+                * but does not specify the BDB version.
+                *
+                * So far version 134 (on i945gm) is the oldest VBT observed
+                * in the wild with the bits correctly populated. Version
+                * 108 (on i85x) does not have the bits correctly populated.
+                */
+               if (bdb->version >= 134 &&
+                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+                       dev_priv->vbt.int_lvds_support = 0;
+       }
+
+       DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+       /*
+        * If DRRS is not supported, drrs_type has to be set to 0.
+        * This is because the VBT encodes static DRRS as 0, while
+        * "DRRS not supported" is instead represented by
+        * driver->drrs_enabled being false.
+        */
+       if (!driver->drrs_enabled)
+               dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+       dev_priv->vbt.psr.enable = driver->psr_enabled;
+}
+
+static void
+parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+{
+       const struct bdb_edp *edp;
+       const struct edp_power_seq *edp_pps;
+       const struct edp_fast_link_params *edp_link_params;
+       int panel_type = dev_priv->vbt.panel_type;
+
+       edp = find_section(bdb, BDB_EDP);
+       if (!edp)
+               return;
+
+       switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+       case EDP_18BPP:
+               dev_priv->vbt.edp.bpp = 18;
+               break;
+       case EDP_24BPP:
+               dev_priv->vbt.edp.bpp = 24;
+               break;
+       case EDP_30BPP:
+               dev_priv->vbt.edp.bpp = 30;
+               break;
+       }
+
+       /* Get the eDP sequencing and link info */
+       edp_pps = &edp->power_seqs[panel_type];
+       edp_link_params = &edp->fast_link_params[panel_type];
+
+       dev_priv->vbt.edp.pps = *edp_pps;
+
+       switch (edp_link_params->rate) {
+       case EDP_RATE_1_62:
+               dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
+               break;
+       case EDP_RATE_2_7:
+               dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+                             edp_link_params->rate);
+               break;
+       }
+
+       switch (edp_link_params->lanes) {
+       case EDP_LANE_1:
+               dev_priv->vbt.edp.lanes = 1;
+               break;
+       case EDP_LANE_2:
+               dev_priv->vbt.edp.lanes = 2;
+               break;
+       case EDP_LANE_4:
+               dev_priv->vbt.edp.lanes = 4;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+                             edp_link_params->lanes);
+               break;
+       }
+
+       switch (edp_link_params->preemphasis) {
+       case EDP_PREEMPHASIS_NONE:
+               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+               break;
+       case EDP_PREEMPHASIS_3_5dB:
+               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+               break;
+       case EDP_PREEMPHASIS_6dB:
+               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+               break;
+       case EDP_PREEMPHASIS_9_5dB:
+               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+                             edp_link_params->preemphasis);
+               break;
+       }
+
+       switch (edp_link_params->vswing) {
+       case EDP_VSWING_0_4V:
+               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+               break;
+       case EDP_VSWING_0_6V:
+               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+               break;
+       case EDP_VSWING_0_8V:
+               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+               break;
+       case EDP_VSWING_1_2V:
+               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+                             edp_link_params->vswing);
+               break;
+       }
+
+       if (bdb->version >= 173) {
+               u8 vswing;
+
+               /* Don't read from VBT if the module parameter has a valid value */
+               if (i915_modparams.edp_vswing) {
+                       dev_priv->vbt.edp.low_vswing =
+                               i915_modparams.edp_vswing == 1;
+               } else {
+                       vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+                       dev_priv->vbt.edp.low_vswing = vswing == 0;
+               }
+       }
+}
+
+static void
+parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+{
+       const struct bdb_psr *psr;
+       const struct psr_table *psr_table;
+       int panel_type = dev_priv->vbt.panel_type;
+
+       psr = find_section(bdb, BDB_PSR);
+       if (!psr) {
+               DRM_DEBUG_KMS("No PSR BDB found.\n");
+               return;
+       }
+
+       psr_table = &psr->psr_table[panel_type];
+
+       dev_priv->vbt.psr.full_link = psr_table->full_link;
+       dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+       /* Allowed VBT values range from 0 to 15 */
+       dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+               psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+       switch (psr_table->lines_to_wait) {
+       case 0:
+               dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+               break;
+       case 1:
+               dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+               break;
+       case 2:
+               dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+               break;
+       case 3:
+               dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
+                             psr_table->lines_to_wait);
+               break;
+       }
+
+       /*
+        * New psr options 0=500us, 1=100us, 2=2500us, 3=0us
+        * Old decimal value is wake up time in multiples of 100 us.
+        */
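+       /*
+        * Worked example of the mapping above and the code below: a raw
+        * tp1_wakeup_time of 2 means 200 us on an older VBT, but 2500 us
+        * on a v205+ VBT for the platforms checked here.
+        */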
+       if (bdb->version >= 205 &&
+           (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
+            INTEL_GEN(dev_priv) >= 10)) {
+               switch (psr_table->tp1_wakeup_time) {
+               case 0:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+                       break;
+               case 1:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+                       break;
+               case 3:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range [0-3], defaulting to max value 2500us\n",
+                                       psr_table->tp1_wakeup_time);
+                       /* fallthrough */
+               case 2:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+                       break;
+               }
+
+               switch (psr_table->tp2_tp3_wakeup_time) {
+               case 0:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+                       break;
+               case 1:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+                       break;
+               case 3:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range [0-3], defaulting to max value 2500us\n",
+                                       psr_table->tp2_tp3_wakeup_time);
+                       /* fallthrough */
+               case 2:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+                       break;
+               }
+       } else {
+               dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+               dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+       }
+
+       if (bdb->version >= 226) {
+               u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+
+               wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+               switch (wakeup_time) {
+               case 0:
+                       wakeup_time = 500;
+                       break;
+               case 1:
+                       wakeup_time = 100;
+                       break;
+               case 3:
+                       wakeup_time = 50;
+                       break;
+               default:
+               case 2:
+                       wakeup_time = 2500;
+                       break;
+               }
+               dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+       } else {
+               /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+               dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+       }
+}
+
+static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+                                     u16 version, enum port port)
+{
+       if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
+               dev_priv->vbt.dsi.bl_ports = BIT(port);
+               if (dev_priv->vbt.dsi.config->cabc_supported)
+                       dev_priv->vbt.dsi.cabc_ports = BIT(port);
+
+               return;
+       }
+
+       switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+       case DL_DCS_PORT_A:
+               dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+               break;
+       case DL_DCS_PORT_C:
+               dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+               break;
+       default:
+       case DL_DCS_PORT_A_AND_C:
+               dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+               break;
+       }
+
+       if (!dev_priv->vbt.dsi.config->cabc_supported)
+               return;
+
+       switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+       case DL_DCS_PORT_A:
+               dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+               break;
+       case DL_DCS_PORT_C:
+               dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+               break;
+       default:
+       case DL_DCS_PORT_A_AND_C:
+               dev_priv->vbt.dsi.cabc_ports =
+                                       BIT(PORT_A) | BIT(PORT_C);
+               break;
+       }
+}
+
+static void
+parse_mipi_config(struct drm_i915_private *dev_priv,
+                 const struct bdb_header *bdb)
+{
+       const struct bdb_mipi_config *start;
+       const struct mipi_config *config;
+       const struct mipi_pps_data *pps;
+       int panel_type = dev_priv->vbt.panel_type;
+       enum port port;
+
+       /* parse MIPI blocks only if LFP type is MIPI */
+       if (!intel_bios_is_dsi_present(dev_priv, &port))
+               return;
+
+       /* Initialize this to undefined indicating no generic MIPI support */
+       dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+
+       /*
+        * Block #40 is already parsed and panel_fixed_mode is stored in
+        * dev_priv->lfp_lvds_vbt_mode; reuse this when needed.
+        */
+
+       /*
+        * Parse block #52 using the panel index from the already parsed
+        * panel_type.
+        */
+       start = find_section(bdb, BDB_MIPI_CONFIG);
+       if (!start) {
+               DRM_DEBUG_KMS("No MIPI config BDB found\n");
+               return;
+       }
+
+       DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
+                        panel_type);
+
+       /*
+        * Get hold of the correct configuration block and PPS data,
+        * using panel_type as the index.
+        */
+       config = &start->config[panel_type];
+       pps = &start->pps[panel_type];
+
+       /* Store the full data for now; trim later if not all of it is needed */
+       dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+       if (!dev_priv->vbt.dsi.config)
+               return;
+
+       dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+       if (!dev_priv->vbt.dsi.pps) {
+               kfree(dev_priv->vbt.dsi.config);
+               return;
+       }
+
+       parse_dsi_backlight_ports(dev_priv, bdb->version, port);
+
+       /* FIXME is the 90 vs. 270 correct? */
+       switch (config->rotation) {
+       case ENABLE_ROTATION_0:
+               /*
+                * Most (all?) VBTs claim 0 degrees despite having
+                * an upside down panel, thus we do not trust this.
+                */
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+               break;
+       case ENABLE_ROTATION_90:
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+               break;
+       case ENABLE_ROTATION_180:
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+               break;
+       case ENABLE_ROTATION_270:
+               dev_priv->vbt.dsi.orientation =
+                       DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+               break;
+       }
+
+       /* We have mandatory MIPI config blocks. Initialize as a generic panel */
+       dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
+
+/* Find the sequence block and size for the given panel. */
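+/*
+ * Layout the parser below assumes: for sequence block v3+ the data starts
+ * with a 4-byte block size field, followed by per-panel entries of a 1-byte
+ * panel id, a 4-byte size and that many bytes of sequence data; older
+ * versions have no leading block size and use a 2-byte per-entry size.
+ */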
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+                         u16 panel_id, u32 *seq_size)
+{
+       u32 total = get_blocksize(sequence);
+       const u8 *data = &sequence->data[0];
+       u8 current_id;
+       u32 current_size;
+       int header_size = sequence->version >= 3 ? 5 : 3;
+       int index = 0;
+       int i;
+
+       /* skip new block size */
+       if (sequence->version >= 3)
+               data += 4;
+
+       for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+               if (index + header_size > total) {
+                       DRM_ERROR("Invalid sequence block (header)\n");
+                       return NULL;
+               }
+
+               current_id = *(data + index);
+               if (sequence->version >= 3)
+                       current_size = *((const u32 *)(data + index + 1));
+               else
+                       current_size = *((const u16 *)(data + index + 1));
+
+               index += header_size;
+
+               if (index + current_size > total) {
+                       DRM_ERROR("Invalid sequence block\n");
+                       return NULL;
+               }
+
+               if (current_id == panel_id) {
+                       *seq_size = current_size;
+                       return data + index;
+               }
+
+               index += current_size;
+       }
+
+       DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+       return NULL;
+}
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+       u16 len;
+
+       /* Skip Sequence Byte. */
+       for (index = index + 1; index < total; index += len) {
+               u8 operation_byte = *(data + index);
+               index++;
+
+               switch (operation_byte) {
+               case MIPI_SEQ_ELEM_END:
+                       return index;
+               case MIPI_SEQ_ELEM_SEND_PKT:
+                       if (index + 4 > total)
+                               return 0;
+
+                       len = *((const u16 *)(data + index + 2)) + 4;
+                       break;
+               case MIPI_SEQ_ELEM_DELAY:
+                       len = 4;
+                       break;
+               case MIPI_SEQ_ELEM_GPIO:
+                       len = 2;
+                       break;
+               case MIPI_SEQ_ELEM_I2C:
+                       if (index + 7 > total)
+                               return 0;
+                       len = *(data + index + 6) + 7;
+                       break;
+               default:
+                       DRM_ERROR("Unknown operation byte\n");
+                       return 0;
+               }
+       }
+
+       return 0;
+}
+
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+       int seq_end;
+       u16 len;
+       u32 size_of_sequence;
+
+       /*
+        * Could skip sequence based on Size of Sequence alone, but also do some
+        * checking on the structure.
+        */
+       if (total < 5) {
+               DRM_ERROR("Too small sequence size\n");
+               return 0;
+       }
+
+       /* Skip Sequence Byte. */
+       index++;
+
+       /*
+        * Size of Sequence. Excludes the Sequence Byte and the size itself,
+        * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+        * byte.
+        */
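+       /*
+        * I.e. a v3 sequence is laid out roughly as
+        * [seq byte][u32 size][elements ...][MIPI_SEQ_ELEM_END], where size
+        * counts everything after the u32 up to and including ELEM_END.
+        */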
+       size_of_sequence = *((const u32 *)(data + index));
+       index += 4;
+
+       seq_end = index + size_of_sequence;
+       if (seq_end > total) {
+               DRM_ERROR("Invalid sequence size\n");
+               return 0;
+       }
+
+       for (; index < total; index += len) {
+               u8 operation_byte = *(data + index);
+               index++;
+
+               if (operation_byte == MIPI_SEQ_ELEM_END) {
+                       if (index != seq_end) {
+                               DRM_ERROR("Invalid element structure\n");
+                               return 0;
+                       }
+                       return index;
+               }
+
+               len = *(data + index);
+               index++;
+
+               /*
+                * FIXME: Would be nice to check elements like for v1/v2 in
+                * goto_next_sequence() above.
+                */
+               switch (operation_byte) {
+               case MIPI_SEQ_ELEM_SEND_PKT:
+               case MIPI_SEQ_ELEM_DELAY:
+               case MIPI_SEQ_ELEM_GPIO:
+               case MIPI_SEQ_ELEM_I2C:
+               case MIPI_SEQ_ELEM_SPI:
+               case MIPI_SEQ_ELEM_PMIC:
+                       break;
+               default:
+                       DRM_ERROR("Unknown operation byte %u\n",
+                                 operation_byte);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
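+/*
+ * For example, assuming a v1 fragment of
+ * [INIT_OTP][GPIO nr val][DELAY d0 d1 d2 d3][SEND_PKT ...], the walk below
+ * returns 9 (1 seq byte + 3 + 5), i.e. the offset of the first packet op.
+ */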
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+       const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+       int index, len;
+
+       if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+               return 0;
+
+       /* index = 1 to skip sequence byte */
+       for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+               switch (data[index]) {
+               case MIPI_SEQ_ELEM_SEND_PKT:
+                       return index == 1 ? 0 : index;
+               case MIPI_SEQ_ELEM_DELAY:
+                       len = 5; /* 1 byte for operand + uint32 */
+                       break;
+               case MIPI_SEQ_ELEM_GPIO:
+                       len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+                       break;
+               default:
+                       return 0;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
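+/*
+ * Sketch of the split performed below: the leading delay/gpio ops of
+ * [INIT_OTP][delay/gpio ops][first SEND_PKT ...][ELEM_END] are copied into a
+ * new [DEASSERT_RESET][delay/gpio ops][ELEM_END] sequence, and the INIT_OTP
+ * pointer is moved forward so it starts at a rewritten INIT_OTP seq byte
+ * placed right before the first SEND_PKT.
+ */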
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+       u8 *init_otp;
+       int len;
+
+       /* Limit this to VLV for now. */
+       if (!IS_VALLEYVIEW(dev_priv))
+               return;
+
+       /* Limit this to v1 vid-mode sequences */
+       if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+           dev_priv->vbt.dsi.seq_version != 1)
+               return;
+
+       /* Only do this if there are OTP and assert seqs and no deassert seq */
+       if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+           !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+           dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+               return;
+
+       /* The deassert-sequence ends at the first DSI packet */
+       len = get_init_otp_deassert_fragment_len(dev_priv);
+       if (!len)
+               return;
+
+       DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+       /* Copy the fragment, update seq byte and terminate it */
+       init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+       dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+       if (!dev_priv->vbt.dsi.deassert_seq)
+               return;
+       dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+       dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+       /* Use the copy for deassert */
+       dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+               dev_priv->vbt.dsi.deassert_seq;
+       /* Replace the last byte of the fragment with init OTP seq byte */
+       init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+       /* And make MIPI_SEQ_INIT_OTP point to it */
+       dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
+static void
+parse_mipi_sequence(struct drm_i915_private *dev_priv,
+                   const struct bdb_header *bdb)
+{
+       int panel_type = dev_priv->vbt.panel_type;
+       const struct bdb_mipi_sequence *sequence;
+       const u8 *seq_data;
+       u32 seq_size;
+       u8 *data;
+       int index = 0;
+
+       /* Only our generic panel driver uses the sequence block. */
+       if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+               return;
+
+       sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+       if (!sequence) {
+               DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+               return;
+       }
+
+       /* Fail gracefully for forward incompatible sequence block. */
+       if (sequence->version >= 4) {
+               DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
+                         sequence->version);
+               return;
+       }
+
+       DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+
+       seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+       if (!seq_data)
+               return;
+
+       data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+       if (!data)
+               return;
+
+       /* Parse the sequences, store pointers to each sequence. */
+       for (;;) {
+               u8 seq_id = *(data + index);
+               if (seq_id == MIPI_SEQ_END)
+                       break;
+
+               if (seq_id >= MIPI_SEQ_MAX) {
+                       DRM_ERROR("Unknown sequence %u\n", seq_id);
+                       goto err;
+               }
+
+               /* Log about presence of sequences we won't run. */
+               if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+                       DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+
+               dev_priv->vbt.dsi.sequence[seq_id] = data + index;
+
+               if (sequence->version >= 3)
+                       index = goto_next_sequence_v3(data, index, seq_size);
+               else
+                       index = goto_next_sequence(data, index, seq_size);
+               if (!index) {
+                       DRM_ERROR("Invalid sequence %u\n", seq_id);
+                       goto err;
+               }
+       }
+
+       dev_priv->vbt.dsi.data = data;
+       dev_priv->vbt.dsi.size = seq_size;
+       dev_priv->vbt.dsi.seq_version = sequence->version;
+
+       fixup_mipi_sequences(dev_priv);
+
+       DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
+       return;
+
+err:
+       kfree(data);
+       memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
+}
+
+static u8 translate_iboost(u8 val)
+{
+       static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+       if (val >= ARRAY_SIZE(mapping)) {
+               DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+               return 0;
+       }
+       return mapping[val];
+}
+
+static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
+{
+       const struct ddi_vbt_port_info *info;
+       enum port port;
+
+       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+               info = &i915->vbt.ddi_port_info[port];
+
+               if (info->child && ddc_pin == info->alternate_ddc_pin)
+                       return port;
+       }
+
+       return PORT_NONE;
+}
+
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+                            enum port port)
+{
+       struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+       enum port p;
+
+       if (!info->alternate_ddc_pin)
+               return;
+
+       p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
+       if (p != PORT_NONE) {
+               DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+                             "disabling port %c DVI/HDMI support\n",
+                             port_name(port), info->alternate_ddc_pin,
+                             port_name(p), port_name(port));
+
+               /*
+                * If we have multiple ports supposedly sharing the
+                * pin, then DVI/HDMI couldn't exist on the shared
+                * port. Otherwise they would share the same DDC pin
+                * and the system couldn't communicate with them
+                * separately.
+                *
+                * Give child device order the priority, first come first
+                * served.
+                */
+               info->supports_dvi = false;
+               info->supports_hdmi = false;
+               info->alternate_ddc_pin = 0;
+       }
+}
+
+static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
+{
+       const struct ddi_vbt_port_info *info;
+       enum port port;
+
+       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+               info = &i915->vbt.ddi_port_info[port];
+
+               if (info->child && aux_ch == info->alternate_aux_channel)
+                       return port;
+       }
+
+       return PORT_NONE;
+}
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+                           enum port port)
+{
+       struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+       enum port p;
+
+       if (!info->alternate_aux_channel)
+               return;
+
+       p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
+       if (p != PORT_NONE) {
+               DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+                             "disabling port %c DP support\n",
+                             port_name(port), info->alternate_aux_channel,
+                             port_name(p), port_name(port));
+
+               /*
+                * If we have multiple ports supposedly sharing the
+                * AUX channel, then DP couldn't exist on the shared
+                * port. Otherwise they would share the same AUX
+                * channel and the system couldn't communicate with
+                * them separately.
+                *
+                * Give child device order the priority, first come first
+                * served.
+                */
+               info->supports_dp = false;
+               info->alternate_aux_channel = 0;
+       }
+}
+
+static const u8 cnp_ddc_pin_map[] = {
+       [0] = 0, /* N/A */
+       [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+       [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+       [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+       [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static const u8 icp_ddc_pin_map[] = {
+       [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+       [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+       [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+       [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+       [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+       [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+{
+       const u8 *ddc_pin_map;
+       int n_entries;
+
+       if (HAS_PCH_ICP(dev_priv)) {
+               ddc_pin_map = icp_ddc_pin_map;
+               n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+       } else if (HAS_PCH_CNP(dev_priv)) {
+               ddc_pin_map = cnp_ddc_pin_map;
+               n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+       } else {
+               /* Assuming direct map */
+               return vbt_pin;
+       }
+
+       if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+               return ddc_pin_map[vbt_pin];
+
+       DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+                     vbt_pin);
+       return 0;
+}
+
+static enum port dvo_port_to_port(u8 dvo_port)
+{
+       /*
+        * Each DDI port can have more than one value on the "DVO Port" field,
+        * so look for all the possible values for each port.
+        */
+       static const int dvo_ports[][3] = {
+               [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+               [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+               [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+               [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+               [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+               [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+       };
+       enum port port;
+       int i;
+
+       for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
+               for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
+                       if (dvo_ports[port][i] == -1)
+                               break;
+
+                       if (dvo_port == dvo_ports[port][i])
+                               return port;
+               }
+       }
+
+       return PORT_NONE;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv,
+                          const struct child_device_config *child,
+                          u8 bdb_version)
+{
+       struct ddi_vbt_port_info *info;
+       bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+       enum port port;
+
+       port = dvo_port_to_port(child->dvo_port);
+       if (port == PORT_NONE)
+               return;
+
+       info = &dev_priv->vbt.ddi_port_info[port];
+
+       if (info->child) {
+               DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+                             port_name(port));
+               return;
+       }
+
+       is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+       is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+       is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+       is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+       is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+
+       if (port == PORT_A && is_dvi) {
+               DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+                             is_hdmi ? "/HDMI" : "");
+               is_dvi = false;
+               is_hdmi = false;
+       }
+
+       info->supports_dvi = is_dvi;
+       info->supports_hdmi = is_hdmi;
+       info->supports_dp = is_dp;
+       info->supports_edp = is_edp;
+
+       if (bdb_version >= 195)
+               info->supports_typec_usb = child->dp_usb_type_c;
+
+       if (bdb_version >= 209)
+               info->supports_tbt = child->tbt;
+
+       DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+                     port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+                     HAS_LSPCON(dev_priv) && child->lspcon,
+                     info->supports_typec_usb, info->supports_tbt);
+
+       if (is_edp && is_dvi)
+               DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+                             port_name(port));
+       if (is_crt && port != PORT_E)
+               DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+       if (is_crt && (is_dvi || is_dp))
+               DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+                             port_name(port));
+       if (is_dvi && (port == PORT_A || port == PORT_E))
+               DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
+       if (!is_dvi && !is_dp && !is_crt)
+               DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+                             port_name(port));
+       if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+               DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+       if (is_dvi) {
+               u8 ddc_pin;
+
+               ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
+               if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+                       info->alternate_ddc_pin = ddc_pin;
+                       sanitize_ddc_pin(dev_priv, port);
+               } else {
+                       DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
+                                     "sticking to defaults\n",
+                                     port_name(port), ddc_pin);
+               }
+       }
+
+       if (is_dp) {
+               info->alternate_aux_channel = child->aux_channel;
+
+               sanitize_aux_ch(dev_priv, port);
+       }
+
+       if (bdb_version >= 158) {
+               /* The VBT HDMI level shift values match the table we have. */
+               u8 hdmi_level_shift = child->hdmi_level_shifter_value;
+               DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+                             port_name(port),
+                             hdmi_level_shift);
+               info->hdmi_level_shift = hdmi_level_shift;
+       }
+
+       if (bdb_version >= 204) {
+               int max_tmds_clock;
+
+               switch (child->hdmi_max_data_rate) {
+               default:
+                       MISSING_CASE(child->hdmi_max_data_rate);
+                       /* fall through */
+               case HDMI_MAX_DATA_RATE_PLATFORM:
+                       max_tmds_clock = 0;
+                       break;
+               case HDMI_MAX_DATA_RATE_297:
+                       max_tmds_clock = 297000;
+                       break;
+               case HDMI_MAX_DATA_RATE_165:
+                       max_tmds_clock = 165000;
+                       break;
+               }
+
+               if (max_tmds_clock)
+                       DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
+                                     port_name(port), max_tmds_clock);
+               info->max_tmds_clock = max_tmds_clock;
+       }
+
+       /* Parse the I_boost config for SKL and above */
+       if (bdb_version >= 196 && child->iboost) {
+               info->dp_boost_level = translate_iboost(child->dp_iboost_level);
+               DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+                             port_name(port), info->dp_boost_level);
+               info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
+               DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+                             port_name(port), info->hdmi_boost_level);
+       }
+
+       /* DP max link rate for CNL+ */
+       if (bdb_version >= 216) {
+               switch (child->dp_max_link_rate) {
+               default:
+               case VBT_DP_MAX_LINK_RATE_HBR3:
+                       info->dp_max_link_rate = 810000;
+                       break;
+               case VBT_DP_MAX_LINK_RATE_HBR2:
+                       info->dp_max_link_rate = 540000;
+                       break;
+               case VBT_DP_MAX_LINK_RATE_HBR:
+                       info->dp_max_link_rate = 270000;
+                       break;
+               case VBT_DP_MAX_LINK_RATE_LBR:
+                       info->dp_max_link_rate = 162000;
+                       break;
+               }
+               DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
+                             port_name(port), info->dp_max_link_rate);
+       }
+
+       info->child = child;
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
+{
+       const struct child_device_config *child;
+       int i;
+
+       if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+               return;
+
+       if (bdb_version < 155)
+               return;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               parse_ddi_port(dev_priv, child, bdb_version);
+       }
+}
+
+static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+                         const struct bdb_header *bdb)
+{
+       const struct bdb_general_definitions *defs;
+       const struct child_device_config *child;
+       int i, child_device_num, count;
+       u8 expected_size;
+       u16 block_size;
+       int bus_pin;
+
+       defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+       if (!defs) {
+               DRM_DEBUG_KMS("No general definition block found, no devices defined.\n");
+               return;
+       }
+
+       block_size = get_blocksize(defs);
+       if (block_size < sizeof(*defs)) {
+               DRM_DEBUG_KMS("General definitions block too small (%u)\n",
+                             block_size);
+               return;
+       }
+
+       bus_pin = defs->crt_ddc_gmbus_pin;
+       DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+       if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
+               dev_priv->vbt.crt_ddc_pin = bus_pin;
+
+       if (bdb->version < 106) {
+               expected_size = 22;
+       } else if (bdb->version < 111) {
+               expected_size = 27;
+       } else if (bdb->version < 195) {
+               expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
+       } else if (bdb->version == 195) {
+               expected_size = 37;
+       } else if (bdb->version <= 215) {
+               expected_size = 38;
+       } else if (bdb->version <= 216) {
+               expected_size = 39;
+       } else {
+               expected_size = sizeof(*child);
+               BUILD_BUG_ON(sizeof(*child) < 39);
+               DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+                                bdb->version, expected_size);
+       }
+
+       /* Flag an error for unexpected size, but continue anyway. */
+       if (defs->child_dev_size != expected_size)
+               DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+                         defs->child_dev_size, expected_size, bdb->version);
+
+       /* The legacy sized child device config is the minimum we need. */
+       if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
+               DRM_DEBUG_KMS("Child device config size %u is too small.\n",
+                             defs->child_dev_size);
+               return;
+       }
+
+       /* get the number of child devices */
+       child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
+       count = 0;
+       /* get the number of child devices that are present */
+       for (i = 0; i < child_device_num; i++) {
+               child = child_device_ptr(defs, i);
+               if (!child->device_type)
+                       continue;
+               count++;
+       }
+       if (!count) {
+               DRM_DEBUG_KMS("no child devices parsed from VBT\n");
+               return;
+       }
+       dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
+       if (!dev_priv->vbt.child_dev) {
+               DRM_DEBUG_KMS("No memory space for child device\n");
+               return;
+       }
+
+       dev_priv->vbt.child_dev_num = count;
+       count = 0;
+       for (i = 0; i < child_device_num; i++) {
+               child = child_device_ptr(defs, i);
+               if (!child->device_type)
+                       continue;
+
+               /*
+                * Copy as much of the child device as we know about
+                * (sizeof) and as is available (child_dev_size).
+                * Accessing the data must depend on VBT version.
+                */
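+               /*
+                * E.g. if child_dev_size is 38 while sizeof(*child) is
+                * larger, only 38 bytes are copied and the rest of the
+                * kcalloc'ed entry stays zeroed, so newer fields read as 0.
+                */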
+               memcpy(dev_priv->vbt.child_dev + count, child,
+                      min_t(size_t, defs->child_dev_size, sizeof(*child)));
+               count++;
+       }
+}
+
+/* Common defaults which may be overridden by VBT. */
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+
+       /* Default to having backlight */
+       dev_priv->vbt.backlight.present = true;
+
+       /* LFP panel data */
+       dev_priv->vbt.lvds_dither = 1;
+
+       /* SDVO panel data */
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+
+       /* general features */
+       dev_priv->vbt.int_tv_support = 1;
+       dev_priv->vbt.int_crt_support = 1;
+
+       /* driver features */
+       dev_priv->vbt.int_lvds_support = 1;
+
+       /* Default to using SSC */
+       dev_priv->vbt.lvds_use_ssc = 1;
+       /*
+        * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+        * clock for LVDS.
+        */
+       dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
+                       !HAS_PCH_SPLIT(dev_priv));
+       DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+               struct ddi_vbt_port_info *info =
+                       &dev_priv->vbt.ddi_port_info[port];
+
+               info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
+       }
+}
+
+/* Defaults to initialize only if there is no VBT. */
+static void
+init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+               struct ddi_vbt_port_info *info =
+                       &dev_priv->vbt.ddi_port_info[port];
+
+               /*
+                * The VBT has the Type-C mode (native, TBT/USB) and we
+                * don't want to detect it.
+                */
+               if (intel_port_is_tc(dev_priv, port))
+                       continue;
+
+               info->supports_dvi = (port != PORT_A && port != PORT_E);
+               info->supports_hdmi = info->supports_dvi;
+               info->supports_dp = (port != PORT_E);
+               info->supports_edp = (port == PORT_A);
+       }
+}
+
+static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
+{
+       const void *_vbt = vbt;
+
+       return _vbt + vbt->bdb_offset;
+}
+
+/**
+ * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
+ * @buf:       pointer to a buffer to validate
+ * @size:      size of the buffer
+ *
+ * Returns true on valid VBT.
+ */
+bool intel_bios_is_valid_vbt(const void *buf, size_t size)
+{
+       const struct vbt_header *vbt = buf;
+       const struct bdb_header *bdb;
+
+       if (!vbt)
+               return false;
+
+       if (sizeof(struct vbt_header) > size) {
+               DRM_DEBUG_DRIVER("VBT header incomplete\n");
+               return false;
+       }
+
+       if (memcmp(vbt->signature, "$VBT", 4)) {
+               DRM_DEBUG_DRIVER("VBT invalid signature\n");
+               return false;
+       }
+
+       if (range_overflows_t(size_t,
+                             vbt->bdb_offset,
+                             sizeof(struct bdb_header),
+                             size)) {
+               DRM_DEBUG_DRIVER("BDB header incomplete\n");
+               return false;
+       }
+
+       bdb = get_bdb_header(vbt);
+       if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
+               DRM_DEBUG_DRIVER("BDB incomplete\n");
+               return false;
+       }
+
+       return true;
+}
+
+static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+{
+       size_t i;
+
+       /* Scour memory looking for the VBT signature. */
+       for (i = 0; i + 4 < size; i++) {
+               void *vbt;
+
+               if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+                       continue;
+
+               /*
+                * This is the one place where we explicitly discard the address
+                * space (__iomem) of the BIOS/VBT.
+                */
+               vbt = (void __force *) bios + i;
+               if (intel_bios_is_valid_vbt(vbt, size - i))
+                       return vbt;
+
+               break;
+       }
+
+       return NULL;
+}
+
+/**
+ * intel_bios_init - find VBT and initialize settings from the BIOS
+ * @dev_priv: i915 device instance
+ *
+ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
+ * is not found in the ACPI OpRegion, try to find it in the PCI ROM. Also
+ * initialize some defaults if the VBT is not present at all.
+ */
+void intel_bios_init(struct drm_i915_private *dev_priv)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       const struct vbt_header *vbt = dev_priv->opregion.vbt;
+       const struct bdb_header *bdb;
+       u8 __iomem *bios = NULL;
+
+       if (!HAS_DISPLAY(dev_priv)) {
+               DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+               return;
+       }
+
+       init_vbt_defaults(dev_priv);
+
+       /* If the OpRegion does not have VBT, look in PCI ROM. */
+       if (!vbt) {
+               size_t size;
+
+               bios = pci_map_rom(pdev, &size);
+               if (!bios)
+                       goto out;
+
+               vbt = find_vbt(bios, size);
+               if (!vbt)
+                       goto out;
+
+               DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+       }
+
+       bdb = get_bdb_header(vbt);
+
+       DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
+                     (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+
+       /* Grab useful general definitions */
+       parse_general_features(dev_priv, bdb);
+       parse_general_definitions(dev_priv, bdb);
+       parse_lfp_panel_data(dev_priv, bdb);
+       parse_lfp_backlight(dev_priv, bdb);
+       parse_sdvo_panel_data(dev_priv, bdb);
+       parse_driver_features(dev_priv, bdb);
+       parse_edp(dev_priv, bdb);
+       parse_psr(dev_priv, bdb);
+       parse_mipi_config(dev_priv, bdb);
+       parse_mipi_sequence(dev_priv, bdb);
+
+       /* Further processing on pre-parsed data */
+       parse_sdvo_device_mapping(dev_priv, bdb->version);
+       parse_ddi_ports(dev_priv, bdb->version);
+
+out:
+       if (!vbt) {
+               DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+               init_vbt_missing_defaults(dev_priv);
+       }
+
+       if (bios)
+               pci_unmap_rom(pdev, bios);
+}
+
+/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+       kfree(dev_priv->vbt.child_dev);
+       dev_priv->vbt.child_dev = NULL;
+       dev_priv->vbt.child_dev_num = 0;
+       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.dsi.data);
+       dev_priv->vbt.dsi.data = NULL;
+       kfree(dev_priv->vbt.dsi.pps);
+       dev_priv->vbt.dsi.pps = NULL;
+       kfree(dev_priv->vbt.dsi.config);
+       dev_priv->vbt.dsi.config = NULL;
+       kfree(dev_priv->vbt.dsi.deassert_seq);
+       dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
+ * intel_bios_is_tv_present - is integrated TV present in VBT
+ * @dev_priv:  i915 device instance
+ *
+ * Return true if TV is present. If no child devices were parsed from VBT,
+ * assume TV is present.
+ */
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+{
+       const struct child_device_config *child;
+       int i;
+
+       if (!dev_priv->vbt.int_tv_support)
+               return false;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return true;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+               /*
+                * If the device type is not TV, continue.
+                */
+               switch (child->device_type) {
+               case DEVICE_TYPE_INT_TV:
+               case DEVICE_TYPE_TV:
+               case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+                       break;
+               default:
+                       continue;
+               }
+               /*
+                * Only regard the device as present when addin_offset
+                * is non-zero.
+                */
+               if (child->addin_offset)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * intel_bios_is_lvds_present - is LVDS present in VBT
+ * @dev_priv:  i915 device instance
+ * @i2c_pin:   i2c pin for LVDS if present
+ *
+ * Return true if LVDS is present. If no child devices were parsed from VBT,
+ * assume LVDS is present.
+ */
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+{
+       const struct child_device_config *child;
+       int i;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return true;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               /*
+                * If the device type is not LFP, continue.
+                * We have to check both the new and the old identifiers
+                * for compatibility with some BIOSes.
+                */
+               if (child->device_type != DEVICE_TYPE_INT_LFP &&
+                   child->device_type != DEVICE_TYPE_LFP)
+                       continue;
+
+               if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+                       *i2c_pin = child->i2c_pin;
+
+               /* However, we cannot trust the BIOS writers to populate
+                * the VBT correctly.  Since LVDS requires additional
+                * information from AIM blocks, a non-zero addin offset is
+                * a good indicator that the LVDS is actually present.
+                */
+               if (child->addin_offset)
+                       return true;
+
+               /* But even then some BIOS writers perform some black magic
+                * and instantiate the device without reference to any
+                * additional data.  Trust that if the VBT was written into
+                * the OpRegion then they have validated the LVDS's existence.
+                */
+               if (dev_priv->opregion.vbt)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv:  i915 device instance
+ * @port:      port to check
+ *
+ * Return true if the device in @port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+       const struct child_device_config *child;
+       static const struct {
+               u16 dp, hdmi;
+       } port_mapping[] = {
+               [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+               [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+               [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+               [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+               [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
+       };
+       int i;
+
+       if (HAS_DDI(dev_priv)) {
+               const struct ddi_vbt_port_info *port_info =
+                       &dev_priv->vbt.ddi_port_info[port];
+
+               return port_info->supports_dp ||
+                      port_info->supports_dvi ||
+                      port_info->supports_hdmi;
+       }
+
+       /* FIXME maybe deal with port A as well? */
+       if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+               return false;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return false;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               if ((child->dvo_port == port_mapping[port].dp ||
+                    child->dvo_port == port_mapping[port].hdmi) &&
+                   (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+                                          DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * intel_bios_is_port_edp - is the device in the given port eDP
+ * @dev_priv:  i915 device instance
+ * @port:      port to check
+ *
+ * Return true if the device in @port is eDP.
+ */
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+{
+       const struct child_device_config *child;
+       static const short port_mapping[] = {
+               [PORT_B] = DVO_PORT_DPB,
+               [PORT_C] = DVO_PORT_DPC,
+               [PORT_D] = DVO_PORT_DPD,
+               [PORT_E] = DVO_PORT_DPE,
+               [PORT_F] = DVO_PORT_DPF,
+       };
+       int i;
+
+       if (HAS_DDI(dev_priv))
+               return dev_priv->vbt.ddi_port_info[port].supports_edp;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return false;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               if (child->dvo_port == port_mapping[port] &&
+                   (child->device_type & DEVICE_TYPE_eDP_BITS) ==
+                   (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+                       return true;
+       }
+
+       return false;
+}
+
+static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
+                                     enum port port)
+{
+       static const struct {
+               u16 dp, hdmi;
+       } port_mapping[] = {
+               /*
+                * Buggy VBTs may declare DP ports as having
+                * HDMI type dvo_port :( So let's check both.
+                */
+               [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+               [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+               [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+               [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+               [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
+       };
+
+       if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+               return false;
+
+       if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+           (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+               return false;
+
+       if (child->dvo_port == port_mapping[port].dp)
+               return true;
+
+       /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
+       if (child->dvo_port == port_mapping[port].hdmi &&
+           child->aux_channel != 0)
+               return true;
+
+       return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+                                    enum port port)
+{
+       const struct child_device_config *child;
+       int i;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               if (child_dev_is_dp_dual_mode(child, port))
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * intel_bios_is_dsi_present - is DSI present in VBT
+ * @dev_priv:  i915 device instance
+ * @port:      port for DSI if present
+ *
+ * Return true if DSI is present, and return the port in @port.
+ */
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
+                              enum port *port)
+{
+       const struct child_device_config *child;
+       u8 dvo_port;
+       int i;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
+               if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+                       continue;
+
+               dvo_port = child->dvo_port;
+
+               if (dvo_port == DVO_PORT_MIPIA ||
+                   (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
+                   (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
+                       if (port)
+                               *port = dvo_port - DVO_PORT_MIPIA;
+                       return true;
+               } else if (dvo_port == DVO_PORT_MIPIB ||
+                          dvo_port == DVO_PORT_MIPIC ||
+                          dvo_port == DVO_PORT_MIPID) {
+                       DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
+                                     port_name(dvo_port - DVO_PORT_MIPIA));
+               }
+       }
+
+       return false;
+}
+
+/**
+ * intel_bios_is_port_hpd_inverted - is HPD inverted for @port
+ * @i915:      i915 device instance
+ * @port:      port to check
+ *
+ * Return true if HPD should be inverted for @port.
+ */
+bool
+intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
+                               enum port port)
+{
+       const struct child_device_config *child =
+               i915->vbt.ddi_port_info[port].child;
+
+       if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
+               return false;
+
+       return child && child->hpd_invert;
+}
+
+/**
+ * intel_bios_is_lspcon_present - is LSPCON attached on @port
+ * @i915:      i915 device instance
+ * @port:      port to check
+ *
+ * Return true if LSPCON is present on @port.
+ */
+bool
+intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+                            enum port port)
+{
+       const struct child_device_config *child =
+               i915->vbt.ddi_port_info[port].child;
+
+       return HAS_LSPCON(i915) && child && child->lspcon;
+}
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+                                  enum port port)
+{
+       const struct ddi_vbt_port_info *info =
+               &dev_priv->vbt.ddi_port_info[port];
+       enum aux_ch aux_ch;
+
+       if (!info->alternate_aux_channel) {
+               aux_ch = (enum aux_ch)port;
+
+               DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+                             aux_ch_name(aux_ch), port_name(port));
+               return aux_ch;
+       }
+
+       switch (info->alternate_aux_channel) {
+       case DP_AUX_A:
+               aux_ch = AUX_CH_A;
+               break;
+       case DP_AUX_B:
+               aux_ch = AUX_CH_B;
+               break;
+       case DP_AUX_C:
+               aux_ch = AUX_CH_C;
+               break;
+       case DP_AUX_D:
+               aux_ch = AUX_CH_D;
+               break;
+       case DP_AUX_E:
+               aux_ch = AUX_CH_E;
+               break;
+       case DP_AUX_F:
+               aux_ch = AUX_CH_F;
+               break;
+       default:
+               MISSING_CASE(info->alternate_aux_channel);
+               aux_ch = AUX_CH_A;
+               break;
+       }
+
+       DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+                     aux_ch_name(aux_ch), port_name(port));
+
+       return aux_ch;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
new file mode 100644 (file)
index 0000000..4e42cfa
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
+ * the VBT from the rest of the driver. Add the parsed, clean data to struct
+ * intel_vbt_data within struct drm_i915_private.
+ */
+
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
+enum intel_backlight_type {
+       INTEL_BACKLIGHT_PMIC,
+       INTEL_BACKLIGHT_LPSS,
+       INTEL_BACKLIGHT_DISPLAY_DDI,
+       INTEL_BACKLIGHT_DSI_DCS,
+       INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
+struct edp_power_seq {
+       u16 t1_t3;
+       u16 t8;
+       u16 t9;
+       u16 t10;
+       u16 t11_t12;
+} __packed;
+
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming; we use the proper names here to avoid confusion when
+ * reading the code.
+ */
+enum mipi_seq {
+       MIPI_SEQ_END = 0,
+       MIPI_SEQ_DEASSERT_RESET,        /* Spec says MipiAssertResetPin */
+       MIPI_SEQ_INIT_OTP,
+       MIPI_SEQ_DISPLAY_ON,
+       MIPI_SEQ_DISPLAY_OFF,
+       MIPI_SEQ_ASSERT_RESET,          /* Spec says MipiDeassertResetPin */
+       MIPI_SEQ_BACKLIGHT_ON,          /* sequence block v2+ */
+       MIPI_SEQ_BACKLIGHT_OFF,         /* sequence block v2+ */
+       MIPI_SEQ_TEAR_ON,               /* sequence block v2+ */
+       MIPI_SEQ_TEAR_OFF,              /* sequence block v3+ */
+       MIPI_SEQ_POWER_ON,              /* sequence block v3+ */
+       MIPI_SEQ_POWER_OFF,             /* sequence block v3+ */
+       MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+       MIPI_SEQ_ELEM_END = 0,
+       MIPI_SEQ_ELEM_SEND_PKT,
+       MIPI_SEQ_ELEM_DELAY,
+       MIPI_SEQ_ELEM_GPIO,
+       MIPI_SEQ_ELEM_I2C,              /* sequence block v2+ */
+       MIPI_SEQ_ELEM_SPI,              /* sequence block v3+ */
+       MIPI_SEQ_ELEM_PMIC,             /* sequence block v3+ */
+       MIPI_SEQ_ELEM_MAX
+};
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID    0
+#define MIPI_DSI_GENERIC_PANEL_ID      1
+
+struct mipi_config {
+       u16 panel_id;
+
+       /* General Params */
+       u32 enable_dithering:1;
+       u32 rsvd1:1;
+       u32 is_bridge:1;
+
+       u32 panel_arch_type:2;
+       u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE   0x1
+#define NON_BURST_SYNC_EVENTS  0x2
+#define BURST_MODE             0x3
+       u32 video_transfer_mode:2;
+
+       u32 cabc_supported:1;
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
+       u32 pwm_blc:1;
+
+       /* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565                    0x1
+#define PIXEL_FORMAT_RGB666                    0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED     0x3
+#define PIXEL_FORMAT_RGB888                    0x4
+       u32 videomode_color_format:4;
+
+       /* Bit 15:14 */
+#define ENABLE_ROTATION_0      0x0
+#define ENABLE_ROTATION_90     0x1
+#define ENABLE_ROTATION_180    0x2
+#define ENABLE_ROTATION_270    0x3
+       u32 rotation:2;
+       u32 bta_enabled:1;
+       u32 rsvd2:15;
+
+       /* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED        0
+#define DUAL_LINK_FRONT_BACK   1
+#define DUAL_LINK_PIXEL_ALT    2
+       u16 dual_link:2;
+       u16 lane_cnt:2;
+       u16 pixel_overlap:3;
+       u16 rgb_flip:1;
+#define DL_DCS_PORT_A                  0x00
+#define DL_DCS_PORT_C                  0x01
+#define DL_DCS_PORT_A_AND_C            0x02
+       u16 dl_dcs_cabc_ports:2;
+       u16 dl_dcs_backlight_ports:2;
+       u16 rsvd3:4;
+
+       u16 rsvd4;
+
+       u8 rsvd5;
+       u32 target_burst_mode_freq;
+       u32 dsi_ddr_clk;
+       u32 bridge_ref_clk;
+
+#define  BYTE_CLK_SEL_20MHZ            0
+#define  BYTE_CLK_SEL_10MHZ            1
+#define  BYTE_CLK_SEL_5MHZ             2
+       u8 byte_clk_sel:2;
+
+       u8 rsvd6:6;
+
+       /* DPHY Flags */
+       u16 dphy_param_valid:1;
+       u16 eot_pkt_disabled:1;
+       u16 enable_clk_stop:1;
+       u16 rsvd7:13;
+
+       u32 hs_tx_timeout;
+       u32 lp_rx_timeout;
+       u32 turn_around_timeout;
+       u32 device_reset_timer;
+       u32 master_init_timer;
+       u32 dbi_bw_timer;
+       u32 lp_byte_clk_val;
+
+       /*  4 byte Dphy Params */
+       u32 prepare_cnt:6;
+       u32 rsvd8:2;
+       u32 clk_zero_cnt:8;
+       u32 trail_cnt:5;
+       u32 rsvd9:3;
+       u32 exit_zero_cnt:6;
+       u32 rsvd10:2;
+
+       u32 clk_lane_switch_cnt;
+       u32 hl_switch_cnt;
+
+       u32 rsvd11[6];
+
+       /* timings based on dphy spec */
+       u8 tclk_miss;
+       u8 tclk_post;
+       u8 rsvd12;
+       u8 tclk_pre;
+       u8 tclk_prepare;
+       u8 tclk_settle;
+       u8 tclk_term_enable;
+       u8 tclk_trail;
+       u16 tclk_prepare_clkzero;
+       u8 rsvd13;
+       u8 td_term_enable;
+       u8 teot;
+       u8 ths_exit;
+       u8 ths_prepare;
+       u16 ths_prepare_hszero;
+       u8 rsvd14;
+       u8 ths_settle;
+       u8 ths_skip;
+       u8 ths_trail;
+       u8 tinit;
+       u8 tlpx;
+       u8 rsvd15[3];
+
+       /* GPIOs */
+       u8 panel_enable;
+       u8 bl_enable;
+       u8 pwm_enable;
+       u8 reset_r_n;
+       u8 pwr_down_r;
+       u8 stdby_r_n;
+
+} __packed;
+
+/* all delays have a unit of 100us */
+struct mipi_pps_data {
+       u16 panel_on_delay;
+       u16 bl_enable_delay;
+       u16 bl_disable_delay;
+       u16 panel_off_delay;
+       u16 panel_power_cycle_delay;
+} __packed;
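+
+/*
+ * Worked example for the 100 us unit noted above (value illustrative): a
+ * panel_on_delay of 500 corresponds to 500 * 100 us = 50 ms.
+ */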
+
+void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
+                                    enum port port);
+bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+                                 enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+
+#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
new file mode 100644 (file)
index 0000000..753ac31
--- /dev/null
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "intel_bw.h"
+#include "intel_drv.h"
+#include "intel_sideband.h"
+
+/* Parameters for Qclk Geyserville (QGV) */
+struct intel_qgv_point {
+       u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
+};
+
+struct intel_qgv_info {
+       struct intel_qgv_point points[3];
+       u8 num_points;
+       u8 num_channels;
+       u8 t_bl;
+       enum intel_dram_type dram_type;
+};
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+                                         struct intel_qgv_info *qi)
+{
+       u32 val = 0;
+       int ret;
+
+       ret = sandybridge_pcode_read(dev_priv,
+                                    ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+                                    ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+                                    &val, NULL);
+       if (ret)
+               return ret;
+
+       switch (val & 0xf) {
+       case 0:
+               qi->dram_type = INTEL_DRAM_DDR4;
+               break;
+       case 1:
+               qi->dram_type = INTEL_DRAM_DDR3;
+               break;
+       case 2:
+               qi->dram_type = INTEL_DRAM_LPDDR3;
+               break;
+       case 3:
+               qi->dram_type = INTEL_DRAM_LPDDR3;
+               break;
+       default:
+               MISSING_CASE(val & 0xf);
+               break;
+       }
+
+       qi->num_channels = (val & 0xf0) >> 4;
+       qi->num_points = (val & 0xf00) >> 8;
+
+       qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+
+       return 0;
+}
+
+static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+                                        struct intel_qgv_point *sp,
+                                        int point)
+{
+       u32 val = 0, val2;
+       int ret;
+
+       ret = sandybridge_pcode_read(dev_priv,
+                                    ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+                                    ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+                                    &val, &val2);
+       if (ret)
+               return ret;
+
+       sp->dclk = val & 0xffff;
+       sp->t_rp = (val & 0xff0000) >> 16;
+       sp->t_rcd = (val & 0xff000000) >> 24;
+
+       sp->t_rdpre = val2 & 0xff;
+       sp->t_ras = (val2 & 0xff00) >> 8;
+
+       sp->t_rc = sp->t_rp + sp->t_ras;
+
+       return 0;
+}
+
+static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+                             struct intel_qgv_info *qi)
+{
+       int i, ret;
+
+       ret = icl_pcode_read_mem_global_info(dev_priv, qi);
+       if (ret)
+               return ret;
+
+       if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+               qi->num_points = ARRAY_SIZE(qi->points);
+
+       for (i = 0; i < qi->num_points; i++) {
+               struct intel_qgv_point *sp = &qi->points[i];
+
+               ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
+               if (ret)
+                       return ret;
+
+               DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+                             i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+                             sp->t_rcd, sp->t_rc);
+       }
+
+       return 0;
+}
+
+static int icl_calc_bw(int dclk, int num, int den)
+{
+       /* multiples of 16.666MHz (100/6) */
+       return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
+}
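+
+/*
+ * Worked example for the helper above (numbers purely illustrative):
+ * icl_calc_bw(72, 16, 1) = DIV_ROUND_CLOSEST(16 * 72 * 100, 1 * 6) = 19200,
+ * i.e. the raw dclk is scaled by 100/6 (the 16.666 MHz step) and the result
+ * is in the same units as the deprogbwlimit * 1000 limit used below.
+ */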
+
+static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
+{
+       u16 dclk = 0;
+       int i;
+
+       for (i = 0; i < qi->num_points; i++)
+               dclk = max(dclk, qi->points[i].dclk);
+
+       return dclk;
+}
+
+struct intel_sa_info {
+       u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+};
+
+static const struct intel_sa_info icl_sa_info = {
+       .deburst = 8,
+       .mpagesize = 16,
+       .deprogbwlimit = 25, /* GB/s */
+       .displayrtids = 128,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+{
+       struct intel_qgv_info qi = {};
+       const struct intel_sa_info *sa = &icl_sa_info;
+       bool is_y_tile = true; /* assume y tile may be used */
+       int num_channels;
+       int deinterleave;
+       int ipqdepth, ipqdepthpch;
+       int dclk_max;
+       int maxdebw;
+       int i, ret;
+
+       ret = icl_get_qgv_points(dev_priv, &qi);
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits\n");
+               return ret;
+       }
+       num_channels = qi.num_channels;
+
+       deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+       dclk_max = icl_sagv_max_dclk(&qi);
+
+       ipqdepthpch = 16;
+
+       maxdebw = min(sa->deprogbwlimit * 1000,
+                     icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
+       ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+
+       for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+               struct intel_bw_info *bi = &dev_priv->max_bw[i];
+               int clpchgroup;
+               int j;
+
+               clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
+               bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+
+               for (j = 0; j < qi.num_points; j++) {
+                       const struct intel_qgv_point *sp = &qi.points[j];
+                       int ct, bw;
+
+                       /*
+                        * Max row cycle time
+                        *
+                        * FIXME what is the logic behind the
+                        * assumed burst length?
+                        */
+                       ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
+                                  (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
+                       bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
+
+                       bi->deratedbw[j] = min(maxdebw,
+                                              bw * 9 / 10); /* 90% */
+
+                       DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+                                     i, j, bi->num_planes, bi->deratedbw[j]);
+               }
+
+               if (bi->num_planes == 1)
+                       break;
+       }
+
+       return 0;
+}
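+
+/*
+ * Worked example for the table built above (hypothetical two-channel
+ * configuration, Y-tile assumed, icl_sa_info values): deinterleave =
+ * DIV_ROUND_UP(2, 4) = 1 and ipqdepth = min(16, 128 / 2) = 16, so
+ * clpchgroup becomes 4, 8 and 16 for the successive levels, giving
+ * num_planes of 4, 2 and 1 before the per-QGV-point derating is applied.
+ */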
+
+static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
+                              int num_planes, int qgv_point)
+{
+       int i;
+
+       /* Did we initialize the bw limits successfully? */
+       if (dev_priv->max_bw[0].num_planes == 0)
+               return UINT_MAX;
+
+       for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+               const struct intel_bw_info *bi =
+                       &dev_priv->max_bw[i];
+
+               if (num_planes >= bi->num_planes)
+                       return bi->deratedbw[qgv_point];
+       }
+
+       return 0;
+}
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+{
+       if (IS_GEN(dev_priv, 11))
+               icl_get_bw_info(dev_priv);
+}
+
+static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
+                                       int num_planes)
+{
+       if (IS_GEN(dev_priv, 11))
+               /*
+                * FIXME with SAGV disabled maybe we can assume
+                * point 1 will always be used? Seems to match
+                * the behaviour observed in the wild.
+                */
+               return min3(icl_max_bw(dev_priv, num_planes, 0),
+                           icl_max_bw(dev_priv, num_planes, 1),
+                           icl_max_bw(dev_priv, num_planes, 2));
+       else
+               return UINT_MAX;
+}
+
+static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+       /*
+        * We assume cursors are small enough
+        * to not cause bandwidth problems.
+        */
+       return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       unsigned int data_rate = 0;
+       enum plane_id plane_id;
+
+       for_each_plane_id_on_crtc(crtc, plane_id) {
+               /*
+                * We assume cursors are small enough
+                * to not cause bandwidth problems.
+                */
+               if (plane_id == PLANE_CURSOR)
+                       continue;
+
+               data_rate += crtc_state->data_rate[plane_id];
+       }
+
+       return data_rate;
+}
+
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+                         const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       bw_state->data_rate[crtc->pipe] =
+               intel_bw_crtc_data_rate(crtc_state);
+       bw_state->num_active_planes[crtc->pipe] =
+               intel_bw_crtc_num_active_planes(crtc_state);
+
+       DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
+                     pipe_name(crtc->pipe),
+                     bw_state->data_rate[crtc->pipe],
+                     bw_state->num_active_planes[crtc->pipe]);
+}
+
+static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+                                              const struct intel_bw_state *bw_state)
+{
+       unsigned int num_active_planes = 0;
+       enum pipe pipe;
+
+       for_each_pipe(dev_priv, pipe)
+               num_active_planes += bw_state->num_active_planes[pipe];
+
+       return num_active_planes;
+}
+
+static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+                                      const struct intel_bw_state *bw_state)
+{
+       unsigned int data_rate = 0;
+       enum pipe pipe;
+
+       for_each_pipe(dev_priv, pipe)
+               data_rate += bw_state->data_rate[pipe];
+
+       return data_rate;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+       struct intel_bw_state *bw_state = NULL;
+       unsigned int data_rate, max_data_rate;
+       unsigned int num_active_planes;
+       struct intel_crtc *crtc;
+       int i;
+
+       /* FIXME earlier gens need some checks too */
+       if (INTEL_GEN(dev_priv) < 11)
+               return 0;
+
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               unsigned int old_data_rate =
+                       intel_bw_crtc_data_rate(old_crtc_state);
+               unsigned int new_data_rate =
+                       intel_bw_crtc_data_rate(new_crtc_state);
+               unsigned int old_active_planes =
+                       intel_bw_crtc_num_active_planes(old_crtc_state);
+               unsigned int new_active_planes =
+                       intel_bw_crtc_num_active_planes(new_crtc_state);
+
+               /*
+                * Avoid locking the bw state when
+                * nothing significant has changed.
+                */
+               if (old_data_rate == new_data_rate &&
+                   old_active_planes == new_active_planes)
+                       continue;
+
+               bw_state = intel_atomic_get_bw_state(state);
+               if (IS_ERR(bw_state))
+                       return PTR_ERR(bw_state);
+
+               bw_state->data_rate[crtc->pipe] = new_data_rate;
+               bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+
+               DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
+                             pipe_name(crtc->pipe),
+                             bw_state->data_rate[crtc->pipe],
+                             bw_state->num_active_planes[crtc->pipe]);
+       }
+
+       if (!bw_state)
+               return 0;
+
+       data_rate = intel_bw_data_rate(dev_priv, bw_state);
+       num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+
+       max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+
+       data_rate = DIV_ROUND_UP(data_rate, 1000);
+
+       if (data_rate > max_data_rate) {
+               DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+                             data_rate, max_data_rate, num_active_planes);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+{
+       struct intel_bw_state *state;
+
+       state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+       return &state->base;
+}
+
+static void intel_bw_destroy_state(struct drm_private_obj *obj,
+                                  struct drm_private_state *state)
+{
+       kfree(state);
+}
+
+static const struct drm_private_state_funcs intel_bw_funcs = {
+       .atomic_duplicate_state = intel_bw_duplicate_state,
+       .atomic_destroy_state = intel_bw_destroy_state,
+};
+
+int intel_bw_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_bw_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+
+       drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
+                                   &state->base, &intel_bw_funcs);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
new file mode 100644 (file)
index 0000000..e9d9c6d
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BW_H__
+#define __INTEL_BW_H__
+
+#include <drm/drm_atomic.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+
+struct intel_bw_state {
+       struct drm_private_state base;
+
+       unsigned int data_rate[I915_MAX_PIPES];
+       u8 num_active_planes[I915_MAX_PIPES];
+};
+
+#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+
+static inline struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct drm_private_state *bw_state;
+
+       bw_state = drm_atomic_get_private_obj_state(&state->base,
+                                                   &dev_priv->bw_obj);
+       if (IS_ERR(bw_state))
+               return ERR_CAST(bw_state);
+
+       return to_intel_bw_state(bw_state);
+}
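+
+/*
+ * Usage sketch: atomic check code fetches the bandwidth state and checks for
+ * errors before touching it, e.g.
+ *
+ *     bw_state = intel_atomic_get_bw_state(state);
+ *     if (IS_ERR(bw_state))
+ *             return PTR_ERR(bw_state);
+ *
+ * as intel_bw_atomic_check() does in intel_bw.c.
+ */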
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv);
+int intel_bw_init(struct drm_i915_private *dev_priv);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+                         const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
new file mode 100644 (file)
index 0000000..8993ab2
--- /dev/null
@@ -0,0 +1,2853 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_cdclk.h"
+#include "intel_drv.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: CDCLK / RAWCLK
+ *
+ * The display engine uses several different clocks to do its work. There
+ * are two main clocks involved that aren't directly related to the actual
+ * pixel clock or any symbol/bit clock of the actual output port. These
+ * are the core display clock (CDCLK) and RAWCLK.
+ *
+ * CDCLK clocks most of the display pipe logic, and thus its frequency
+ * must be high enough to support the rate at which pixels are flowing
+ * through the pipes. Downscaling must also be accounted as that increases
+ * the effective pixel rate.
+ *
+ * On several platforms the CDCLK frequency can be changed dynamically
+ * to minimize power consumption for a given display configuration.
+ * Typically changes to the CDCLK frequency require all the display pipes
+ * to be shut down while the frequency is being changed.
+ *
+ * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
+ * DMC will not change the active CDCLK frequency however, so that part
+ * will still be performed by the driver directly.
+ *
+ * RAWCLK is a fixed frequency clock, often used by various auxiliary
+ * blocks such as AUX CH or backlight PWM. Hence the only thing we
+ * really need to know about RAWCLK is its frequency so that various
+ * dividers can be programmed correctly.
+ */
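+
+/*
+ * Illustrative example of the sizing rule above (numbers are not a statement
+ * of any platform's exact policy): on a platform with 337.5/450/540/675 MHz
+ * CDCLK steps, a pipe with a 400 MHz adjusted pixel rate can be served by
+ * the 450 MHz step, while a 1.5x downscale raising the effective rate to
+ * 600 MHz would require the 675 MHz step.
+ */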
+
+static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
+                                  struct intel_cdclk_state *cdclk_state)
+{
+       cdclk_state->cdclk = 133333;
+}
+
+static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
+                                  struct intel_cdclk_state *cdclk_state)
+{
+       cdclk_state->cdclk = 200000;
+}
+
+static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
+                                  struct intel_cdclk_state *cdclk_state)
+{
+       cdclk_state->cdclk = 266667;
+}
+
+static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
+                                  struct intel_cdclk_state *cdclk_state)
+{
+       cdclk_state->cdclk = 333333;
+}
+
+static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
+                                  struct intel_cdclk_state *cdclk_state)
+{
+       cdclk_state->cdclk = 400000;
+}
+
+static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
+                                  struct intel_cdclk_state *cdclk_state)
+{
+       cdclk_state->cdclk = 450000;
+}
+
+static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
+                          struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u16 hpllcc = 0;
+
+       /*
+        * 852GM/852GMV only supports 133 MHz and the HPLLCC
+        * encoding is different :(
+        * FIXME is this the right way to detect 852GM/852GMV?
+        */
+       if (pdev->revision == 0x1) {
+               cdclk_state->cdclk = 133333;
+               return;
+       }
+
+       pci_bus_read_config_word(pdev->bus,
+                                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
+       /* Assume that the hardware is in the high speed state.  This
+        * should be the default.
+        */
+       switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+       case GC_CLOCK_133_200:
+       case GC_CLOCK_133_200_2:
+       case GC_CLOCK_100_200:
+               cdclk_state->cdclk = 200000;
+               break;
+       case GC_CLOCK_166_250:
+               cdclk_state->cdclk = 250000;
+               break;
+       case GC_CLOCK_100_133:
+               cdclk_state->cdclk = 133333;
+               break;
+       case GC_CLOCK_133_266:
+       case GC_CLOCK_133_266_2:
+       case GC_CLOCK_166_266:
+               cdclk_state->cdclk = 266667;
+               break;
+       }
+}
+
+static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
+                            struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u16 gcfgc = 0;
+
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+       if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
+               cdclk_state->cdclk = 133333;
+               return;
+       }
+
+       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+       case GC_DISPLAY_CLOCK_333_320_MHZ:
+               cdclk_state->cdclk = 333333;
+               break;
+       default:
+       case GC_DISPLAY_CLOCK_190_200_MHZ:
+               cdclk_state->cdclk = 190000;
+               break;
+       }
+}
+
+static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
+                            struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u16 gcfgc = 0;
+
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+       if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
+               cdclk_state->cdclk = 133333;
+               return;
+       }
+
+       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+       case GC_DISPLAY_CLOCK_333_320_MHZ:
+               cdclk_state->cdclk = 320000;
+               break;
+       default:
+       case GC_DISPLAY_CLOCK_190_200_MHZ:
+               cdclk_state->cdclk = 200000;
+               break;
+       }
+}
+
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
+{
+       static const unsigned int blb_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 4800000,
+               [4] = 6400000,
+       };
+       static const unsigned int pnv_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 4800000,
+               [4] = 2666667,
+       };
+       static const unsigned int cl_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 6400000,
+               [4] = 3333333,
+               [5] = 3566667,
+               [6] = 4266667,
+       };
+       static const unsigned int elk_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 4800000,
+       };
+       static const unsigned int ctg_vco[8] = {
+               [0] = 3200000,
+               [1] = 4000000,
+               [2] = 5333333,
+               [3] = 6400000,
+               [4] = 2666667,
+               [5] = 4266667,
+       };
+       const unsigned int *vco_table;
+       unsigned int vco;
+       u8 tmp = 0;
+
+       /* FIXME other chipsets? */
+       if (IS_GM45(dev_priv))
+               vco_table = ctg_vco;
+       else if (IS_G45(dev_priv))
+               vco_table = elk_vco;
+       else if (IS_I965GM(dev_priv))
+               vco_table = cl_vco;
+       else if (IS_PINEVIEW(dev_priv))
+               vco_table = pnv_vco;
+       else if (IS_G33(dev_priv))
+               vco_table = blb_vco;
+       else
+               return 0;
+
+       tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
+                       HPLLVCO_MOBILE : HPLLVCO);
+
+       vco = vco_table[tmp & 0x7];
+       if (vco == 0)
+               DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+       else
+               DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+       return vco;
+}
+
+static void g33_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       static const u8 div_3200[] = { 12, 10,  8,  7, 5, 16 };
+       static const u8 div_4000[] = { 14, 12, 10,  8, 6, 20 };
+       static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
+       static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
+       const u8 *div_table;
+       unsigned int cdclk_sel;
+       u16 tmp = 0;
+
+       cdclk_state->vco = intel_hpll_vco(dev_priv);
+
+       pci_read_config_word(pdev, GCFGC, &tmp);
+
+       cdclk_sel = (tmp >> 4) & 0x7;
+
+       if (cdclk_sel >= ARRAY_SIZE(div_3200))
+               goto fail;
+
+       switch (cdclk_state->vco) {
+       case 3200000:
+               div_table = div_3200;
+               break;
+       case 4000000:
+               div_table = div_4000;
+               break;
+       case 4800000:
+               div_table = div_4800;
+               break;
+       case 5333333:
+               div_table = div_5333;
+               break;
+       default:
+               goto fail;
+       }
+
+       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
+                                              div_table[cdclk_sel]);
+       return;
+
+fail:
+       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+                 cdclk_state->vco, tmp);
+       cdclk_state->cdclk = 190476;
+}
+
+static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u16 gcfgc = 0;
+
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+       case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+               cdclk_state->cdclk = 266667;
+               break;
+       case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+               cdclk_state->cdclk = 333333;
+               break;
+       case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+               cdclk_state->cdclk = 444444;
+               break;
+       case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+               cdclk_state->cdclk = 200000;
+               break;
+       default:
+               DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+               /* fall through */
+       case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+               cdclk_state->cdclk = 133333;
+               break;
+       case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+               cdclk_state->cdclk = 166667;
+               break;
+       }
+}
+
+static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
+                            struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       static const u8 div_3200[] = { 16, 10,  8 };
+       static const u8 div_4000[] = { 20, 12, 10 };
+       static const u8 div_5333[] = { 24, 16, 14 };
+       const u8 *div_table;
+       unsigned int cdclk_sel;
+       u16 tmp = 0;
+
+       cdclk_state->vco = intel_hpll_vco(dev_priv);
+
+       pci_read_config_word(pdev, GCFGC, &tmp);
+
+       cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+       if (cdclk_sel >= ARRAY_SIZE(div_3200))
+               goto fail;
+
+       switch (cdclk_state->vco) {
+       case 3200000:
+               div_table = div_3200;
+               break;
+       case 4000000:
+               div_table = div_4000;
+               break;
+       case 5333333:
+               div_table = div_5333;
+               break;
+       default:
+               goto fail;
+       }
+
+       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
+                                              div_table[cdclk_sel]);
+       return;
+
+fail:
+       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+                 cdclk_state->vco, tmp);
+       cdclk_state->cdclk = 200000;
+}
+
+static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
+                          struct intel_cdclk_state *cdclk_state)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       unsigned int cdclk_sel;
+       u16 tmp = 0;
+
+       cdclk_state->vco = intel_hpll_vco(dev_priv);
+
+       pci_read_config_word(pdev, GCFGC, &tmp);
+
+       cdclk_sel = (tmp >> 12) & 0x1;
+
+       switch (cdclk_state->vco) {
+       case 2666667:
+       case 4000000:
+       case 5333333:
+               cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
+               break;
+       case 3200000:
+               cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
+               break;
+       default:
+               DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+                         cdclk_state->vco, tmp);
+               cdclk_state->cdclk = 222222;
+               break;
+       }
+}
+
+static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       u32 lcpll = I915_READ(LCPLL_CTL);
+       u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+       if (lcpll & LCPLL_CD_SOURCE_FCLK)
+               cdclk_state->cdclk = 800000;
+       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+               cdclk_state->cdclk = 450000;
+       else if (freq == LCPLL_CLK_FREQ_450)
+               cdclk_state->cdclk = 450000;
+       else if (IS_HSW_ULT(dev_priv))
+               cdclk_state->cdclk = 337500;
+       else
+               cdclk_state->cdclk = 540000;
+}
+
+static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+       int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ?
+               333333 : 320000;
+
+       /*
+        * We seem to get an unstable or solid color picture at 200MHz.
+        * Not sure what's wrong. For now use 200MHz only when all pipes
+        * are off.
+        */
+       if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
+               return 400000;
+       else if (min_cdclk > 266667)
+               return freq_320;
+       else if (min_cdclk > 0)
+               return 266667;
+       else
+               return 200000;
+}
+
+static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
+{
+       if (IS_VALLEYVIEW(dev_priv)) {
+               if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+                       return 2;
+               else if (cdclk >= 266667)
+                       return 1;
+               else
+                       return 0;
+       } else {
+               /*
+                * Specs are full of misinformation, but testing on actual
+                * hardware has shown that we just need to write the desired
+                * CCK divider into the Punit register.
+                */
+               return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+       }
+}
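+
+/*
+ * Illustrative arithmetic for the divider formula in the else branch above
+ * (values hypothetical): with dev_priv->hpll_freq << 1 at 1600000 kHz and a
+ * target cdclk of 320000 kHz, the value written would be
+ * DIV_ROUND_CLOSEST(1600000, 320000) - 1 = 4.
+ */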
+
+static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       u32 val;
+
+       vlv_iosf_sb_get(dev_priv,
+                       BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+
+       cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
+       cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+                                              CCK_DISPLAY_CLOCK_CONTROL,
+                                              cdclk_state->vco);
+
+       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+
+       vlv_iosf_sb_put(dev_priv,
+                       BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+
+       if (IS_VALLEYVIEW(dev_priv))
+               cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+                       DSPFREQGUAR_SHIFT;
+       else
+               cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+                       DSPFREQGUAR_SHIFT_CHV;
+}
+
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+       unsigned int credits, default_credits;
+
+       if (IS_CHERRYVIEW(dev_priv))
+               default_credits = PFI_CREDIT(12);
+       else
+               default_credits = PFI_CREDIT(8);
+
+       if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+               /* CHV suggested value is 31 or 63 */
+               if (IS_CHERRYVIEW(dev_priv))
+                       credits = PFI_CREDIT_63;
+               else
+                       credits = PFI_CREDIT(15);
+       } else {
+               credits = default_credits;
+       }
+
+       /*
+        * WA - write default credits before re-programming
+        * FIXME: should we also set the resend bit here?
+        */
+       I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+                  default_credits);
+
+       I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+                  credits | PFI_CREDIT_RESEND);
+
+       /*
+        * FIXME is this guaranteed to clear
+        * immediately or should we poll for it?
+        */
+       WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       int cdclk = cdclk_state->cdclk;
+       u32 val, cmd = cdclk_state->voltage_level;
+       intel_wakeref_t wakeref;
+
+       switch (cdclk) {
+       case 400000:
+       case 333333:
+       case 320000:
+       case 266667:
+       case 200000:
+               break;
+       default:
+               MISSING_CASE(cdclk);
+               return;
+       }
+
+       /* There are cases where we can end up here with power domains
+        * off and a CDCLK frequency other than the minimum, like when
+        * issuing a modeset without actually changing any display after
+        * a system suspend.  So grab the PIPE-A domain, which covers
+        * the HW blocks needed for the following programming.
+        */
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
+       vlv_iosf_sb_get(dev_priv,
+                       BIT(VLV_IOSF_SB_CCK) |
+                       BIT(VLV_IOSF_SB_BUNIT) |
+                       BIT(VLV_IOSF_SB_PUNIT));
+
+       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+       val &= ~DSPFREQGUAR_MASK;
+       val |= (cmd << DSPFREQGUAR_SHIFT);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+                     DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+                    50)) {
+               DRM_ERROR("timed out waiting for CDclk change\n");
+       }
+
+       if (cdclk == 400000) {
+               u32 divider;
+
+               divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+                                           cdclk) - 1;
+
+               /* adjust cdclk divider */
+               val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+               val &= ~CCK_FREQUENCY_VALUES;
+               val |= divider;
+               vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+               if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+                             CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+                            50))
+                       DRM_ERROR("timed out waiting for CDclk change\n");
+       }
+
+       /* adjust self-refresh exit latency value */
+       val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+       val &= ~0x7f;
+
+       /*
+        * For high bandwidth configs, we set a higher latency in the bunit
+        * so that the core display fetch happens in time to avoid underruns.
+        */
+       if (cdclk == 400000)
+               val |= 4500 / 250; /* 4.5 usec */
+       else
+               val |= 3000 / 250; /* 3.0 usec */
+       vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+
+       vlv_iosf_sb_put(dev_priv,
+                       BIT(VLV_IOSF_SB_CCK) |
+                       BIT(VLV_IOSF_SB_BUNIT) |
+                       BIT(VLV_IOSF_SB_PUNIT));
+
+       intel_update_cdclk(dev_priv);
+
+       vlv_program_pfi_credits(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+}
+
+static void chv_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       int cdclk = cdclk_state->cdclk;
+       u32 val, cmd = cdclk_state->voltage_level;
+       intel_wakeref_t wakeref;
+
+       switch (cdclk) {
+       case 333333:
+       case 320000:
+       case 266667:
+       case 200000:
+               break;
+       default:
+               MISSING_CASE(cdclk);
+               return;
+       }
+
+       /* There are cases where we can end up here with power domains
+        * off and a CDCLK frequency other than the minimum, like when
+        * issuing a modeset without actually changing any display after
+        * a system suspend.  So grab the PIPE-A domain, which covers
+        * the HW blocks needed for the following programming.
+        */
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
+       vlv_punit_get(dev_priv);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+       val &= ~DSPFREQGUAR_MASK_CHV;
+       val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+                     DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+                    50)) {
+               DRM_ERROR("timed out waiting for CDclk change\n");
+       }
+
+       vlv_punit_put(dev_priv);
+
+       intel_update_cdclk(dev_priv);
+
+       vlv_program_pfi_credits(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+}
+
+static int bdw_calc_cdclk(int min_cdclk)
+{
+       if (min_cdclk > 540000)
+               return 675000;
+       else if (min_cdclk > 450000)
+               return 540000;
+       else if (min_cdclk > 337500)
+               return 450000;
+       else
+               return 337500;
+}
+
+static u8 bdw_calc_voltage_level(int cdclk)
+{
+       switch (cdclk) {
+       default:
+       case 337500:
+               return 2;
+       case 450000:
+               return 0;
+       case 540000:
+               return 1;
+       case 675000:
+               return 3;
+       }
+}
+
+static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       u32 lcpll = I915_READ(LCPLL_CTL);
+       u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+       if (lcpll & LCPLL_CD_SOURCE_FCLK)
+               cdclk_state->cdclk = 800000;
+       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+               cdclk_state->cdclk = 450000;
+       else if (freq == LCPLL_CLK_FREQ_450)
+               cdclk_state->cdclk = 450000;
+       else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+               cdclk_state->cdclk = 540000;
+       else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+               cdclk_state->cdclk = 337500;
+       else
+               cdclk_state->cdclk = 675000;
+
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               bdw_calc_voltage_level(cdclk_state->cdclk);
+}
+
+static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       int cdclk = cdclk_state->cdclk;
+       u32 val;
+       int ret;
+
+       if (WARN((I915_READ(LCPLL_CTL) &
+                 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+                  LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+                  LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+                  LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+                "trying to change cdclk frequency with cdclk not enabled\n"))
+               return;
+
+       ret = sandybridge_pcode_write(dev_priv,
+                                     BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+       if (ret) {
+               DRM_ERROR("failed to inform pcode about cdclk change\n");
+               return;
+       }
+
+       val = I915_READ(LCPLL_CTL);
+       val |= LCPLL_CD_SOURCE_FCLK;
+       I915_WRITE(LCPLL_CTL, val);
+
+       /*
+        * According to the spec, it should be enough to poll for this for 1 us.
+        * However, extensive testing shows that this can take longer.
+        */
+       if (wait_for_us(I915_READ(LCPLL_CTL) &
+                       LCPLL_CD_SOURCE_FCLK_DONE, 100))
+               DRM_ERROR("Switching to FCLK failed\n");
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_CLK_FREQ_MASK;
+
+       switch (cdclk) {
+       default:
+               MISSING_CASE(cdclk);
+               /* fall through */
+       case 337500:
+               val |= LCPLL_CLK_FREQ_337_5_BDW;
+               break;
+       case 450000:
+               val |= LCPLL_CLK_FREQ_450;
+               break;
+       case 540000:
+               val |= LCPLL_CLK_FREQ_54O_BDW;
+               break;
+       case 675000:
+               val |= LCPLL_CLK_FREQ_675_BDW;
+               break;
+       }
+
+       I915_WRITE(LCPLL_CTL, val);
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_CD_SOURCE_FCLK;
+       I915_WRITE(LCPLL_CTL, val);
+
+       if (wait_for_us((I915_READ(LCPLL_CTL) &
+                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+               DRM_ERROR("Switching back to LCPLL failed\n");
+
+       sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+                               cdclk_state->voltage_level);
+
+       I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
+       intel_update_cdclk(dev_priv);
+}
+
+static int skl_calc_cdclk(int min_cdclk, int vco)
+{
+       if (vco == 8640000) {
+               if (min_cdclk > 540000)
+                       return 617143;
+               else if (min_cdclk > 432000)
+                       return 540000;
+               else if (min_cdclk > 308571)
+                       return 432000;
+               else
+                       return 308571;
+       } else {
+               if (min_cdclk > 540000)
+                       return 675000;
+               else if (min_cdclk > 450000)
+                       return 540000;
+               else if (min_cdclk > 337500)
+                       return 450000;
+               else
+                       return 337500;
+       }
+}
+
+static u8 skl_calc_voltage_level(int cdclk)
+{
+       if (cdclk > 540000)
+               return 3;
+       else if (cdclk > 450000)
+               return 2;
+       else if (cdclk > 337500)
+               return 1;
+       else
+               return 0;
+}
+
+static void skl_dpll0_update(struct drm_i915_private *dev_priv,
+                            struct intel_cdclk_state *cdclk_state)
+{
+       u32 val;
+
+       cdclk_state->ref = 24000;
+       cdclk_state->vco = 0;
+
+       val = I915_READ(LCPLL1_CTL);
+       if ((val & LCPLL_PLL_ENABLE) == 0)
+               return;
+
+       if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+               return;
+
+       val = I915_READ(DPLL_CTRL1);
+
+       if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+                           DPLL_CTRL1_SSC(SKL_DPLL0) |
+                           DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+                   DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+               return;
+
+       switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+               cdclk_state->vco = 8100000;
+               break;
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+               cdclk_state->vco = 8640000;
+               break;
+       default:
+               MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+               break;
+       }
+}
+
+static void skl_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       u32 cdctl;
+
+       skl_dpll0_update(dev_priv, cdclk_state);
+
+       cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+
+       if (cdclk_state->vco == 0)
+               goto out;
+
+       cdctl = I915_READ(CDCLK_CTL);
+
+       if (cdclk_state->vco == 8640000) {
+               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+               case CDCLK_FREQ_450_432:
+                       cdclk_state->cdclk = 432000;
+                       break;
+               case CDCLK_FREQ_337_308:
+                       cdclk_state->cdclk = 308571;
+                       break;
+               case CDCLK_FREQ_540:
+                       cdclk_state->cdclk = 540000;
+                       break;
+               case CDCLK_FREQ_675_617:
+                       cdclk_state->cdclk = 617143;
+                       break;
+               default:
+                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
+                       break;
+               }
+       } else {
+               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+               case CDCLK_FREQ_450_432:
+                       cdclk_state->cdclk = 450000;
+                       break;
+               case CDCLK_FREQ_337_308:
+                       cdclk_state->cdclk = 337500;
+                       break;
+               case CDCLK_FREQ_540:
+                       cdclk_state->cdclk = 540000;
+                       break;
+               case CDCLK_FREQ_675_617:
+                       cdclk_state->cdclk = 675000;
+                       break;
+               default:
+                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
+                       break;
+               }
+       }
+
+ out:
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               skl_calc_voltage_level(cdclk_state->cdclk);
+}
+
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+       return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
+}
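+
+/*
+ * For example, skl_cdclk_decimal(337500) = DIV_ROUND_CLOSEST(336500, 500) =
+ * 673, i.e. 336.5 MHz (337.5 MHz minus the 1 MHz offset) in 0.5 MHz units.
+ */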
+
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
+                                       int vco)
+{
+       bool changed = dev_priv->skl_preferred_vco_freq != vco;
+
+       dev_priv->skl_preferred_vco_freq = vco;
+
+       if (changed)
+               intel_update_max_cdclk(dev_priv);
+}
+
+static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+{
+       u32 val;
+
+       WARN_ON(vco != 8100000 && vco != 8640000);
+
+       /*
+        * We always enable DPLL0 with the lowest link rate possible, but still
+        * taking into account the VCO required to operate the eDP panel at the
+        * desired frequency. The usual DP link rates operate with a VCO of
+        * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
+        * The modeset code is responsible for the selection of the exact link
+        * rate later on, with the constraint of choosing a frequency that
+        * works with vco.
+        */
+       val = I915_READ(DPLL_CTRL1);
+
+       val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
+                DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+       val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+       if (vco == 8640000)
+               val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
+                                           SKL_DPLL0);
+       else
+               val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
+                                           SKL_DPLL0);
+
+       I915_WRITE(DPLL_CTRL1, val);
+       POSTING_READ(DPLL_CTRL1);
+
+       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+                                   5))
+               DRM_ERROR("DPLL0 not locked\n");
+
+       dev_priv->cdclk.hw.vco = vco;
+
+       /* We'll want to keep using the current vco from now on. */
+       skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+                                   1))
+               DRM_ERROR("Couldn't disable DPLL0\n");
+
+       dev_priv->cdclk.hw.vco = 0;
+}
+
+static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       int cdclk = cdclk_state->cdclk;
+       int vco = cdclk_state->vco;
+       u32 freq_select, cdclk_ctl;
+       int ret;
+
+       /*
+        * Based on WA#1183, the 308 and 617 MHz CDCLK rates are unsupported
+        * on SKL. In theory this should never happen since only the eDP 1.4
+        * 2.16 and 4.32 Gbps link rates require them, but eDP 1.4 is not
+        * supported on SKL either, see the above WA. WARN whenever trying to
+        * use the corresponding VCO freq as that always leads to using the
+        * minimum 308 MHz CDCLK.
+        */
+       WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+
+       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               SKL_CDCLK_PREPARE_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE, 3);
+       if (ret) {
+               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+                         ret);
+               return;
+       }
+
+       /* Choose frequency for this cdclk */
+       switch (cdclk) {
+       default:
+               WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+               WARN_ON(vco != 0);
+               /* fall through */
+       case 308571:
+       case 337500:
+               freq_select = CDCLK_FREQ_337_308;
+               break;
+       case 450000:
+       case 432000:
+               freq_select = CDCLK_FREQ_450_432;
+               break;
+       case 540000:
+               freq_select = CDCLK_FREQ_540;
+               break;
+       case 617143:
+       case 675000:
+               freq_select = CDCLK_FREQ_675_617;
+               break;
+       }
+
+       if (dev_priv->cdclk.hw.vco != 0 &&
+           dev_priv->cdclk.hw.vco != vco)
+               skl_dpll0_disable(dev_priv);
+
+       cdclk_ctl = I915_READ(CDCLK_CTL);
+
+       if (dev_priv->cdclk.hw.vco != vco) {
+               /* Wa Display #1183: skl,kbl,cfl */
+               cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+               cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+               I915_WRITE(CDCLK_CTL, cdclk_ctl);
+       }
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+       POSTING_READ(CDCLK_CTL);
+
+       if (dev_priv->cdclk.hw.vco != vco)
+               skl_dpll0_enable(dev_priv, vco);
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+       cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+       I915_WRITE(CDCLK_CTL, cdclk_ctl);
+       POSTING_READ(CDCLK_CTL);
+
+       /* inform PCU of the change */
+       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               cdclk_state->voltage_level);
+
+       intel_update_cdclk(dev_priv);
+}
+
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+       u32 cdctl, expected;
+
+       /*
+        * Check if the pre-os initialized the display.
+        * The SWF18 scratchpad register is set by the pre-os and can be used
+        * by OS drivers to check the status.
+        */
+       if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+               goto sanitize;
+
+       intel_update_cdclk(dev_priv);
+       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
+       /* Is PLL enabled and locked ? */
+       if (dev_priv->cdclk.hw.vco == 0 ||
+           dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+               goto sanitize;
+
+       /* DPLL okay; verify the cdclk.
+        *
+        * In some instances the frequency selection is correct but the
+        * decimal part is programmed wrong by the BIOS when the pre-os does
+        * not enable the display. Verify that as well.
+        */
+       cdctl = I915_READ(CDCLK_CTL);
+       expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+               skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+       if (cdctl == expected)
+               /* All well; nothing to sanitize */
+               return;
+
+sanitize:
+       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+       /* force cdclk programming */
+       dev_priv->cdclk.hw.cdclk = 0;
+       /* force full PLL disable + enable */
+       dev_priv->cdclk.hw.vco = -1;
+}
+
+static void skl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state;
+
+       skl_sanitize_cdclk(dev_priv);
+
+       if (dev_priv->cdclk.hw.cdclk != 0 &&
+           dev_priv->cdclk.hw.vco != 0) {
+               /*
+                * Use the current vco as our initial
+                * guess as to what the preferred vco is.
+                */
+               if (dev_priv->skl_preferred_vco_freq == 0)
+                       skl_set_preferred_cdclk_vco(dev_priv,
+                                                   dev_priv->cdclk.hw.vco);
+               return;
+       }
+
+       cdclk_state = dev_priv->cdclk.hw;
+
+       cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
+       if (cdclk_state.vco == 0)
+               cdclk_state.vco = 8100000;
+       cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
+       cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+
+       skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+       cdclk_state.cdclk = cdclk_state.bypass;
+       cdclk_state.vco = 0;
+       cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+
+       skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+static int bxt_calc_cdclk(int min_cdclk)
+{
+       if (min_cdclk > 576000)
+               return 624000;
+       else if (min_cdclk > 384000)
+               return 576000;
+       else if (min_cdclk > 288000)
+               return 384000;
+       else if (min_cdclk > 144000)
+               return 288000;
+       else
+               return 144000;
+}
+
+static int glk_calc_cdclk(int min_cdclk)
+{
+       if (min_cdclk > 158400)
+               return 316800;
+       else if (min_cdclk > 79200)
+               return 158400;
+       else
+               return 79200;
+}
+
+static u8 bxt_calc_voltage_level(int cdclk)
+{
+       return DIV_ROUND_UP(cdclk, 25000);
+}
+
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+       int ratio;
+
+       if (cdclk == dev_priv->cdclk.hw.bypass)
+               return 0;
+
+       switch (cdclk) {
+       default:
+               MISSING_CASE(cdclk);
+               /* fall through */
+       case 144000:
+       case 288000:
+       case 384000:
+       case 576000:
+               ratio = 60;
+               break;
+       case 624000:
+               ratio = 65;
+               break;
+       }
+
+       return dev_priv->cdclk.hw.ref * ratio;
+}
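+
+/*
+ * For illustration: with the 19200 kHz reference, a ratio of 60 gives a
+ * DE PLL VCO of 1152000 kHz, which the CD2X dividers of 1/1.5/2/4 turn into
+ * 576000/384000/288000/144000 kHz CDCLK, while a ratio of 65 (1248000 kHz VCO)
+ * gives 624000 kHz.
+ */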
+
+static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+       int ratio;
+
+       if (cdclk == dev_priv->cdclk.hw.bypass)
+               return 0;
+
+       switch (cdclk) {
+       default:
+               MISSING_CASE(cdclk);
+               /* fall through */
+       case  79200:
+       case 158400:
+       case 316800:
+               ratio = 33;
+               break;
+       }
+
+       return dev_priv->cdclk.hw.ref * ratio;
+}
+
+static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
+                             struct intel_cdclk_state *cdclk_state)
+{
+       u32 val;
+
+       cdclk_state->ref = 19200;
+       cdclk_state->vco = 0;
+
+       val = I915_READ(BXT_DE_PLL_ENABLE);
+       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+               return;
+
+       if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+               return;
+
+       val = I915_READ(BXT_DE_PLL_CTL);
+       cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+}
+
+static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       u32 divider;
+       int div;
+
+       bxt_de_pll_update(dev_priv, cdclk_state);
+
+       cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+
+       if (cdclk_state->vco == 0)
+               goto out;
+
+       divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+
+       switch (divider) {
+       case BXT_CDCLK_CD2X_DIV_SEL_1:
+               div = 2;
+               break;
+       case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+               WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+               div = 3;
+               break;
+       case BXT_CDCLK_CD2X_DIV_SEL_2:
+               div = 4;
+               break;
+       case BXT_CDCLK_CD2X_DIV_SEL_4:
+               div = 8;
+               break;
+       default:
+               MISSING_CASE(divider);
+               return;
+       }
+
+       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+
+ out:
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               bxt_calc_voltage_level(cdclk_state->cdclk);
+}
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+       /* Timeout 200us */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+                                   1))
+               DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+       dev_priv->cdclk.hw.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+       int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+       u32 val;
+
+       val = I915_READ(BXT_DE_PLL_CTL);
+       val &= ~BXT_DE_PLL_RATIO_MASK;
+       val |= BXT_DE_PLL_RATIO(ratio);
+       I915_WRITE(BXT_DE_PLL_CTL, val);
+
+       I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+       /* Timeout 200us */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   BXT_DE_PLL_ENABLE,
+                                   BXT_DE_PLL_LOCK,
+                                   BXT_DE_PLL_LOCK,
+                                   1))
+               DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+       dev_priv->cdclk.hw.vco = vco;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       int cdclk = cdclk_state->cdclk;
+       int vco = cdclk_state->vco;
+       u32 val, divider;
+       int ret;
+
+       /* cdclk = vco / 2 / div{1,1.5,2,4} */
+       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+       default:
+               WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+               WARN_ON(vco != 0);
+               /* fall through */
+       case 2:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+               break;
+       case 3:
+               WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+               break;
+       case 4:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+               break;
+       case 8:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+               break;
+       }
+
+       /*
+        * Inform power controller of upcoming frequency change. BSpec
+        * requires us to wait up to 150usec, but that leads to timeouts;
+        * the 2ms used here is based on experiment.
+        */
+       ret = sandybridge_pcode_write_timeout(dev_priv,
+                                             HSW_PCODE_DE_WRITE_FREQ_REQ,
+                                             0x80000000, 150, 2);
+       if (ret) {
+               DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
+                         ret, cdclk);
+               return;
+       }
+
+       if (dev_priv->cdclk.hw.vco != 0 &&
+           dev_priv->cdclk.hw.vco != vco)
+               bxt_de_pll_disable(dev_priv);
+
+       if (dev_priv->cdclk.hw.vco != vco)
+               bxt_de_pll_enable(dev_priv, vco);
+
+       val = divider | skl_cdclk_decimal(cdclk);
+       if (pipe == INVALID_PIPE)
+               val |= BXT_CDCLK_CD2X_PIPE_NONE;
+       else
+               val |= BXT_CDCLK_CD2X_PIPE(pipe);
+       /*
+        * Disable SSA Precharge when CD clock frequency < 500 MHz,
+        * enable otherwise.
+        */
+       if (cdclk >= 500000)
+               val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+       I915_WRITE(CDCLK_CTL, val);
+
+       if (pipe != INVALID_PIPE)
+               intel_wait_for_vblank(dev_priv, pipe);
+
+       /*
+        * The timeout isn't specified, the 2ms used here is based on
+        * experiment.
+        * FIXME: Waiting for the request completion could be delayed until
+        * the next PCODE request based on BSpec.
+        */
+       ret = sandybridge_pcode_write_timeout(dev_priv,
+                                             HSW_PCODE_DE_WRITE_FREQ_REQ,
+                                             cdclk_state->voltage_level, 150, 2);
+       if (ret) {
+               DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+                         ret, cdclk);
+               return;
+       }
+
+       intel_update_cdclk(dev_priv);
+}
+
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+       u32 cdctl, expected;
+
+       intel_update_cdclk(dev_priv);
+       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
+       if (dev_priv->cdclk.hw.vco == 0 ||
+           dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+               goto sanitize;
+
+       /* DPLL okay; verify the cdclk.
+        *
+        * Some BIOS versions leave an incorrect decimal frequency value and
+        * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+        * so sanitize this register.
+        */
+       cdctl = I915_READ(CDCLK_CTL);
+       /*
+        * Let's ignore the pipe field, since the BIOS could have configured
+        * the dividers either synched to an active pipe or asynchronously
+        * (PIPE_NONE).
+        */
+       cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
+
+       expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+               skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+       /*
+        * Disable SSA Precharge when CD clock frequency < 500 MHz,
+        * enable otherwise.
+        */
+       if (dev_priv->cdclk.hw.cdclk >= 500000)
+               expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+       if (cdctl == expected)
+               /* All well; nothing to sanitize */
+               return;
+
+sanitize:
+       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+       /* force cdclk programming */
+       dev_priv->cdclk.hw.cdclk = 0;
+
+       /* force full PLL disable + enable */
+       dev_priv->cdclk.hw.vco = -1;
+}
+
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state;
+
+       bxt_sanitize_cdclk(dev_priv);
+
+       if (dev_priv->cdclk.hw.cdclk != 0 &&
+           dev_priv->cdclk.hw.vco != 0)
+               return;
+
+       cdclk_state = dev_priv->cdclk.hw;
+
+       /*
+        * FIXME:
+        * - The initial CDCLK needs to be read from VBT.
+        *   Need to make this change after VBT has changes for BXT.
+        */
+       if (IS_GEMINILAKE(dev_priv)) {
+               cdclk_state.cdclk = glk_calc_cdclk(0);
+               cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
+       } else {
+               cdclk_state.cdclk = bxt_calc_cdclk(0);
+               cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
+       }
+       cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
+
+       bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+       cdclk_state.cdclk = cdclk_state.bypass;
+       cdclk_state.vco = 0;
+       cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
+
+       bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+static int cnl_calc_cdclk(int min_cdclk)
+{
+       if (min_cdclk > 336000)
+               return 528000;
+       else if (min_cdclk > 168000)
+               return 336000;
+       else
+               return 168000;
+}
+
+static u8 cnl_calc_voltage_level(int cdclk)
+{
+       if (cdclk > 336000)
+               return 2;
+       else if (cdclk > 168000)
+               return 1;
+       else
+               return 0;
+}
+
+static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
+                                struct intel_cdclk_state *cdclk_state)
+{
+       u32 val;
+
+       if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+               cdclk_state->ref = 24000;
+       else
+               cdclk_state->ref = 19200;
+
+       cdclk_state->vco = 0;
+
+       val = I915_READ(BXT_DE_PLL_ENABLE);
+       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+               return;
+
+       if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+               return;
+
+       cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
+}
+
+static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
+                        struct intel_cdclk_state *cdclk_state)
+{
+       u32 divider;
+       int div;
+
+       cnl_cdclk_pll_update(dev_priv, cdclk_state);
+
+       cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+
+       if (cdclk_state->vco == 0)
+               goto out;
+
+       divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+
+       switch (divider) {
+       case BXT_CDCLK_CD2X_DIV_SEL_1:
+               div = 2;
+               break;
+       case BXT_CDCLK_CD2X_DIV_SEL_2:
+               div = 4;
+               break;
+       default:
+               MISSING_CASE(divider);
+               return;
+       }
+
+       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+
+ out:
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               cnl_calc_voltage_level(cdclk_state->cdclk);
+}
+
+static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(BXT_DE_PLL_ENABLE);
+       val &= ~BXT_DE_PLL_PLL_ENABLE;
+       I915_WRITE(BXT_DE_PLL_ENABLE, val);
+
+       /* Timeout 200us */
+       if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+               DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
+
+       dev_priv->cdclk.hw.vco = 0;
+}
+
+static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+       int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+       u32 val;
+
+       val = CNL_CDCLK_PLL_RATIO(ratio);
+       I915_WRITE(BXT_DE_PLL_ENABLE, val);
+
+       val |= BXT_DE_PLL_PLL_ENABLE;
+       I915_WRITE(BXT_DE_PLL_ENABLE, val);
+
+       /* Timeout 200us */
+       if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+               DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
+
+       dev_priv->cdclk.hw.vco = vco;
+}
+
+static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       int cdclk = cdclk_state->cdclk;
+       int vco = cdclk_state->vco;
+       u32 val, divider;
+       int ret;
+
+       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               SKL_CDCLK_PREPARE_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE, 3);
+       if (ret) {
+               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+                         ret);
+               return;
+       }
+
+       /* cdclk = vco / 2 / div{1,2} */
+       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+       default:
+               WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+               WARN_ON(vco != 0);
+               /* fall through */
+       case 2:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+               break;
+       case 4:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+               break;
+       }
+
+       if (dev_priv->cdclk.hw.vco != 0 &&
+           dev_priv->cdclk.hw.vco != vco)
+               cnl_cdclk_pll_disable(dev_priv);
+
+       if (dev_priv->cdclk.hw.vco != vco)
+               cnl_cdclk_pll_enable(dev_priv, vco);
+
+       val = divider | skl_cdclk_decimal(cdclk);
+       if (pipe == INVALID_PIPE)
+               val |= BXT_CDCLK_CD2X_PIPE_NONE;
+       else
+               val |= BXT_CDCLK_CD2X_PIPE(pipe);
+       I915_WRITE(CDCLK_CTL, val);
+
+       if (pipe != INVALID_PIPE)
+               intel_wait_for_vblank(dev_priv, pipe);
+
+       /* inform PCU of the change */
+       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               cdclk_state->voltage_level);
+
+       intel_update_cdclk(dev_priv);
+
+       /*
+        * Can't read out the voltage level :(
+        * Let's just assume everything is as expected.
+        */
+       dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+       int ratio;
+
+       if (cdclk == dev_priv->cdclk.hw.bypass)
+               return 0;
+
+       switch (cdclk) {
+       default:
+               MISSING_CASE(cdclk);
+               /* fall through */
+       case 168000:
+       case 336000:
+               ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
+               break;
+       case 528000:
+               ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
+               break;
+       }
+
+       return dev_priv->cdclk.hw.ref * ratio;
+}
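+
+/*
+ * Note that both reference clocks land on the same VCO frequencies:
+ * 19200 * 35 == 24000 * 28 == 672000 kHz (168/336 MHz CDCLK) and
+ * 19200 * 55 == 24000 * 44 == 1056000 kHz (528 MHz CDCLK).
+ */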
+
+static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+       u32 cdctl, expected;
+
+       intel_update_cdclk(dev_priv);
+       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
+       if (dev_priv->cdclk.hw.vco == 0 ||
+           dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+               goto sanitize;
+
+       /* DPLL okay; verify the cdclk.
+        *
+        * Some BIOS versions leave an incorrect decimal frequency value and
+        * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+        * so sanitize this register.
+        */
+       cdctl = I915_READ(CDCLK_CTL);
+       /*
+        * Let's ignore the pipe field, since the BIOS could have configured
+        * the dividers either synched to an active pipe or asynchronously
+        * (PIPE_NONE).
+        */
+       cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
+
+       expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+                  skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+
+       if (cdctl == expected)
+               /* All well; nothing to sanitize */
+               return;
+
+sanitize:
+       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+       /* force cdclk programming */
+       dev_priv->cdclk.hw.cdclk = 0;
+
+       /* force full PLL disable + enable */
+       dev_priv->cdclk.hw.vco = -1;
+}
+
+static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
+{
+       int ranges_24[] = { 312000, 552000, 648000 };
+       int ranges_19_38[] = { 307200, 556800, 652800 };
+       int *ranges;
+
+       switch (ref) {
+       default:
+               MISSING_CASE(ref);
+               /* fall through */
+       case 24000:
+               ranges = ranges_24;
+               break;
+       case 19200:
+       case 38400:
+               ranges = ranges_19_38;
+               break;
+       }
+
+       if (min_cdclk > ranges[1])
+               return ranges[2];
+       else if (min_cdclk > ranges[0])
+               return ranges[1];
+       else
+               return ranges[0];
+}
+
+static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+       int ratio;
+
+       if (cdclk == dev_priv->cdclk.hw.bypass)
+               return 0;
+
+       switch (cdclk) {
+       default:
+               MISSING_CASE(cdclk);
+               /* fall through */
+       case 307200:
+       case 556800:
+       case 652800:
+               WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
+                       dev_priv->cdclk.hw.ref != 38400);
+               break;
+       case 312000:
+       case 552000:
+       case 648000:
+               WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+       }
+
+       ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
+
+       return dev_priv->cdclk.hw.ref * ratio;
+}
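+
+/*
+ * Since the CD2X divider is always 1 on ICL, the VCO is simply twice the
+ * CDCLK; e.g. with a 24000 kHz reference, a 648000 kHz CDCLK gives ratio 54
+ * and a 1296000 kHz VCO.
+ */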
+
+static void icl_set_cdclk(struct drm_i915_private *dev_priv,
+                         const struct intel_cdclk_state *cdclk_state,
+                         enum pipe pipe)
+{
+       unsigned int cdclk = cdclk_state->cdclk;
+       unsigned int vco = cdclk_state->vco;
+       int ret;
+
+       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               SKL_CDCLK_PREPARE_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE, 3);
+       if (ret) {
+               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+                         ret);
+               return;
+       }
+
+       if (dev_priv->cdclk.hw.vco != 0 &&
+           dev_priv->cdclk.hw.vco != vco)
+               cnl_cdclk_pll_disable(dev_priv);
+
+       if (dev_priv->cdclk.hw.vco != vco)
+               cnl_cdclk_pll_enable(dev_priv, vco);
+
+       /*
+        * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
+        * divider here synchronized to a pipe while CDCLK is on, nor will we
+        * need the corresponding vblank wait.
+        */
+       I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
+                             skl_cdclk_decimal(cdclk));
+
+       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               cdclk_state->voltage_level);
+
+       intel_update_cdclk(dev_priv);
+
+       /*
+        * Can't read out the voltage level :(
+        * Let's just assume everything is as expected.
+        */
+       dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+       if (cdclk > 556800)
+               return 2;
+       else if (cdclk > 312000)
+               return 1;
+       else
+               return 0;
+}
+
+static void icl_get_cdclk(struct drm_i915_private *dev_priv,
+                         struct intel_cdclk_state *cdclk_state)
+{
+       u32 val;
+
+       cdclk_state->bypass = 50000;
+
+       val = I915_READ(SKL_DSSM);
+       switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
+       default:
+               MISSING_CASE(val);
+               /* fall through */
+       case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+               cdclk_state->ref = 24000;
+               break;
+       case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+               cdclk_state->ref = 19200;
+               break;
+       case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+               cdclk_state->ref = 38400;
+               break;
+       }
+
+       val = I915_READ(BXT_DE_PLL_ENABLE);
+       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+           (val & BXT_DE_PLL_LOCK) == 0) {
+               /*
+                * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+                * setting it to zero is a way to signal that.
+                */
+               cdclk_state->vco = 0;
+               cdclk_state->cdclk = cdclk_state->bypass;
+               goto out;
+       }
+
+       cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+
+       val = I915_READ(CDCLK_CTL);
+       WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
+
+       cdclk_state->cdclk = cdclk_state->vco / 2;
+
+out:
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               icl_calc_voltage_level(cdclk_state->cdclk);
+}
+
+static void icl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state sanitized_state;
+       u32 val;
+
+       /* This sets dev_priv->cdclk.hw. */
+       intel_update_cdclk(dev_priv);
+       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
+       /* This means CDCLK disabled. */
+       if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+               goto sanitize;
+
+       val = I915_READ(CDCLK_CTL);
+
+       if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
+               goto sanitize;
+
+       if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
+           skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
+               goto sanitize;
+
+       return;
+
+sanitize:
+       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+       sanitized_state.ref = dev_priv->cdclk.hw.ref;
+       sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
+       sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
+                                                    sanitized_state.cdclk);
+       sanitized_state.voltage_level =
+                               icl_calc_voltage_level(sanitized_state.cdclk);
+
+       icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
+}
+
+static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+       cdclk_state.cdclk = cdclk_state.bypass;
+       cdclk_state.vco = 0;
+       cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
+
+       icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state;
+
+       cnl_sanitize_cdclk(dev_priv);
+
+       if (dev_priv->cdclk.hw.cdclk != 0 &&
+           dev_priv->cdclk.hw.vco != 0)
+               return;
+
+       cdclk_state = dev_priv->cdclk.hw;
+
+       cdclk_state.cdclk = cnl_calc_cdclk(0);
+       cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+       cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+
+       cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+       cdclk_state.cdclk = cdclk_state.bypass;
+       cdclk_state.vco = 0;
+       cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+
+       cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+/**
+ * intel_cdclk_init - Initialize CDCLK
+ * @i915: i915 device
+ *
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * sanitizing the state of the hardware if needed. This is generally done only
+ * during the display core initialization sequence, after which the DMC will
+ * take care of turning CDCLK off/on as needed.
+ */
+void intel_cdclk_init(struct drm_i915_private *i915)
+{
+       if (INTEL_GEN(i915) >= 11)
+               icl_init_cdclk(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_init_cdclk(i915);
+       else if (IS_GEN9_BC(i915))
+               skl_init_cdclk(i915);
+       else if (IS_GEN9_LP(i915))
+               bxt_init_cdclk(i915);
+}
+
+/**
+ * intel_cdclk_uninit - Uninitialize CDCLK
+ * @i915: i915 device
+ *
+ * Uninitialize CDCLK. This is done only during the display core
+ * uninitialization sequence.
+ */
+void intel_cdclk_uninit(struct drm_i915_private *i915)
+{
+       if (INTEL_GEN(i915) >= 11)
+               icl_uninit_cdclk(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_uninit_cdclk(i915);
+       else if (IS_GEN9_BC(i915))
+               skl_uninit_cdclk(i915);
+       else if (IS_GEN9_LP(i915))
+               bxt_uninit_cdclk(i915);
+}
+
+/**
+ * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states require pipes to be off during reprogramming, false if not.
+ */
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
+                              const struct intel_cdclk_state *b)
+{
+       return a->cdclk != b->cdclk ||
+               a->vco != b->vco ||
+               a->ref != b->ref;
+}
+
+/**
+ * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states require just a cd2x divider update, false if not.
+ */
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+                                  const struct intel_cdclk_state *a,
+                                  const struct intel_cdclk_state *b)
+{
+       /* Older hw doesn't have the capability */
+       if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+               return false;
+
+       return a->cdclk != b->cdclk &&
+               a->vco == b->vco &&
+               a->ref == b->ref;
+}
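+
+/*
+ * In other words: when only the CD2X divider changes (same VCO and reference),
+ * the frequency can be switched with the pipes running, synchronized to the
+ * given pipe's vblank (see the pipe handling in bxt_set_cdclk() and
+ * cnl_set_cdclk()).
+ */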
+
+/**
+ * intel_cdclk_changed - Determine if two CDCLK states are different
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states don't match, false if they do.
+ */
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+                        const struct intel_cdclk_state *b)
+{
+       return intel_cdclk_needs_modeset(a, b) ||
+               a->voltage_level != b->voltage_level;
+}
+
+/**
+ * intel_cdclk_swap_state - make atomic CDCLK configuration effective
+ * @state: atomic state
+ *
+ * This is the CDCLK version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * Similarly to the atomic helpers this function does a complete swap,
+ * i.e. it also puts the old state into @state. This is used by the commit
+ * code to determine how CDCLK has changed (for instance did it increase or
+ * decrease).
+ */
+void intel_cdclk_swap_state(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+       swap(state->cdclk.logical, dev_priv->cdclk.logical);
+       swap(state->cdclk.actual, dev_priv->cdclk.actual);
+}
+
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+                           const char *context)
+{
+       DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
+                        context, cdclk_state->cdclk, cdclk_state->vco,
+                        cdclk_state->ref, cdclk_state->bypass,
+                        cdclk_state->voltage_level);
+}
+
+/**
+ * intel_set_cdclk - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @cdclk_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware based on the passed in CDCLK state,
+ * if necessary.
+ */
+static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+                           const struct intel_cdclk_state *cdclk_state,
+                           enum pipe pipe)
+{
+       if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
+               return;
+
+       if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+               return;
+
+       intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
+
+       dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
+
+       if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
+                "cdclk state doesn't match!\n")) {
+               intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
+               intel_dump_cdclk_state(cdclk_state, "[sw state]");
+       }
+}
+
+/**
+ * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware before updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+                                const struct intel_cdclk_state *old_state,
+                                const struct intel_cdclk_state *new_state,
+                                enum pipe pipe)
+{
+       if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
+               intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
+/**
+ * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware after updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+                                 const struct intel_cdclk_state *old_state,
+                                 const struct intel_cdclk_state *new_state,
+                                 enum pipe pipe)
+{
+       if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
+               intel_set_cdclk(dev_priv, new_state, pipe);
+}
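+
+/*
+ * The intent of the pre/post split above: a CDCLK increase (or an update that
+ * is not synchronized to a pipe) is committed before the planes are updated,
+ * and a decrease only afterwards, so the active configuration never runs with
+ * less CDCLK than it needs.
+ */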
+
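+/*
+ * Rough rationale for the ratios below (see also intel_compute_max_dotclk()):
+ * GLK and gen10+ handle two pixels per CDCLK cycle, the other gen9 platforms
+ * plus BDW/HSW one pixel per cycle, while CHV needs a 5% and VLV/older
+ * platforms a 10% CDCLK guardband above the pixel rate.
+ */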
+static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
+                                    int pixel_rate)
+{
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               return DIV_ROUND_UP(pixel_rate, 2);
+       else if (IS_GEN(dev_priv, 9) ||
+                IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+               return pixel_rate;
+       else if (IS_CHERRYVIEW(dev_priv))
+               return DIV_ROUND_UP(pixel_rate * 100, 95);
+       else
+               return DIV_ROUND_UP(pixel_rate * 100, 90);
+}
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->base.crtc->dev);
+       int min_cdclk;
+
+       if (!crtc_state->base.enable)
+               return 0;
+
+       min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
+
+       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+       if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
+               min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
+
+       /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+        * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+        * there may be audio corruption or screen corruption." This cdclk
+        * restriction for GLK is 316.8 MHz.
+        */
+       if (intel_crtc_has_dp_encoder(crtc_state) &&
+           crtc_state->has_audio &&
+           crtc_state->port_clock >= 540000 &&
+           crtc_state->lane_count == 4) {
+               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+                       /* Display WA #1145: glk,cnl */
+                       min_cdclk = max(316800, min_cdclk);
+               } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
+                       /* Display WA #1144: skl,bxt */
+                       min_cdclk = max(432000, min_cdclk);
+               }
+       }
+
+       /*
+        * According to BSpec, "The CD clock frequency must be at least twice
+        * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+        */
+       if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+               min_cdclk = max(2 * 96000, min_cdclk);
+
+       /*
+        * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+        * than 320000 kHz.
+        */
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+           IS_VALLEYVIEW(dev_priv))
+               min_cdclk = max(320000, min_cdclk);
+
+       /*
+        * On Geminilake the picture becomes unstable once the CDCLK gets as
+        * low as 79200 kHz, even though the values are correct for both the
+        * DSI PLL and the DE PLL.
+        */
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+           IS_GEMINILAKE(dev_priv))
+               min_cdclk = max(158400, min_cdclk);
+
+       if (min_cdclk > dev_priv->max_cdclk_freq) {
+               DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
+                             min_cdclk, dev_priv->max_cdclk_freq);
+               return -EINVAL;
+       }
+
+       return min_cdclk;
+}
+
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
+       int min_cdclk, i;
+       enum pipe pipe;
+
+       memcpy(state->min_cdclk, dev_priv->min_cdclk,
+              sizeof(state->min_cdclk));
+
+       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+               min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+               if (min_cdclk < 0)
+                       return min_cdclk;
+
+               state->min_cdclk[i] = min_cdclk;
+       }
+
+       min_cdclk = state->cdclk.force_min_cdclk;
+       for_each_pipe(dev_priv, pipe)
+               min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
+
+       return min_cdclk;
+}
+
+/*
+ * Note that this function assumes that 0 is
+ * the lowest voltage value, and higher values
+ * correspond to increasingly higher voltages.
+ *
+ * Should that relationship no longer hold on
+ * future platforms this code will need to be
+ * adjusted.
+ */
+static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
+       u8 min_voltage_level;
+       int i;
+       enum pipe pipe;
+
+       memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
+              sizeof(state->min_voltage_level));
+
+       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+               if (crtc_state->base.enable)
+                       state->min_voltage_level[i] =
+                               crtc_state->min_voltage_level;
+               else
+                       state->min_voltage_level[i] = 0;
+       }
+
+       min_voltage_level = 0;
+       for_each_pipe(dev_priv, pipe)
+               min_voltage_level = max(state->min_voltage_level[pipe],
+                                       min_voltage_level);
+
+       return min_voltage_level;
+}
+
+static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       int min_cdclk, cdclk;
+
+       min_cdclk = intel_compute_min_cdclk(state);
+       if (min_cdclk < 0)
+               return min_cdclk;
+
+       cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
+
+       state->cdclk.logical.cdclk = cdclk;
+       state->cdclk.logical.voltage_level =
+               vlv_calc_voltage_level(dev_priv, cdclk);
+
+       if (!state->active_crtcs) {
+               cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+
+               state->cdclk.actual.cdclk = cdclk;
+               state->cdclk.actual.voltage_level =
+                       vlv_calc_voltage_level(dev_priv, cdclk);
+       } else {
+               state->cdclk.actual = state->cdclk.logical;
+       }
+
+       return 0;
+}
+
+static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+       int min_cdclk, cdclk;
+
+       min_cdclk = intel_compute_min_cdclk(state);
+       if (min_cdclk < 0)
+               return min_cdclk;
+
+       /*
+        * FIXME should also account for plane ratio
+        * once 64bpp pixel formats are supported.
+        */
+       cdclk = bdw_calc_cdclk(min_cdclk);
+
+       state->cdclk.logical.cdclk = cdclk;
+       state->cdclk.logical.voltage_level =
+               bdw_calc_voltage_level(cdclk);
+
+       if (!state->active_crtcs) {
+               cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
+
+               state->cdclk.actual.cdclk = cdclk;
+               state->cdclk.actual.voltage_level =
+                       bdw_calc_voltage_level(cdclk);
+       } else {
+               state->cdclk.actual = state->cdclk.logical;
+       }
+
+       return 0;
+}
+
+static int skl_dpll0_vco(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
+       int vco, i;
+
+       vco = state->cdclk.logical.vco;
+       if (!vco)
+               vco = dev_priv->skl_preferred_vco_freq;
+
+       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+               if (!crtc_state->base.enable)
+                       continue;
+
+               if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+                       continue;
+
+               /*
+                * DPLL0 VCO may need to be adjusted to get the correct
+                * clock for eDP. This will affect cdclk as well.
+                */
+               switch (crtc_state->port_clock / 2) {
+               case 108000:
+               case 216000:
+                       vco = 8640000;
+                       break;
+               default:
+                       vco = 8100000;
+                       break;
+               }
+       }
+
+       return vco;
+}
+
+static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+       int min_cdclk, cdclk, vco;
+
+       min_cdclk = intel_compute_min_cdclk(state);
+       if (min_cdclk < 0)
+               return min_cdclk;
+
+       vco = skl_dpll0_vco(state);
+
+       /*
+        * FIXME should also account for plane ratio
+        * once 64bpp pixel formats are supported.
+        */
+       cdclk = skl_calc_cdclk(min_cdclk, vco);
+
+       state->cdclk.logical.vco = vco;
+       state->cdclk.logical.cdclk = cdclk;
+       state->cdclk.logical.voltage_level =
+               skl_calc_voltage_level(cdclk);
+
+       if (!state->active_crtcs) {
+               cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
+
+               state->cdclk.actual.vco = vco;
+               state->cdclk.actual.cdclk = cdclk;
+               state->cdclk.actual.voltage_level =
+                       skl_calc_voltage_level(cdclk);
+       } else {
+               state->cdclk.actual = state->cdclk.logical;
+       }
+
+       return 0;
+}
+
+static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       int min_cdclk, cdclk, vco;
+
+       min_cdclk = intel_compute_min_cdclk(state);
+       if (min_cdclk < 0)
+               return min_cdclk;
+
+       if (IS_GEMINILAKE(dev_priv)) {
+               cdclk = glk_calc_cdclk(min_cdclk);
+               vco = glk_de_pll_vco(dev_priv, cdclk);
+       } else {
+               cdclk = bxt_calc_cdclk(min_cdclk);
+               vco = bxt_de_pll_vco(dev_priv, cdclk);
+       }
+
+       state->cdclk.logical.vco = vco;
+       state->cdclk.logical.cdclk = cdclk;
+       state->cdclk.logical.voltage_level =
+               bxt_calc_voltage_level(cdclk);
+
+       if (!state->active_crtcs) {
+               if (IS_GEMINILAKE(dev_priv)) {
+                       cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
+                       vco = glk_de_pll_vco(dev_priv, cdclk);
+               } else {
+                       cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
+                       vco = bxt_de_pll_vco(dev_priv, cdclk);
+               }
+
+               state->cdclk.actual.vco = vco;
+               state->cdclk.actual.cdclk = cdclk;
+               state->cdclk.actual.voltage_level =
+                       bxt_calc_voltage_level(cdclk);
+       } else {
+               state->cdclk.actual = state->cdclk.logical;
+       }
+
+       return 0;
+}
+
+static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       int min_cdclk, cdclk, vco;
+
+       min_cdclk = intel_compute_min_cdclk(state);
+       if (min_cdclk < 0)
+               return min_cdclk;
+
+       cdclk = cnl_calc_cdclk(min_cdclk);
+       vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+
+       state->cdclk.logical.vco = vco;
+       state->cdclk.logical.cdclk = cdclk;
+       state->cdclk.logical.voltage_level =
+               max(cnl_calc_voltage_level(cdclk),
+                   cnl_compute_min_voltage_level(state));
+
+       if (!state->active_crtcs) {
+               cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
+               vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+
+               state->cdclk.actual.vco = vco;
+               state->cdclk.actual.cdclk = cdclk;
+               state->cdclk.actual.voltage_level =
+                       cnl_calc_voltage_level(cdclk);
+       } else {
+               state->cdclk.actual = state->cdclk.logical;
+       }
+
+       return 0;
+}
+
+static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       unsigned int ref = state->cdclk.logical.ref;
+       int min_cdclk, cdclk, vco;
+
+       min_cdclk = intel_compute_min_cdclk(state);
+       if (min_cdclk < 0)
+               return min_cdclk;
+
+       cdclk = icl_calc_cdclk(min_cdclk, ref);
+       vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+       state->cdclk.logical.vco = vco;
+       state->cdclk.logical.cdclk = cdclk;
+       state->cdclk.logical.voltage_level =
+               max(icl_calc_voltage_level(cdclk),
+                   cnl_compute_min_voltage_level(state));
+
+       if (!state->active_crtcs) {
+               cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
+               vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+               state->cdclk.actual.vco = vco;
+               state->cdclk.actual.cdclk = cdclk;
+               state->cdclk.actual.voltage_level =
+                       icl_calc_voltage_level(cdclk);
+       } else {
+               state->cdclk.actual = state->cdclk.logical;
+       }
+
+       return 0;
+}
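+
+/*
+ * Note on the pattern shared by the *_modeset_calc_cdclk() helpers above
+ * (a summary of the code, not of the Bspec): cdclk.logical holds the
+ * cdclk/vco/voltage level needed for the planned set of active CRTCs,
+ * while cdclk.actual is what will actually be programmed.  When no CRTCs
+ * are active, the actual state is recomputed from force_min_cdclk so the
+ * clock can drop to its minimum; otherwise actual simply mirrors logical.
+ */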
+
+static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+{
+       int max_cdclk_freq = dev_priv->max_cdclk_freq;
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               return 2 * max_cdclk_freq;
+       else if (IS_GEN(dev_priv, 9) ||
+                IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+               return max_cdclk_freq;
+       else if (IS_CHERRYVIEW(dev_priv))
+               return max_cdclk_freq*95/100;
+       else if (INTEL_GEN(dev_priv) < 4)
+               return 2*max_cdclk_freq*90/100;
+       else
+               return max_cdclk_freq*90/100;
+}
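+
+/*
+ * Worked example (illustrative only): on Cherryview the max dotclk is
+ * derated to 95% of cdclk, so with max_cdclk_freq = 320000 kHz this
+ * returns 320000 * 95 / 100 = 304000 kHz.
+ */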
+
+/**
+ * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the maximum CDCLK frequency the platform supports, and also
+ * derive the maximum dot clock frequency the maximum CDCLK frequency
+ * allows.
+ */
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) >= 11) {
+               if (dev_priv->cdclk.hw.ref == 24000)
+                       dev_priv->max_cdclk_freq = 648000;
+               else
+                       dev_priv->max_cdclk_freq = 652800;
+       } else if (IS_CANNONLAKE(dev_priv)) {
+               dev_priv->max_cdclk_freq = 528000;
+       } else if (IS_GEN9_BC(dev_priv)) {
+               u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+               int max_cdclk, vco;
+
+               vco = dev_priv->skl_preferred_vco_freq;
+               WARN_ON(vco != 8100000 && vco != 8640000);
+
+               /*
+                * Use the lower (vco 8640) cdclk values as a
+                * first guess. skl_calc_cdclk() will correct it
+                * if the preferred vco is 8100 instead.
+                */
+               if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+                       max_cdclk = 617143;
+               else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+                       max_cdclk = 540000;
+               else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+                       max_cdclk = 432000;
+               else
+                       max_cdclk = 308571;
+
+               dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+       } else if (IS_GEMINILAKE(dev_priv)) {
+               dev_priv->max_cdclk_freq = 316800;
+       } else if (IS_BROXTON(dev_priv)) {
+               dev_priv->max_cdclk_freq = 624000;
+       } else if (IS_BROADWELL(dev_priv))  {
+               /*
+                * FIXME with extra cooling we can allow
+                * 540 MHz for ULX and 675 MHz for ULT.
+                * How can we know if extra cooling is
+                * available? PCI ID, VTB, something else?
+                */
+               if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+                       dev_priv->max_cdclk_freq = 450000;
+               else if (IS_BDW_ULX(dev_priv))
+                       dev_priv->max_cdclk_freq = 450000;
+               else if (IS_BDW_ULT(dev_priv))
+                       dev_priv->max_cdclk_freq = 540000;
+               else
+                       dev_priv->max_cdclk_freq = 675000;
+       } else if (IS_CHERRYVIEW(dev_priv)) {
+               dev_priv->max_cdclk_freq = 320000;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               dev_priv->max_cdclk_freq = 400000;
+       } else {
+               /* otherwise assume cdclk is fixed */
+               dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
+       }
+
+       dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+
+       DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+                        dev_priv->max_cdclk_freq);
+
+       DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
+                        dev_priv->max_dotclk_freq);
+}
+
+/**
+ * intel_update_cdclk - Determine the current CDCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the current CDCLK frequency.
+ */
+void intel_update_cdclk(struct drm_i915_private *dev_priv)
+{
+       dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+
+       /*
+        * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+        * Programmng [sic] note: bit[9:2] should be programmed to the number
+        * of cdclk that generates 4MHz reference clock freq which is used to
+        * generate GMBus clock. This will vary with the cdclk freq.
+        */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               I915_WRITE(GMBUSFREQ_VLV,
+                          DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+}
+
+static int cnp_rawclk(struct drm_i915_private *dev_priv)
+{
+       u32 rawclk;
+       int divider, fraction;
+
+       if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+               /* 24 MHz */
+               divider = 24000;
+               fraction = 0;
+       } else {
+               /* 19.2 MHz */
+               divider = 19000;
+               fraction = 200;
+       }
+
+       rawclk = CNP_RAWCLK_DIV(divider / 1000);
+       if (fraction) {
+               int numerator = 1;
+
+               rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+                                                          fraction) - 1);
+               if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+                       rawclk |= ICP_RAWCLK_NUM(numerator);
+       }
+
+       I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
+       return divider + fraction;
+}
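+
+/*
+ * Worked example of the encoding above (a sketch): with the 19.2 MHz
+ * strap we get divider = 19000 and fraction = 200, so the register is
+ * programmed with CNP_RAWCLK_DIV(19) and
+ * CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(1000, 200) - 1) = CNP_RAWCLK_DEN(4),
+ * plus ICP_RAWCLK_NUM(1) on ICP+, and the function returns
+ * 19000 + 200 = 19200 kHz.
+ */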
+
+static int pch_rawclk(struct drm_i915_private *dev_priv)
+{
+       return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+}
+
+static int vlv_hrawclk(struct drm_i915_private *dev_priv)
+{
+       /* RAWCLK_FREQ_VLV register updated from power well code */
+       return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+                                     CCK_DISPLAY_REF_CLOCK_CONTROL);
+}
+
+static int g4x_hrawclk(struct drm_i915_private *dev_priv)
+{
+       u32 clkcfg;
+
+       /* hrawclock is 1/4 the FSB frequency */
+       clkcfg = I915_READ(CLKCFG);
+       switch (clkcfg & CLKCFG_FSB_MASK) {
+       case CLKCFG_FSB_400:
+               return 100000;
+       case CLKCFG_FSB_533:
+               return 133333;
+       case CLKCFG_FSB_667:
+               return 166667;
+       case CLKCFG_FSB_800:
+               return 200000;
+       case CLKCFG_FSB_1067:
+       case CLKCFG_FSB_1067_ALT:
+               return 266667;
+       case CLKCFG_FSB_1333:
+       case CLKCFG_FSB_1333_ALT:
+               return 333333;
+       default:
+               return 133333;
+       }
+}
+
+/**
+ * intel_update_rawclk - Determine the current RAWCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the current RAWCLK frequency. RAWCLK is a fixed
+ * frequency clock so this needs to be done only once.
+ */
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+               dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
+       else if (HAS_PCH_SPLIT(dev_priv))
+               dev_priv->rawclk_freq = pch_rawclk(dev_priv);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
+       else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
+               dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+       else
+               /* no rawclk on other platforms, or no need to know it */
+               return;
+
+       DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+}
+
+/**
+ * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
+ * @dev_priv: i915 device
+ */
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) >= 11) {
+               dev_priv->display.set_cdclk = icl_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+       } else if (IS_CANNONLAKE(dev_priv)) {
+               dev_priv->display.set_cdclk = cnl_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
+       } else if (IS_GEN9_LP(dev_priv)) {
+               dev_priv->display.set_cdclk = bxt_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+       } else if (IS_GEN9_BC(dev_priv)) {
+               dev_priv->display.set_cdclk = skl_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
+       } else if (IS_BROADWELL(dev_priv)) {
+               dev_priv->display.set_cdclk = bdw_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
+       } else if (IS_CHERRYVIEW(dev_priv)) {
+               dev_priv->display.set_cdclk = chv_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               dev_priv->display.set_cdclk = vlv_set_cdclk;
+               dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               dev_priv->display.get_cdclk = icl_get_cdclk;
+       else if (IS_CANNONLAKE(dev_priv))
+               dev_priv->display.get_cdclk = cnl_get_cdclk;
+       else if (IS_GEN9_LP(dev_priv))
+               dev_priv->display.get_cdclk = bxt_get_cdclk;
+       else if (IS_GEN9_BC(dev_priv))
+               dev_priv->display.get_cdclk = skl_get_cdclk;
+       else if (IS_BROADWELL(dev_priv))
+               dev_priv->display.get_cdclk = bdw_get_cdclk;
+       else if (IS_HASWELL(dev_priv))
+               dev_priv->display.get_cdclk = hsw_get_cdclk;
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->display.get_cdclk = vlv_get_cdclk;
+       else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+               dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
+       else if (IS_GEN(dev_priv, 5))
+               dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
+       else if (IS_GM45(dev_priv))
+               dev_priv->display.get_cdclk = gm45_get_cdclk;
+       else if (IS_G45(dev_priv))
+               dev_priv->display.get_cdclk = g33_get_cdclk;
+       else if (IS_I965GM(dev_priv))
+               dev_priv->display.get_cdclk = i965gm_get_cdclk;
+       else if (IS_I965G(dev_priv))
+               dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
+       else if (IS_PINEVIEW(dev_priv))
+               dev_priv->display.get_cdclk = pnv_get_cdclk;
+       else if (IS_G33(dev_priv))
+               dev_priv->display.get_cdclk = g33_get_cdclk;
+       else if (IS_I945GM(dev_priv))
+               dev_priv->display.get_cdclk = i945gm_get_cdclk;
+       else if (IS_I945G(dev_priv))
+               dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
+       else if (IS_I915GM(dev_priv))
+               dev_priv->display.get_cdclk = i915gm_get_cdclk;
+       else if (IS_I915G(dev_priv))
+               dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk;
+       else if (IS_I865G(dev_priv))
+               dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk;
+       else if (IS_I85X(dev_priv))
+               dev_priv->display.get_cdclk = i85x_get_cdclk;
+       else if (IS_I845G(dev_priv))
+               dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
+       else { /* 830 */
+               WARN(!IS_I830(dev_priv),
+                    "Unknown platform. Assuming 133 MHz CDCLK\n");
+               dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
+       }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
new file mode 100644 (file)
index 0000000..4d6f7f5
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CDCLK_H__
+#define __INTEL_CDCLK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_cdclk_state;
+struct intel_crtc_state;
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+void intel_cdclk_init(struct drm_i915_private *i915);
+void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+                                  const struct intel_cdclk_state *a,
+                                  const struct intel_cdclk_state *b);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
+                              const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+                        const struct intel_cdclk_state *b);
+void intel_cdclk_swap_state(struct intel_atomic_state *state);
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+                                const struct intel_cdclk_state *old_state,
+                                const struct intel_cdclk_state *new_state,
+                                enum pipe pipe);
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+                                 const struct intel_cdclk_state *old_state,
+                                 const struct intel_cdclk_state *new_state,
+                                 enum pipe pipe);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+                           const char *context);
+
+#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
new file mode 100644 (file)
index 0000000..23a84dd
--- /dev/null
@@ -0,0 +1,1428 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_color.h"
+#include "intel_drv.h"
+
+#define CTM_COEFF_SIGN (1ULL << 63)
+
+#define CTM_COEFF_1_0  (1ULL << 32)
+#define CTM_COEFF_2_0  (CTM_COEFF_1_0 << 1)
+#define CTM_COEFF_4_0  (CTM_COEFF_2_0 << 1)
+#define CTM_COEFF_8_0  (CTM_COEFF_4_0 << 1)
+#define CTM_COEFF_0_5  (CTM_COEFF_1_0 >> 1)
+#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1)
+#define CTM_COEFF_0_125        (CTM_COEFF_0_25 >> 1)
+
+#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
+
+#define CTM_COEFF_NEGATIVE(coeff)      (((coeff) & CTM_COEFF_SIGN) != 0)
+#define CTM_COEFF_ABS(coeff)           ((coeff) & (CTM_COEFF_SIGN - 1))
+
+#define LEGACY_LUT_LENGTH              256
+
+/*
+ * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
+ * format). This macro takes the coefficient we want transformed and the
+ * number of fractional bits.
+ *
+ * We only have a 9-bit precision window which slides depending on the value
+ * of the CTM coefficient, and we write the value starting from bit 3. We also
+ * round the value.
+ */
+#define ILK_CSC_COEFF_FP(coeff, fbits) \
+       (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
+
+#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
+#define ILK_CSC_COEFF_1_0 0x7800
+
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+
+static const u16 ilk_csc_off_zero[3] = {};
+
+static const u16 ilk_csc_coeff_identity[9] = {
+       ILK_CSC_COEFF_1_0, 0, 0,
+       0, ILK_CSC_COEFF_1_0, 0,
+       0, 0, ILK_CSC_COEFF_1_0,
+};
+
+static const u16 ilk_csc_postoff_limited_range[3] = {
+       ILK_CSC_POSTOFF_LIMITED_RANGE,
+       ILK_CSC_POSTOFF_LIMITED_RANGE,
+       ILK_CSC_POSTOFF_LIMITED_RANGE,
+};
+
+static const u16 ilk_csc_coeff_limited_range[9] = {
+       ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+       0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+       0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+};
+
+/*
+ * These values are direct register values specified in the Bspec
+ * for the RGB->YUV conversion matrix (colorspace BT709).
+ */
+static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
+       0x1e08, 0x9cc0, 0xb528,
+       0x2ba8, 0x09d8, 0x37e8,
+       0xbce8, 0x9ad8, 0x1e08,
+};
+
+/* Post offset values for RGB->YCBCR conversion */
+static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
+       0x0800, 0x0100, 0x0800,
+};
+
+static bool lut_is_legacy(const struct drm_property_blob *lut)
+{
+       return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
+{
+       return !crtc_state->base.degamma_lut &&
+               !crtc_state->base.ctm &&
+               crtc_state->base.gamma_lut &&
+               lut_is_legacy(crtc_state->base.gamma_lut);
+}
+
+/*
+ * When using limited range, multiply the matrix given by userspace by
+ * the matrix that we would use for the limited range.
+ */
+static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
+{
+       int i;
+
+       for (i = 0; i < 9; i++) {
+               u64 user_coeff = input[i];
+               u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
+               u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
+                                         CTM_COEFF_4_0 - 1) >> 2;
+
+               /*
+                * By scaling every coefficient with limited range (16-235)
+                * vs. full range (0-255), the final output will be scaled down
+                * to fit in the limited range supported by the panel.
+                */
+               result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
+               result[i] |= user_coeff & CTM_COEFF_SIGN;
+       }
+
+       return result;
+}
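+
+/*
+ * Worked example (illustrative only): a 1.0 user coefficient
+ * (CTM_COEFF_1_0) is clamped below 4.0 and pre-shifted to U2.30, so the
+ * result is CTM_COEFF_LIMITED_RANGE * (1 << 30) >> 30, i.e. exactly
+ * CTM_COEFF_LIMITED_RANGE (~0.86), which compresses full range 0-255
+ * into limited range 16-235.
+ */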
+
+static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+                               const u16 preoff[3],
+                               const u16 coeff[9],
+                               const u16 postoff[3])
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+       I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+       I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+
+       I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+       I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+
+       I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+       I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+
+       I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+       I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+
+       if (INTEL_GEN(dev_priv) >= 7) {
+               I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
+               I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
+               I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+       }
+}
+
+static void icl_update_output_csc(struct intel_crtc *crtc,
+                                 const u16 preoff[3],
+                                 const u16 coeff[9],
+                                 const u16 postoff[3])
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+       I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+       I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+
+       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+
+       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+
+       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+
+       I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+       I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+       I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       /*
+        * FIXME if there's a gamma LUT after the CSC, we should
+        * do the range compression using the gamma LUT instead.
+        */
+       return crtc_state->limited_color_range &&
+               (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+                IS_GEN_RANGE(dev_priv, 9, 10));
+}
+
+static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+                               u16 coeffs[9])
+{
+       const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+       const u64 *input;
+       u64 temp[9];
+       int i;
+
+       if (ilk_csc_limited_range(crtc_state))
+               input = ctm_mult_by_limited(temp, ctm->matrix);
+       else
+               input = ctm->matrix;
+
+       /*
+        * Convert fixed point S31.32 input to format supported by the
+        * hardware.
+        */
+       for (i = 0; i < 9; i++) {
+               u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
+
+               /*
+                * Clamp input value to min/max supported by
+                * hardware.
+                */
+               abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+               coeffs[i] = 0;
+
+               /* sign bit */
+               if (CTM_COEFF_NEGATIVE(input[i]))
+                       coeffs[i] |= 1 << 15;
+
+               if (abs_coeff < CTM_COEFF_0_125)
+                       coeffs[i] |= (3 << 12) |
+                               ILK_CSC_COEFF_FP(abs_coeff, 12);
+               else if (abs_coeff < CTM_COEFF_0_25)
+                       coeffs[i] |= (2 << 12) |
+                               ILK_CSC_COEFF_FP(abs_coeff, 11);
+               else if (abs_coeff < CTM_COEFF_0_5)
+                       coeffs[i] |= (1 << 12) |
+                               ILK_CSC_COEFF_FP(abs_coeff, 10);
+               else if (abs_coeff < CTM_COEFF_1_0)
+                       coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+               else if (abs_coeff < CTM_COEFF_2_0)
+                       coeffs[i] |= (7 << 12) |
+                               ILK_CSC_COEFF_FP(abs_coeff, 8);
+               else
+                       coeffs[i] |= (6 << 12) |
+                               ILK_CSC_COEFF_FP(abs_coeff, 7);
+       }
+}
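+
+/*
+ * Worked example for the conversion above (illustrative only): an
+ * identity coefficient of 1.0 (CTM_COEFF_1_0 = 1ULL << 32) falls in the
+ * [1.0, 2.0) bucket, so the exponent field is 7 << 12 and the mantissa
+ * is ILK_CSC_COEFF_FP(1ULL << 32, 8) = 0x800, giving the register value
+ * 0x7800, which matches ILK_CSC_COEFF_1_0.  A negative coefficient would
+ * additionally set the sign bit (1 << 15).
+ */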
+
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+       if (crtc_state->base.ctm) {
+               u16 coeff[9];
+
+               ilk_csc_convert_ctm(crtc_state, coeff);
+               ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
+                                   limited_color_range ?
+                                   ilk_csc_postoff_limited_range :
+                                   ilk_csc_off_zero);
+       } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+                                   ilk_csc_coeff_rgb_to_ycbcr,
+                                   ilk_csc_postoff_rgb_to_ycbcr);
+       } else if (limited_color_range) {
+               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+                                   ilk_csc_coeff_limited_range,
+                                   ilk_csc_postoff_limited_range);
+       } else if (crtc_state->csc_enable) {
+               /*
+                * On GLK+ both pipe CSC and degamma LUT are controlled
+                * by csc_enable. Hence for the cases where the degamma
+                * LUT is needed but CSC is not, we need to load an
+                * identity matrix.
+                */
+               WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+
+               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+                                   ilk_csc_coeff_identity,
+                                   ilk_csc_off_zero);
+       }
+
+       I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+}
+
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (crtc_state->base.ctm) {
+               u16 coeff[9];
+
+               ilk_csc_convert_ctm(crtc_state, coeff);
+               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+                                   coeff, ilk_csc_off_zero);
+       }
+
+       if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+               icl_update_output_csc(crtc, ilk_csc_off_zero,
+                                     ilk_csc_coeff_rgb_to_ycbcr,
+                                     ilk_csc_postoff_rgb_to_ycbcr);
+       } else if (crtc_state->limited_color_range) {
+               icl_update_output_csc(crtc, ilk_csc_off_zero,
+                                     ilk_csc_coeff_limited_range,
+                                     ilk_csc_postoff_limited_range);
+       }
+
+       I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+}
+
+/*
+ * Set up the pipe CSC unit on CherryView.
+ */
+static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       if (crtc_state->base.ctm) {
+               const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+               u16 coeffs[9] = {};
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+                       u64 abs_coeff =
+                               ((1ULL << 63) - 1) & ctm->matrix[i];
+
+                       /* Round coefficient. */
+                       abs_coeff += 1 << (32 - 13);
+                       /* Clamp to hardware limits. */
+                       abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+                       /* Write coefficients in S3.12 format. */
+                       if (ctm->matrix[i] & (1ULL << 63))
+                               coeffs[i] = 1 << 15;
+                       coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+                       coeffs[i] |= (abs_coeff >> 20) & 0xfff;
+               }
+
+               I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
+                          coeffs[1] << 16 | coeffs[0]);
+               I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
+                          coeffs[3] << 16 | coeffs[2]);
+               I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
+                          coeffs[5] << 16 | coeffs[4]);
+               I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
+                          coeffs[7] << 16 | coeffs[6]);
+               I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+       }
+
+       I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+}
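+
+/*
+ * Worked example of the S3.12 packing above (illustrative only): a CTM
+ * coefficient of 1.0 (1ULL << 32) rounds to an integer part of 1 with a
+ * zero fraction, i.e. 0x1000; 0.5 packs to 0x0800, and -0.5 to 0x8800
+ * with the sign bit set.
+ */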
+
+/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
+static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
+{
+       return (color->red & 0xff) << 16 |
+               (color->green & 0xff) << 8 |
+               (color->blue & 0xff);
+}
+
+/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
+static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
+{
+       return (color->red >> 8) << 16 |
+               (color->green >> 8) << 8 |
+               (color->blue >> 8);
+}
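+
+/*
+ * Example of the 10.6 split above (illustrative only): a 16-bit red
+ * value of 0xabcd contributes 0xcd to bits 23:16 of the even DW and
+ * 0xab to bits 23:16 of the odd DW.
+ */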
+
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+       return drm_color_lut_extract(color->red, 10) << 20 |
+               drm_color_lut_extract(color->green, 10) << 10 |
+               drm_color_lut_extract(color->blue, 10);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC. */
+static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
+                                   const struct drm_property_blob *blob)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       int i;
+
+       if (HAS_GMCH(dev_priv)) {
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+                       assert_dsi_pll_enabled(dev_priv);
+               else
+                       assert_pll_enabled(dev_priv, pipe);
+       }
+
+       if (blob) {
+               const struct drm_color_lut *lut = blob->data;
+
+               for (i = 0; i < 256; i++) {
+                       u32 word =
+                               (drm_color_lut_extract(lut[i].red, 8) << 16) |
+                               (drm_color_lut_extract(lut[i].green, 8) << 8) |
+                               drm_color_lut_extract(lut[i].blue, 8);
+
+                       if (HAS_GMCH(dev_priv))
+                               I915_WRITE(PALETTE(pipe, i), word);
+                       else
+                               I915_WRITE(LGC_PALETTE(pipe, i), word);
+               }
+       }
+}
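+
+/*
+ * Example (illustrative only): a full-scale white LUT entry
+ * (0xffff, 0xffff, 0xffff) is reduced to 8 bits per channel and packed
+ * into a single 0x00ffffff palette word.
+ */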
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
+}
+
+static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 val;
+
+       val = I915_READ(PIPECONF(pipe));
+       val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
+       val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+       I915_WRITE(PIPECONF(pipe), val);
+}
+
+static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 val;
+
+       val = I915_READ(PIPECONF(pipe));
+       val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
+       val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+       I915_WRITE(PIPECONF(pipe), val);
+
+       ilk_load_csc_matrix(crtc_state);
+}
+
+static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+
+       ilk_load_csc_matrix(crtc_state);
+}
+
+static void skl_color_commit(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 val = 0;
+
+       /*
+        * We don't (yet) allow userspace to control the pipe background color,
+        * so force it to black, but apply pipe gamma and CSC appropriately
+        * so that its handling will match how we program our planes.
+        */
+       if (crtc_state->gamma_enable)
+               val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
+       if (crtc_state->csc_enable)
+               val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
+       I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
+
+       I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_load_csc_matrix(crtc_state);
+       else
+               ilk_load_csc_matrix(crtc_state);
+}
+
+static void i965_load_lut_10p6(struct intel_crtc *crtc,
+                              const struct drm_property_blob *blob)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_color_lut *lut = blob->data;
+       int i, lut_size = drm_color_lut_size(blob);
+       enum pipe pipe = crtc->pipe;
+
+       for (i = 0; i < lut_size - 1; i++) {
+               I915_WRITE(PALETTE(pipe, 2 * i + 0),
+                          i965_lut_10p6_ldw(&lut[i]));
+               I915_WRITE(PALETTE(pipe, 2 * i + 1),
+                          i965_lut_10p6_udw(&lut[i]));
+       }
+
+       I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
+       I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
+       I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
+}
+
+static void i965_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+
+       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+               i9xx_load_luts(crtc_state);
+       else
+               i965_load_lut_10p6(crtc, gamma_lut);
+}
+
+static void ilk_load_lut_10(struct intel_crtc *crtc,
+                           const struct drm_property_blob *blob)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_color_lut *lut = blob->data;
+       int i, lut_size = drm_color_lut_size(blob);
+       enum pipe pipe = crtc->pipe;
+
+       for (i = 0; i < lut_size; i++)
+               I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+}
+
+static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+
+       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+               i9xx_load_luts(crtc_state);
+       else
+               ilk_load_lut_10(crtc, gamma_lut);
+}
+
+static int ivb_lut_10_size(u32 prec_index)
+{
+       if (prec_index & PAL_PREC_SPLIT_MODE)
+               return 512;
+       else
+               return 1024;
+}
+
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ *  supported and must not be enabled."
+ */
+static void ivb_load_lut_10(struct intel_crtc *crtc,
+                           const struct drm_property_blob *blob,
+                           u32 prec_index)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int hw_lut_size = ivb_lut_10_size(prec_index);
+       const struct drm_color_lut *lut = blob->data;
+       int i, lut_size = drm_color_lut_size(blob);
+       enum pipe pipe = crtc->pipe;
+
+       for (i = 0; i < hw_lut_size; i++) {
+               /* We discard half the user entries in split gamma mode */
+               const struct drm_color_lut *entry =
+                       &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+               I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
+               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+       }
+
+       /*
+        * Reset the index, otherwise it prevents the legacy palette from
+        * being written properly.
+        */
+       I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
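+
+/*
+ * Worked example of the entry selection above (a sketch, assuming a
+ * 1024-entry user LUT): in split gamma mode hw_lut_size is 512, so
+ * entry i maps to lut[i * 1023 / 511], i.e. roughly every other user
+ * entry gets programmed and about half are discarded.
+ */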
+
+/* On BDW+ the index auto increment mode actually works */
+static void bdw_load_lut_10(struct intel_crtc *crtc,
+                           const struct drm_property_blob *blob,
+                           u32 prec_index)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int hw_lut_size = ivb_lut_10_size(prec_index);
+       const struct drm_color_lut *lut = blob->data;
+       int i, lut_size = drm_color_lut_size(blob);
+       enum pipe pipe = crtc->pipe;
+
+       I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+                  PAL_PREC_AUTO_INCREMENT);
+
+       for (i = 0; i < hw_lut_size; i++) {
+               /* We discard half the user entries in split gamma mode */
+               const struct drm_color_lut *entry =
+                       &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+       }
+
+       /*
+        * Reset the index, otherwise it prevents the legacy palette from
+        * being written properly.
+        */
+       I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
+
+static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* Program the max register to clamp values > 1.0. */
+       I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+       I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+       I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+
+       /*
+        * Program the gc max 2 register to clamp values > 1.0.
+        * ToDo: Extend the ABI to be able to program values
+        * from 3.0 to 7.0
+        */
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+               I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+               I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+               I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+       }
+}
+
+static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+               i9xx_load_luts(crtc_state);
+       } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+               ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+                               PAL_PREC_INDEX_VALUE(0));
+               ivb_load_lut_ext_max(crtc);
+               ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+                               PAL_PREC_INDEX_VALUE(512));
+       } else {
+               const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+               ivb_load_lut_10(crtc, blob,
+                               PAL_PREC_INDEX_VALUE(0));
+               ivb_load_lut_ext_max(crtc);
+       }
+}
+
+static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+               i9xx_load_luts(crtc_state);
+       } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+               bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+                               PAL_PREC_INDEX_VALUE(0));
+               ivb_load_lut_ext_max(crtc);
+               bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+                               PAL_PREC_INDEX_VALUE(512));
+       } else {
+               const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+               bdw_load_lut_10(crtc, blob,
+                               PAL_PREC_INDEX_VALUE(0));
+               ivb_load_lut_ext_max(crtc);
+       }
+}
+
+static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+       const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+       u32 i;
+
+       /*
+        * When setting the auto-increment bit, the hardware seems to
+        * ignore the index bits, so we need to reset it to index 0
+        * separately.
+        */
+       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
+       for (i = 0; i < lut_size; i++) {
+               /*
+                * The first 33 entries represent the range from 0 to 1.0.
+                * The 34th and 35th entries represent the extended range
+                * inputs 3.0 and 7.0 respectively, currently clamped
+                * at 1.0. Since the precision is 16 bit, the user
+                * value can be written directly to the register.
+                * The pipe degamma table on GLK+ doesn't support
+                * different values per channel, so this just programs
+                * the green value (expected to equal red and blue)
+                * into the LUT registers.
+                * ToDo: Extend to max 7.0. Enable 32 bit input values
+                * as compared to just 16 to achieve this.
+                */
+               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+       }
+
+       /* Clamp values > 1.0. */
+       while (i++ < 35)
+               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+}
+
+static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+       u32 i;
+
+       /*
+        * When setting the auto-increment bit, the hardware seems to
+        * ignore the index bits, so we need to reset it to index 0
+        * separately.
+        */
+       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
+       for (i = 0; i < lut_size; i++) {
+               u32 v = (i << 16) / (lut_size - 1);
+
+               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+       }
+
+       /* Clamp values > 1.0. */
+       while (i++ < 35)
+               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+}
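+
+/*
+ * Worked example of the linear ramp above (a sketch, assuming the
+ * 33-entry GLK+ degamma table): v = (i << 16) / 32, so entry 16
+ * programs 0x8000 (0.5) and entry 32 programs 0x10000 (1.0) in the
+ * hardware's .16 fixed-point input range.
+ */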
+
+static void glk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       /*
+        * On GLK+ both pipe CSC and degamma LUT are controlled
+        * by csc_enable. Hence for the cases where the CSC is
+        * needed but the degamma LUT is not, we need to load a
+        * linear degamma LUT. In fact we'll just always load
+        * the degamma LUT so that we don't have to reload
+        * it every time the pipe CSC is being enabled.
+        */
+       if (crtc_state->base.degamma_lut)
+               glk_load_degamma_lut(crtc_state);
+       else
+               glk_load_degamma_lut_linear(crtc_state);
+
+       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+               i9xx_load_luts(crtc_state);
+       } else {
+               bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+               ivb_load_lut_ext_max(crtc);
+       }
+}
+
+/* ilk+ "12.4" interpolated format (high 10 bits) */
+static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color)
+{
+       return (color->red >> 6) << 20 | (color->green >> 6) << 10 |
+               (color->blue >> 6);
+}
+
+/* ilk+ "12.4" interpolated format (low 6 bits) */
+static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color)
+{
+       return (color->red & 0x3f) << 24 | (color->green & 0x3f) << 14 |
+               (color->blue & 0x3f) << 4;
+}
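+
+/*
+ * Example of the 12.4 split above (illustrative only): a 16-bit red
+ * value of 0xffff contributes its high 10 bits (0x3ff) to the udw word
+ * and its low 6 bits (0x3f) to the ldw word.
+ */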
+
+static void
+icl_load_gcmax(const struct intel_crtc_state *crtc_state,
+              const struct drm_color_lut *color)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
+       I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
+       I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
+       I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+}
+
+static void
+icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+       const struct drm_color_lut *lut = blob->data;
+       enum pipe pipe = crtc->pipe;
+       u32 i;
+
+       /*
+        * Each entry in the superfine segment of the multi-segment LUT
+        * corresponds to a step of 1/(8 * 128 * 256).
+        *
+        * The superfine segment has 9 entries, corresponding to the values
+        * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+        */
+       I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+
+       for (i = 0; i < 9; i++) {
+               const struct drm_color_lut *entry = &lut[i];
+
+               I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
+                          ilk_lut_12p4_ldw(entry));
+               I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
+                          ilk_lut_12p4_udw(entry));
+       }
+}
+
+static void
+icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+       const struct drm_color_lut *lut = blob->data;
+       const struct drm_color_lut *entry;
+       enum pipe pipe = crtc->pipe;
+       u32 i;
+
+       /*
+        * Program Fine segment (let's call it seg2)...
+        *
+        * The fine segment's step is 1/(128 * 256), i.e. 1/(128 * 256),
+        * 2/(128 * 256) ... 256/(128 * 256). So in order to program the fine
+        * segment of the LUT we need to pick every 8th entry in the LUT, and
+        * program 256 indexes.
+        *
+        * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
+        * with seg2[0] being unused by the hardware.
+        */
+       I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+       for (i = 1; i < 257; i++) {
+               entry = &lut[i * 8];
+               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
+               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+       }
+
+       /*
+        * Program Coarse segment (let's call it seg3)...
+        *
+        * The coarse segment starts from index 0 and its step is 1/256, i.e.
+        * 0, 1/256, 2/256 ... 256/256. As per the description of each entry in
+        * the LUT above, we need to pick every (8 * 128)th entry in the LUT, and
+        * program 256 of those.
+        *
+        * The spec is not very clear about whether entries seg3[0] and seg3[1]
+        * are being used or not, but we still need to program these to advance
+        * the index.
+        */
+       for (i = 0; i < 256; i++) {
+               entry = &lut[i * 8 * 128];
+               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
+               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+       }
+
+       /* The last entry in the LUT is to be programmed in GCMAX */
+       entry = &lut[256 * 8 * 128];
+       icl_load_gcmax(crtc_state, entry);
+       ivb_load_lut_ext_max(crtc);
+}
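+
+/*
+ * Index summary for the two helpers above (a summary of the code, not
+ * of the Bspec): the superfine segment programs lut[0..8] directly, the
+ * fine segment programs lut[i * 8] for i = 1..256, the coarse segment
+ * programs lut[i * 8 * 128] for i = 0..255, and the final entry
+ * lut[256 * 8 * 128] lands in GCMAX.
+ */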
+
+static void icl_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       if (crtc_state->base.degamma_lut)
+               glk_load_degamma_lut(crtc_state);
+
+       switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+       case GAMMA_MODE_MODE_8BIT:
+               i9xx_load_luts(crtc_state);
+               break;
+
+       case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+               icl_program_gamma_superfine_segment(crtc_state);
+               icl_program_gamma_multi_segment(crtc_state);
+               break;
+
+       default:
+               bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+               ivb_load_lut_ext_max(crtc);
+       }
+}
+
+static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
+{
+       return drm_color_lut_extract(color->green, 14) << 16 |
+               drm_color_lut_extract(color->blue, 14);
+}
+
+static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
+{
+       return drm_color_lut_extract(color->red, 14);
+}
+
+static void chv_load_cgm_degamma(struct intel_crtc *crtc,
+                                const struct drm_property_blob *blob)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_color_lut *lut = blob->data;
+       int i, lut_size = drm_color_lut_size(blob);
+       enum pipe pipe = crtc->pipe;
+
+       for (i = 0; i < lut_size; i++) {
+               I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
+                          chv_cgm_degamma_ldw(&lut[i]));
+               I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
+                          chv_cgm_degamma_udw(&lut[i]));
+       }
+}
+
+static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
+{
+       return drm_color_lut_extract(color->green, 10) << 16 |
+               drm_color_lut_extract(color->blue, 10);
+}
+
+static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
+{
+       return drm_color_lut_extract(color->red, 10);
+}
+
+static void chv_load_cgm_gamma(struct intel_crtc *crtc,
+                              const struct drm_property_blob *blob)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_color_lut *lut = blob->data;
+       int i, lut_size = drm_color_lut_size(blob);
+       enum pipe pipe = crtc->pipe;
+
+       for (i = 0; i < lut_size; i++) {
+               I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
+                          chv_cgm_gamma_ldw(&lut[i]));
+               I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
+                          chv_cgm_gamma_udw(&lut[i]));
+       }
+}
+
+static void chv_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+       cherryview_load_csc_matrix(crtc_state);
+
+       if (crtc_state_is_legacy_gamma(crtc_state)) {
+               i9xx_load_luts(crtc_state);
+               return;
+       }
+
+       if (degamma_lut)
+               chv_load_cgm_degamma(crtc, degamma_lut);
+
+       if (gamma_lut)
+               chv_load_cgm_gamma(crtc, gamma_lut);
+}
+
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       dev_priv->display.load_luts(crtc_state);
+}
+
+void intel_color_commit(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       dev_priv->display.color_commit(crtc_state);
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       return dev_priv->display.color_check(crtc_state);
+}
+
+void intel_color_get_config(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       if (dev_priv->display.read_luts)
+               dev_priv->display.read_luts(crtc_state);
+}
+
+static bool need_plane_update(struct intel_plane *plane,
+                             const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+       /*
+        * On pre-SKL the pipe gamma enable and pipe csc enable for
+        * the pipe bottom color are configured via the primary plane.
+        * We have to reconfigure that even if the plane is inactive.
+        */
+       return crtc_state->active_planes & BIT(plane->id) ||
+               (INTEL_GEN(dev_priv) < 9 &&
+                plane->id == PLANE_PRIMARY);
+}
+
+static int
+intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_atomic_state *state =
+               to_intel_atomic_state(new_crtc_state->base.state);
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       struct intel_plane *plane;
+
+       if (!new_crtc_state->base.active ||
+           drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+               return 0;
+
+       if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
+           new_crtc_state->csc_enable == old_crtc_state->csc_enable)
+               return 0;
+
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+               struct intel_plane_state *plane_state;
+
+               if (!need_plane_update(plane, new_crtc_state))
+                       continue;
+
+               plane_state = intel_atomic_get_plane_state(state, plane);
+               if (IS_ERR(plane_state))
+                       return PTR_ERR(plane_state);
+
+               new_crtc_state->update_planes |= BIT(plane->id);
+       }
+
+       return 0;
+}
+
+static int check_lut_size(const struct drm_property_blob *lut, int expected)
+{
+       int len;
+
+       if (!lut)
+               return 0;
+
+       len = drm_color_lut_size(lut);
+       if (len != expected) {
+               DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+                             len, expected);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int check_luts(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+       int gamma_length, degamma_length;
+       u32 gamma_tests, degamma_tests;
+
+       /* Always allow legacy gamma LUT with no further checking. */
+       if (crtc_state_is_legacy_gamma(crtc_state))
+               return 0;
+
+       /* C8 relies on its palette being stored in the legacy LUT */
+       if (crtc_state->c8_planes) {
+               DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
+               return -EINVAL;
+       }
+
+       degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+       gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+       degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
+       gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
+
+       if (check_lut_size(degamma_lut, degamma_length) ||
+           check_lut_size(gamma_lut, gamma_length))
+               return -EINVAL;
+
+       if (drm_color_lut_check(degamma_lut, degamma_tests) ||
+           drm_color_lut_check(gamma_lut, gamma_tests))
+               return -EINVAL;
+
+       return 0;
+}
+
+static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
+{
+       if (!crtc_state->gamma_enable ||
+           crtc_state_is_legacy_gamma(crtc_state))
+               return GAMMA_MODE_MODE_8BIT;
+       else
+               return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+}
+
+static int i9xx_color_check(struct intel_crtc_state *crtc_state)
+{
+       int ret;
+
+       ret = check_luts(crtc_state);
+       if (ret)
+               return ret;
+
+       crtc_state->gamma_enable =
+               crtc_state->base.gamma_lut &&
+               !crtc_state->c8_planes;
+
+       crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+
+       ret = intel_color_add_affected_planes(crtc_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
+{
+       u32 cgm_mode = 0;
+
+       if (crtc_state_is_legacy_gamma(crtc_state))
+               return 0;
+
+       if (crtc_state->base.degamma_lut)
+               cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
+       if (crtc_state->base.ctm)
+               cgm_mode |= CGM_PIPE_MODE_CSC;
+       if (crtc_state->base.gamma_lut)
+               cgm_mode |= CGM_PIPE_MODE_GAMMA;
+
+       return cgm_mode;
+}
+
+/*
+ * CHV color pipeline:
+ * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
+ * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
+ *
+ * We always bypass the WGC csc and use the CGM csc
+ * instead since it has degamma and better precision.
+ */
+static int chv_color_check(struct intel_crtc_state *crtc_state)
+{
+       int ret;
+
+       ret = check_luts(crtc_state);
+       if (ret)
+               return ret;
+
+       /*
+        * Pipe gamma will be used only for the legacy LUT.
+        * Otherwise we bypass it and use the CGM gamma instead.
+        */
+       crtc_state->gamma_enable =
+               crtc_state_is_legacy_gamma(crtc_state) &&
+               !crtc_state->c8_planes;
+
+       crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+       crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
+
+       ret = intel_color_add_affected_planes(crtc_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+       if (!crtc_state->gamma_enable ||
+           crtc_state_is_legacy_gamma(crtc_state))
+               return GAMMA_MODE_MODE_8BIT;
+       else
+               return GAMMA_MODE_MODE_10BIT;
+}
+
+static int ilk_color_check(struct intel_crtc_state *crtc_state)
+{
+       int ret;
+
+       ret = check_luts(crtc_state);
+       if (ret)
+               return ret;
+
+       crtc_state->gamma_enable =
+               crtc_state->base.gamma_lut &&
+               !crtc_state->c8_planes;
+
+       /*
+        * We don't expose the ctm on ilk/snb currently,
+        * nor do we enable YCbCr output. Also RGB limited
+        * range output is handled by the hw automagically.
+        */
+       crtc_state->csc_enable = false;
+
+       crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
+
+       crtc_state->csc_mode = 0;
+
+       ret = intel_color_add_affected_planes(crtc_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+       if (!crtc_state->gamma_enable ||
+           crtc_state_is_legacy_gamma(crtc_state))
+               return GAMMA_MODE_MODE_8BIT;
+       else if (crtc_state->base.gamma_lut &&
+                crtc_state->base.degamma_lut)
+               return GAMMA_MODE_MODE_SPLIT;
+       else
+               return GAMMA_MODE_MODE_10BIT;
+}
+
+static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+       bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+       /*
+        * CSC comes after the LUT in degamma, RGB->YCbCr,
+        * and RGB full->limited range mode.
+        */
+       if (crtc_state->base.degamma_lut ||
+           crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+           limited_color_range)
+               return 0;
+
+       return CSC_POSITION_BEFORE_GAMMA;
+}
+
+static int ivb_color_check(struct intel_crtc_state *crtc_state)
+{
+       bool limited_color_range = ilk_csc_limited_range(crtc_state);
+       int ret;
+
+       ret = check_luts(crtc_state);
+       if (ret)
+               return ret;
+
+       crtc_state->gamma_enable =
+               (crtc_state->base.gamma_lut ||
+                crtc_state->base.degamma_lut) &&
+               !crtc_state->c8_planes;
+
+       crtc_state->csc_enable =
+               crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+               crtc_state->base.ctm || limited_color_range;
+
+       crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
+
+       crtc_state->csc_mode = ivb_csc_mode(crtc_state);
+
+       ret = intel_color_add_affected_planes(crtc_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+       if (!crtc_state->gamma_enable ||
+           crtc_state_is_legacy_gamma(crtc_state))
+               return GAMMA_MODE_MODE_8BIT;
+       else
+               return GAMMA_MODE_MODE_10BIT;
+}
+
+static int glk_color_check(struct intel_crtc_state *crtc_state)
+{
+       int ret;
+
+       ret = check_luts(crtc_state);
+       if (ret)
+               return ret;
+
+       crtc_state->gamma_enable =
+               crtc_state->base.gamma_lut &&
+               !crtc_state->c8_planes;
+
+       /* On GLK+ degamma LUT is controlled by csc_enable */
+       crtc_state->csc_enable =
+               crtc_state->base.degamma_lut ||
+               crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+               crtc_state->base.ctm || crtc_state->limited_color_range;
+
+       crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
+
+       crtc_state->csc_mode = 0;
+
+       ret = intel_color_add_affected_planes(crtc_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+       u32 gamma_mode = 0;
+
+       if (crtc_state->base.degamma_lut)
+               gamma_mode |= PRE_CSC_GAMMA_ENABLE;
+
+       if (crtc_state->base.gamma_lut &&
+           !crtc_state->c8_planes)
+               gamma_mode |= POST_CSC_GAMMA_ENABLE;
+
+       if (!crtc_state->base.gamma_lut ||
+           crtc_state_is_legacy_gamma(crtc_state))
+               gamma_mode |= GAMMA_MODE_MODE_8BIT;
+       else
+               gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
+
+       return gamma_mode;
+}
+
+static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+       u32 csc_mode = 0;
+
+       if (crtc_state->base.ctm)
+               csc_mode |= ICL_CSC_ENABLE;
+
+       if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+           crtc_state->limited_color_range)
+               csc_mode |= ICL_OUTPUT_CSC_ENABLE;
+
+       return csc_mode;
+}
+
+static int icl_color_check(struct intel_crtc_state *crtc_state)
+{
+       int ret;
+
+       ret = check_luts(crtc_state);
+       if (ret)
+               return ret;
+
+       crtc_state->gamma_mode = icl_gamma_mode(crtc_state);
+
+       crtc_state->csc_mode = icl_csc_mode(crtc_state);
+
+       return 0;
+}
+
+void intel_color_init(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
+
+       drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+
+       if (HAS_GMCH(dev_priv)) {
+               if (IS_CHERRYVIEW(dev_priv)) {
+                       dev_priv->display.color_check = chv_color_check;
+                       dev_priv->display.color_commit = i9xx_color_commit;
+                       dev_priv->display.load_luts = chv_load_luts;
+               } else if (INTEL_GEN(dev_priv) >= 4) {
+                       dev_priv->display.color_check = i9xx_color_check;
+                       dev_priv->display.color_commit = i9xx_color_commit;
+                       dev_priv->display.load_luts = i965_load_luts;
+               } else {
+                       dev_priv->display.color_check = i9xx_color_check;
+                       dev_priv->display.color_commit = i9xx_color_commit;
+                       dev_priv->display.load_luts = i9xx_load_luts;
+               }
+       } else {
+               if (INTEL_GEN(dev_priv) >= 11)
+                       dev_priv->display.color_check = icl_color_check;
+               else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+                       dev_priv->display.color_check = glk_color_check;
+               else if (INTEL_GEN(dev_priv) >= 7)
+                       dev_priv->display.color_check = ivb_color_check;
+               else
+                       dev_priv->display.color_check = ilk_color_check;
+
+               if (INTEL_GEN(dev_priv) >= 9)
+                       dev_priv->display.color_commit = skl_color_commit;
+               else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+                       dev_priv->display.color_commit = hsw_color_commit;
+               else
+                       dev_priv->display.color_commit = ilk_color_commit;
+
+               if (INTEL_GEN(dev_priv) >= 11)
+                       dev_priv->display.load_luts = icl_load_luts;
+               else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+                       dev_priv->display.load_luts = glk_load_luts;
+               else if (INTEL_GEN(dev_priv) >= 8)
+                       dev_priv->display.load_luts = bdw_load_luts;
+               else if (INTEL_GEN(dev_priv) >= 7)
+                       dev_priv->display.load_luts = ivb_load_luts;
+               else
+                       dev_priv->display.load_luts = ilk_load_luts;
+       }
+
+       drm_crtc_enable_color_mgmt(&crtc->base,
+                                  INTEL_INFO(dev_priv)->color.degamma_lut_size,
+                                  has_ctm,
+                                  INTEL_INFO(dev_priv)->color.gamma_lut_size);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
new file mode 100644
index 0000000..057e8ac
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_H__
+#define __INTEL_COLOR_H__
+
+struct intel_crtc_state;
+struct intel_crtc;
+
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_get_config(struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
new file mode 100644
index 0000000..841708d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_combo_phy.h"
+#include "intel_drv.h"
+
+#define for_each_combo_port(__dev_priv, __port) \
+       for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)  \
+               for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+#define for_each_combo_port_reverse(__dev_priv, __port) \
+       for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
+               for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+enum {
+       PROCMON_0_85V_DOT_0,
+       PROCMON_0_95V_DOT_0,
+       PROCMON_0_95V_DOT_1,
+       PROCMON_1_05V_DOT_0,
+       PROCMON_1_05V_DOT_1,
+};
+
+static const struct cnl_procmon {
+       u32 dw1, dw9, dw10;
+} cnl_procmon_values[] = {
+       [PROCMON_0_85V_DOT_0] =
+               { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+       [PROCMON_0_95V_DOT_0] =
+               { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+       [PROCMON_0_95V_DOT_1] =
+               { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+       [PROCMON_1_05V_DOT_0] =
+               { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+       [PROCMON_1_05V_DOT_1] =
+               { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+};
+
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, which is why we use the ICL macros even though the function has
+ * CNL in its name.
+ */
+static const struct cnl_procmon *
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+{
+       const struct cnl_procmon *procmon;
+       u32 val;
+
+       val = I915_READ(ICL_PORT_COMP_DW3(port));
+       switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+       default:
+               MISSING_CASE(val);
+               /* fall through */
+       case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+               procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+               break;
+       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+               break;
+       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+               break;
+       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+               break;
+       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+               break;
+       }
+
+       return procmon;
+}
+
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+                                      enum port port)
+{
+       const struct cnl_procmon *procmon;
+       u32 val;
+
+       procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+       val = I915_READ(ICL_PORT_COMP_DW1(port));
+       val &= ~((0xff << 16) | 0xff);
+       val |= procmon->dw1;
+       I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+
+       I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+       I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+                         enum port port, i915_reg_t reg, u32 mask,
+                         u32 expected_val)
+{
+       u32 val = I915_READ(reg);
+
+       if ((val & mask) != expected_val) {
+               DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+                                "current %08x mask %08x expected %08x\n",
+                                port_name(port),
+                                reg.reg, val, mask, expected_val);
+               return false;
+       }
+
+       return true;
+}
+
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+                                         enum port port)
+{
+       const struct cnl_procmon *procmon;
+       bool ret;
+
+       procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+       ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+                           (0xff << 16) | 0xff, procmon->dw1);
+       ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+                            -1U, procmon->dw9);
+       ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+                            -1U, procmon->dw10);
+
+       return ret;
+}
+
+static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+{
+       return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+               (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+}
+
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+       enum port port = PORT_A;
+       bool ret;
+
+       if (!cnl_combo_phy_enabled(dev_priv))
+               return false;
+
+       ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+       ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+       return ret;
+}
+
+static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(CHICKEN_MISC_2);
+       val &= ~CNL_COMP_PWR_DOWN;
+       I915_WRITE(CHICKEN_MISC_2, val);
+
+       /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+       cnl_set_procmon_ref_values(dev_priv, PORT_A);
+
+       val = I915_READ(CNL_PORT_COMP_DW0);
+       val |= COMP_INIT;
+       I915_WRITE(CNL_PORT_COMP_DW0, val);
+
+       val = I915_READ(CNL_PORT_CL1CM_DW5);
+       val |= CL_POWER_DOWN_ENABLE;
+       I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+}
+
+static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       if (!cnl_combo_phy_verify_state(dev_priv))
+               DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+
+       val = I915_READ(CHICKEN_MISC_2);
+       val |= CNL_COMP_PWR_DOWN;
+       I915_WRITE(CHICKEN_MISC_2, val);
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+                                 enum port port)
+{
+       return !(I915_READ(ICL_PHY_MISC(port)) &
+                ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+               (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+                                      enum port port)
+{
+       bool ret;
+
+       if (!icl_combo_phy_enabled(dev_priv, port))
+               return false;
+
+       ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+       if (port == PORT_A)
+               ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+                                    IREFGEN, IREFGEN);
+
+       ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+       return ret;
+}
+
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+                                   enum port port, bool is_dsi,
+                                   int lane_count, bool lane_reversal)
+{
+       u8 lane_mask;
+       u32 val;
+
+       if (is_dsi) {
+               WARN_ON(lane_reversal);
+
+               switch (lane_count) {
+               case 1:
+                       lane_mask = PWR_DOWN_LN_3_1_0;
+                       break;
+               case 2:
+                       lane_mask = PWR_DOWN_LN_3_1;
+                       break;
+               case 3:
+                       lane_mask = PWR_DOWN_LN_3;
+                       break;
+               default:
+                       MISSING_CASE(lane_count);
+                       /* fall-through */
+               case 4:
+                       lane_mask = PWR_UP_ALL_LANES;
+                       break;
+               }
+       } else {
+               switch (lane_count) {
+               case 1:
+                       lane_mask = lane_reversal ? PWR_DOWN_LN_2_1_0 :
+                                                   PWR_DOWN_LN_3_2_1;
+                       break;
+               case 2:
+                       lane_mask = lane_reversal ? PWR_DOWN_LN_1_0 :
+                                                   PWR_DOWN_LN_3_2;
+                       break;
+               default:
+                       MISSING_CASE(lane_count);
+                       /* fall-through */
+               case 4:
+                       lane_mask = PWR_UP_ALL_LANES;
+                       break;
+               }
+       }
+
+       val = I915_READ(ICL_PORT_CL_DW10(port));
+       val &= ~PWR_DOWN_LN_MASK;
+       val |= lane_mask << PWR_DOWN_LN_SHIFT;
+       I915_WRITE(ICL_PORT_CL_DW10(port), val);
+}
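As an aside (not part of the patch), the lane_mask selection above boils down to the following standalone sketch; the PWR_DOWN_LN_* values used here are placeholders for illustration, the real bit encodings live in the i915 register definitions.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder lane-mask encodings, one bit per lane to power down;
 * the real PWR_DOWN_LN_* values come from the i915 register headers. */
enum lane_mask {
        PWR_UP_ALL_LANES  = 0x0,
        PWR_DOWN_LN_1_0   = 0x3,
        PWR_DOWN_LN_2_1_0 = 0x7,
        PWR_DOWN_LN_3     = 0x8,
        PWR_DOWN_LN_3_1   = 0xa,
        PWR_DOWN_LN_3_1_0 = 0xb,
        PWR_DOWN_LN_3_2   = 0xc,
        PWR_DOWN_LN_3_2_1 = 0xe,
};

/* Mirrors the decision tree in intel_combo_phy_power_up_lanes(): DSI never
 * uses lane reversal, while DDI powers down lanes from one end or the other
 * of the PHY depending on lane_reversal. */
static enum lane_mask pick_lane_mask(bool is_dsi, int lane_count,
                                     bool lane_reversal)
{
        if (is_dsi) {
                switch (lane_count) {
                case 1: return PWR_DOWN_LN_3_1_0;
                case 2: return PWR_DOWN_LN_3_1;
                case 3: return PWR_DOWN_LN_3;
                default: return PWR_UP_ALL_LANES;
                }
        }

        switch (lane_count) {
        case 1: return lane_reversal ? PWR_DOWN_LN_2_1_0 : PWR_DOWN_LN_3_2_1;
        case 2: return lane_reversal ? PWR_DOWN_LN_1_0 : PWR_DOWN_LN_3_2;
        default: return PWR_UP_ALL_LANES;
        }
}

int main(void)
{
        printf("x2 DDI, reversed: mask=0x%x\n",
               pick_lane_mask(false, 2, true)); /* 0x3 with these placeholders */
        return 0;
}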
+
+static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       for_each_combo_port(dev_priv, port) {
+               u32 val;
+
+               if (icl_combo_phy_verify_state(dev_priv, port)) {
+                       DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
+                                        port_name(port));
+                       continue;
+               }
+
+               val = I915_READ(ICL_PHY_MISC(port));
+               val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+               I915_WRITE(ICL_PHY_MISC(port), val);
+
+               cnl_set_procmon_ref_values(dev_priv, port);
+
+               if (port == PORT_A) {
+                       val = I915_READ(ICL_PORT_COMP_DW8(port));
+                       val |= IREFGEN;
+                       I915_WRITE(ICL_PORT_COMP_DW8(port), val);
+               }
+
+               val = I915_READ(ICL_PORT_COMP_DW0(port));
+               val |= COMP_INIT;
+               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+               val = I915_READ(ICL_PORT_CL_DW5(port));
+               val |= CL_POWER_DOWN_ENABLE;
+               I915_WRITE(ICL_PORT_CL_DW5(port), val);
+       }
+}
+
+static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+       enum port port;
+
+       for_each_combo_port_reverse(dev_priv, port) {
+               u32 val;
+
+               if (port == PORT_A &&
+                   !icl_combo_phy_verify_state(dev_priv, port))
+                       DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
+                                port_name(port));
+
+               val = I915_READ(ICL_PHY_MISC(port));
+               val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+               I915_WRITE(ICL_PHY_MISC(port), val);
+
+               val = I915_READ(ICL_PORT_COMP_DW0(port));
+               val &= ~COMP_INIT;
+               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+       }
+}
+
+void intel_combo_phy_init(struct drm_i915_private *i915)
+{
+       if (INTEL_GEN(i915) >= 11)
+               icl_combo_phys_init(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_combo_phys_init(i915);
+}
+
+void intel_combo_phy_uninit(struct drm_i915_private *i915)
+{
+       if (INTEL_GEN(i915) >= 11)
+               icl_combo_phys_uninit(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_combo_phys_uninit(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
new file mode 100644
index 0000000..e6e195a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COMBO_PHY_H__
+#define __INTEL_COMBO_PHY_H__
+
+#include <linux/types.h>
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
+void intel_combo_phy_init(struct drm_i915_private *dev_priv);
+void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+                                   enum port port, bool is_dsi,
+                                   int lane_count, bool lane_reversal);
+
+#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
new file mode 100644
index 0000000..41310f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+#include "display/intel_panel.h"
+
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+
+int intel_connector_init(struct intel_connector *connector)
+{
+       struct intel_digital_connector_state *conn_state;
+
+       /*
+        * Allocate enough memory to hold intel_digital_connector_state.
+        * This might be a few bytes too many, but for connectors that don't
+        * need it we'll free the state and allocate a smaller one on the first
+        * successful commit anyway.
+        */
+       conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+       if (!conn_state)
+               return -ENOMEM;
+
+       __drm_atomic_helper_connector_reset(&connector->base,
+                                           &conn_state->base);
+
+       return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+       struct intel_connector *connector;
+
+       connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+       if (!connector)
+               return NULL;
+
+       if (intel_connector_init(connector) < 0) {
+               kfree(connector);
+               return NULL;
+       }
+
+       return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+       kfree(to_intel_digital_connector_state(connector->base.state));
+       kfree(connector);
+}
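A hypothetical error-path sketch (not part of the patch) of the window described above in which intel_connector_free() is the right cleanup: after intel_connector_alloc() has succeeded but before drm_connector_init() has. foo_create_connector and foo_connector_funcs are made-up names.

static struct intel_connector *foo_create_connector(struct drm_device *dev)
{
        struct intel_connector *connector = intel_connector_alloc();

        if (!connector)
                return NULL;

        /* Made-up funcs table; a real caller would pass its own. */
        if (drm_connector_init(dev, &connector->base, &foo_connector_funcs,
                               DRM_MODE_CONNECTOR_Unknown) < 0) {
                /*
                 * drm_connector_init() failed, so no ->destroy callback will
                 * ever run for this connector; undo the allocation by hand.
                 */
                intel_connector_free(connector);
                return NULL;
        }

        return connector;
}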
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       kfree(intel_connector->detect_edid);
+
+       intel_hdcp_cleanup(intel_connector);
+
+       if (!IS_ERR_OR_NULL(intel_connector->edid))
+               kfree(intel_connector->edid);
+
+       intel_panel_fini(&intel_connector->panel);
+
+       drm_connector_cleanup(connector);
+
+       if (intel_connector->port)
+               drm_dp_mst_put_port_malloc(intel_connector->port);
+
+       kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       int ret;
+
+       ret = intel_backlight_device_register(intel_connector);
+       if (ret)
+               goto err;
+
+       if (i915_inject_load_failure()) {
+               ret = -EFAULT;
+               goto err_backlight;
+       }
+
+       return 0;
+
+err_backlight:
+       intel_backlight_device_unregister(intel_connector);
+err:
+       return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+                                   struct intel_encoder *encoder)
+{
+       connector->encoder = encoder;
+       drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, and hence the encoder state determines the
+ * state of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+       enum pipe pipe = 0;
+       struct intel_encoder *encoder = connector->encoder;
+
+       return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+       if (!connector->base.state->crtc)
+               return INVALID_PIPE;
+
+       return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+                               struct edid *edid)
+{
+       int ret;
+
+       drm_connector_update_edid_property(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+
+       return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int intel_ddc_get_modes(struct drm_connector *connector,
+                       struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+       int ret;
+
+       edid = drm_get_edid(connector, adapter);
+       if (!edid)
+               return 0;
+
+       ret = intel_connector_update_modes(connector, edid);
+       kfree(edid);
+
+       return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+       { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+       { HDMI_AUDIO_OFF, "off" },
+       { HDMI_AUDIO_AUTO, "auto" },
+       { HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_property *prop;
+
+       prop = dev_priv->force_audio_property;
+       if (prop == NULL) {
+               prop = drm_property_create_enum(dev, 0,
+                                          "audio",
+                                          force_audio_names,
+                                          ARRAY_SIZE(force_audio_names));
+               if (prop == NULL)
+                       return;
+
+               dev_priv->force_audio_property = prop;
+       }
+       drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+       { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+       { INTEL_BROADCAST_RGB_FULL, "Full" },
+       { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_property *prop;
+
+       prop = dev_priv->broadcast_rgb_property;
+       if (prop == NULL) {
+               prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+                                          "Broadcast RGB",
+                                          broadcast_rgb_names,
+                                          ARRAY_SIZE(broadcast_rgb_names));
+               if (prop == NULL)
+                       return;
+
+               dev_priv->broadcast_rgb_property = prop;
+       }
+
+       drm_object_attach_property(&connector->base, prop, 0);
+}
+
+void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+       if (!drm_mode_create_aspect_ratio_property(connector->dev))
+               drm_object_attach_property(&connector->base,
+                       connector->dev->mode_config.aspect_ratio_property,
+                       DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+void
+intel_attach_colorspace_property(struct drm_connector *connector)
+{
+       if (!drm_mode_create_colorspace_property(connector))
+               drm_object_attach_property(&connector->base,
+                                          connector->colorspace_property, 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
new file mode 100644
index 0000000..93a7375
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONNECTOR_H__
+#define __INTEL_CONNECTOR_H__
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct edid;
+struct i2c_adapter;
+struct intel_connector;
+struct intel_encoder;
+
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+                                   struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+                                struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+void intel_attach_colorspace_property(struct drm_connector *connector);
+
+#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
new file mode 100644
index 0000000..8d7e4c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -0,0 +1,17119 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/intel-iommu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+#include <linux/vgaarb.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/i915_drm.h>
+
+#include "display/intel_crt.h"
+#include "display/intel_ddi.h"
+#include "display/intel_dp.h"
+#include "display/intel_dsi.h"
+#include "display/intel_dvo.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_hdmi.h"
+#include "display/intel_lvds.h"
+#include "display/intel_sdvo.h"
+#include "display/intel_tv.h"
+#include "display/intel_vdsc.h"
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_acpi.h"
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_bw.h"
+#include "intel_color.h"
+#include "intel_cdclk.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
+#include "intel_fifo_underrun.h"
+#include "intel_frontbuffer.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "intel_overlay.h"
+#include "intel_pipe_crc.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_quirks.h"
+#include "intel_sideband.h"
+#include "intel_sprite.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const u32 i965_primary_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+};
+
+static const u64 i9xx_format_modifiers[] = {
+       I915_FORMAT_MOD_X_TILED,
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+       DRM_FORMAT_ARGB8888,
+};
+
+static const u64 cursor_format_modifiers[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+                               struct intel_crtc_state *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+                                  struct intel_crtc_state *pipe_config);
+
+static int intel_framebuffer_init(struct intel_framebuffer *ifb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_mode_fb_cmd2 *mode_cmd);
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+                                        const struct intel_link_m_n *m_n,
+                                        const struct intel_link_m_n *m2_n2);
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config);
+static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
+static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
+static void intel_crtc_init_scalers(struct intel_crtc *crtc,
+                                   struct intel_crtc_state *crtc_state);
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                        struct drm_modeset_acquire_ctx *ctx);
+static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+
+struct intel_limit {
+       struct {
+               int min, max;
+       } dot, vco, n, m, m1, m2, p, p1;
+
+       struct {
+               int dot_limit;
+               int p2_slow, p2_fast;
+       } p2;
+};
+
+/* returns HPLL frequency in kHz */
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+{
+       int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+       /* Obtain SKU information */
+       hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+               CCK_FUSE_HPLL_FREQ_MASK;
+
+       return vco_freq[hpll_freq] * 1000;
+}
+
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+                     const char *name, u32 reg, int ref_freq)
+{
+       u32 val;
+       int divider;
+
+       val = vlv_cck_read(dev_priv, reg);
+       divider = val & CCK_FREQUENCY_VALUES;
+
+       WARN((val & CCK_FREQUENCY_STATUS) !=
+            (divider << CCK_FREQUENCY_STATUS_SHIFT),
+            "%s change in progress\n", name);
+
+       return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
+}
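A quick worked example (not part of the patch) of the arithmetic in vlv_get_cck_clock(); the reference frequency and the divider field value are made up for illustration.

#include <stdio.h>

/* Simplified for non-negative integers; the kernel macro handles more cases. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        int ref_freq = 1600000; /* kHz, e.g. a 1.6 GHz HPLL VCO */
        int divider = 7;        /* CCK_FREQUENCY_VALUES field, made up */

        /*
         * Same arithmetic as vlv_get_cck_clock(): the field stores twice the
         * divide ratio minus one, so half-step ratios are representable.
         * Here: 2 * 1600000 / (7 + 1) = 400000 kHz.
         */
        printf("cck clock = %d kHz\n",
               DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1));
        return 0;
}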
+
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+                          const char *name, u32 reg)
+{
+       int hpll;
+
+       vlv_cck_get(dev_priv);
+
+       if (dev_priv->hpll_freq == 0)
+               dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
+
+       hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+
+       vlv_cck_put(dev_priv);
+
+       return hpll;
+}
+
+static void intel_update_czclk(struct drm_i915_private *dev_priv)
+{
+       if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
+               return;
+
+       dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+                                                     CCK_CZ_CLOCK_CONTROL);
+
+       DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+}
+
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+                   const struct intel_crtc_state *pipe_config)
+{
+       if (HAS_DDI(dev_priv))
+               return pipe_config->port_clock; /* SPLL */
+       else
+               return dev_priv->fdi_pll_freq;
+}
+
+static const struct intel_limit intel_limits_i8xx_dac = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 908000, .max = 1512000 },
+       .n = { .min = 2, .max = 16 },
+       .m = { .min = 96, .max = 140 },
+       .m1 = { .min = 18, .max = 26 },
+       .m2 = { .min = 6, .max = 16 },
+       .p = { .min = 4, .max = 128 },
+       .p1 = { .min = 2, .max = 33 },
+       .p2 = { .dot_limit = 165000,
+               .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 908000, .max = 1512000 },
+       .n = { .min = 2, .max = 16 },
+       .m = { .min = 96, .max = 140 },
+       .m1 = { .min = 18, .max = 26 },
+       .m2 = { .min = 6, .max = 16 },
+       .p = { .min = 4, .max = 128 },
+       .p1 = { .min = 2, .max = 33 },
+       .p2 = { .dot_limit = 165000,
+               .p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 908000, .max = 1512000 },
+       .n = { .min = 2, .max = 16 },
+       .m = { .min = 96, .max = 140 },
+       .m1 = { .min = 18, .max = 26 },
+       .m2 = { .min = 6, .max = 16 },
+       .p = { .min = 4, .max = 128 },
+       .p1 = { .min = 1, .max = 6 },
+       .p2 = { .dot_limit = 165000,
+               .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+       .dot = { .min = 20000, .max = 400000 },
+       .vco = { .min = 1400000, .max = 2800000 },
+       .n = { .min = 1, .max = 6 },
+       .m = { .min = 70, .max = 120 },
+       .m1 = { .min = 8, .max = 18 },
+       .m2 = { .min = 3, .max = 7 },
+       .p = { .min = 5, .max = 80 },
+       .p1 = { .min = 1, .max = 8 },
+       .p2 = { .dot_limit = 200000,
+               .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+       .dot = { .min = 20000, .max = 400000 },
+       .vco = { .min = 1400000, .max = 2800000 },
+       .n = { .min = 1, .max = 6 },
+       .m = { .min = 70, .max = 120 },
+       .m1 = { .min = 8, .max = 18 },
+       .m2 = { .min = 3, .max = 7 },
+       .p = { .min = 7, .max = 98 },
+       .p1 = { .min = 1, .max = 8 },
+       .p2 = { .dot_limit = 112000,
+               .p2_slow = 14, .p2_fast = 7 },
+};
+
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+       .dot = { .min = 25000, .max = 270000 },
+       .vco = { .min = 1750000, .max = 3500000},
+       .n = { .min = 1, .max = 4 },
+       .m = { .min = 104, .max = 138 },
+       .m1 = { .min = 17, .max = 23 },
+       .m2 = { .min = 5, .max = 11 },
+       .p = { .min = 10, .max = 30 },
+       .p1 = { .min = 1, .max = 3},
+       .p2 = { .dot_limit = 270000,
+               .p2_slow = 10,
+               .p2_fast = 10
+       },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+       .dot = { .min = 22000, .max = 400000 },
+       .vco = { .min = 1750000, .max = 3500000},
+       .n = { .min = 1, .max = 4 },
+       .m = { .min = 104, .max = 138 },
+       .m1 = { .min = 16, .max = 23 },
+       .m2 = { .min = 5, .max = 11 },
+       .p = { .min = 5, .max = 80 },
+       .p1 = { .min = 1, .max = 8},
+       .p2 = { .dot_limit = 165000,
+               .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+       .dot = { .min = 20000, .max = 115000 },
+       .vco = { .min = 1750000, .max = 3500000 },
+       .n = { .min = 1, .max = 3 },
+       .m = { .min = 104, .max = 138 },
+       .m1 = { .min = 17, .max = 23 },
+       .m2 = { .min = 5, .max = 11 },
+       .p = { .min = 28, .max = 112 },
+       .p1 = { .min = 2, .max = 8 },
+       .p2 = { .dot_limit = 0,
+               .p2_slow = 14, .p2_fast = 14
+       },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+       .dot = { .min = 80000, .max = 224000 },
+       .vco = { .min = 1750000, .max = 3500000 },
+       .n = { .min = 1, .max = 3 },
+       .m = { .min = 104, .max = 138 },
+       .m1 = { .min = 17, .max = 23 },
+       .m2 = { .min = 5, .max = 11 },
+       .p = { .min = 14, .max = 42 },
+       .p1 = { .min = 2, .max = 6 },
+       .p2 = { .dot_limit = 0,
+               .p2_slow = 7, .p2_fast = 7
+       },
+};
+
+static const struct intel_limit intel_limits_pineview_sdvo = {
+       .dot = { .min = 20000, .max = 400000},
+       .vco = { .min = 1700000, .max = 3500000 },
+       /* Pineview's Ncounter is a ring counter */
+       .n = { .min = 3, .max = 6 },
+       .m = { .min = 2, .max = 256 },
+       /* Pineview only has one combined m divider, which we treat as m2. */
+       .m1 = { .min = 0, .max = 0 },
+       .m2 = { .min = 0, .max = 254 },
+       .p = { .min = 5, .max = 80 },
+       .p1 = { .min = 1, .max = 8 },
+       .p2 = { .dot_limit = 200000,
+               .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_pineview_lvds = {
+       .dot = { .min = 20000, .max = 400000 },
+       .vco = { .min = 1700000, .max = 3500000 },
+       .n = { .min = 3, .max = 6 },
+       .m = { .min = 2, .max = 256 },
+       .m1 = { .min = 0, .max = 0 },
+       .m2 = { .min = 0, .max = 254 },
+       .p = { .min = 7, .max = 112 },
+       .p1 = { .min = 1, .max = 8 },
+       .p2 = { .dot_limit = 112000,
+               .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit intel_limits_ironlake_dac = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 1760000, .max = 3510000 },
+       .n = { .min = 1, .max = 5 },
+       .m = { .min = 79, .max = 127 },
+       .m1 = { .min = 12, .max = 22 },
+       .m2 = { .min = 5, .max = 9 },
+       .p = { .min = 5, .max = 80 },
+       .p1 = { .min = 1, .max = 8 },
+       .p2 = { .dot_limit = 225000,
+               .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 1760000, .max = 3510000 },
+       .n = { .min = 1, .max = 3 },
+       .m = { .min = 79, .max = 118 },
+       .m1 = { .min = 12, .max = 22 },
+       .m2 = { .min = 5, .max = 9 },
+       .p = { .min = 28, .max = 112 },
+       .p1 = { .min = 2, .max = 8 },
+       .p2 = { .dot_limit = 225000,
+               .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 1760000, .max = 3510000 },
+       .n = { .min = 1, .max = 3 },
+       .m = { .min = 79, .max = 127 },
+       .m1 = { .min = 12, .max = 22 },
+       .m2 = { .min = 5, .max = 9 },
+       .p = { .min = 14, .max = 56 },
+       .p1 = { .min = 2, .max = 8 },
+       .p2 = { .dot_limit = 225000,
+               .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100 MHz refclk limits. */
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 1760000, .max = 3510000 },
+       .n = { .min = 1, .max = 2 },
+       .m = { .min = 79, .max = 126 },
+       .m1 = { .min = 12, .max = 22 },
+       .m2 = { .min = 5, .max = 9 },
+       .p = { .min = 28, .max = 112 },
+       .p1 = { .min = 2, .max = 8 },
+       .p2 = { .dot_limit = 225000,
+               .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 1760000, .max = 3510000 },
+       .n = { .min = 1, .max = 3 },
+       .m = { .min = 79, .max = 126 },
+       .m1 = { .min = 12, .max = 22 },
+       .m2 = { .min = 5, .max = 9 },
+       .p = { .min = 14, .max = 42 },
+       .p1 = { .min = 2, .max = 6 },
+       .p2 = { .dot_limit = 225000,
+               .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+        /*
+         * These are the data rate limits (measured in fast clocks)
+         * since those are the strictest limits we have. The fast
+         * clock and actual rate limits are more relaxed, so checking
+         * them would make no difference.
+         */
+       .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+       .vco = { .min = 4000000, .max = 6000000 },
+       .n = { .min = 1, .max = 7 },
+       .m1 = { .min = 2, .max = 3 },
+       .m2 = { .min = 11, .max = 156 },
+       .p1 = { .min = 2, .max = 3 },
+       .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const struct intel_limit intel_limits_chv = {
+       /*
+        * These are the data rate limits (measured in fast clocks)
+        * since those are the strictest limits we have.  The fast
+        * clock and actual rate limits are more relaxed, so checking
+        * them would make no difference.
+        */
+       .dot = { .min = 25000 * 5, .max = 540000 * 5},
+       .vco = { .min = 4800000, .max = 6480000 },
+       .n = { .min = 1, .max = 1 },
+       .m1 = { .min = 2, .max = 2 },
+       .m2 = { .min = 24 << 22, .max = 175 << 22 },
+       .p1 = { .min = 2, .max = 4 },
+       .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_bxt = {
+       /* FIXME: find real dot limits */
+       .dot = { .min = 0, .max = INT_MAX },
+       .vco = { .min = 4800000, .max = 6700000 },
+       .n = { .min = 1, .max = 1 },
+       .m1 = { .min = 2, .max = 2 },
+       /* FIXME: find real m2 limits */
+       .m2 = { .min = 2 << 22, .max = 255 << 22 },
+       .p1 = { .min = 2, .max = 4 },
+       .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/* WA Display #0827: Gen9:all */
+static void
+skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
+{
+       if (enable)
+               I915_WRITE(CLKGATE_DIS_PSL(pipe),
+                          I915_READ(CLKGATE_DIS_PSL(pipe)) |
+                          DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+       else
+               I915_WRITE(CLKGATE_DIS_PSL(pipe),
+                          I915_READ(CLKGATE_DIS_PSL(pipe)) &
+                          ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+}
+
+/* Wa_2006604312:icl */
+static void
+icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+                      bool enable)
+{
+       if (enable)
+               I915_WRITE(CLKGATE_DIS_PSL(pipe),
+                          I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+       else
+               I915_WRITE(CLKGATE_DIS_PSL(pipe),
+                          I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+}
+
+static bool
+needs_modeset(const struct drm_crtc_state *state)
+{
+       return drm_atomic_crtc_needs_modeset(state);
+}
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback (clock.m) and
+ * post-divider (clock.p) values, and the pre- (clock.vco) and post-divided
+ * fast (clock.dot) clock rates. This fast dot clock is fed to the port's IO
+ * logic. The helpers' return value is the rate of the clock that is fed to
+ * the display engine's pipe, which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+       clock->m = clock->m2 + 2;
+       clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n == 0 || clock->p == 0))
+               return 0;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot;
+}
+
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+{
+       return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+       clock->m = i9xx_dpll_compute_m(clock);
+       clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+               return 0;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot;
+}
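+
+/*
+ * Worked example with assumed values: refclk = 96000 kHz, m1 = 10, m2 = 8,
+ * n = 3, p1 = 2, p2 = 5 gives m = 5 * (10 + 2) + (8 + 2) = 70,
+ * vco = 96000 * 70 / (3 + 2) = 1344000 kHz and
+ * dot = 1344000 / (2 * 5) = 134400 kHz.
+ */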
+
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+       clock->m = clock->m1 * clock->m2;
+       clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n == 0 || clock->p == 0))
+               return 0;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+       clock->m = clock->m1 * clock->m2;
+       clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n == 0 || clock->p == 0))
+               return 0;
+       clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+                                          clock->n << 22);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+       return clock->dot / 5;
+}
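+
+/*
+ * Worked example with assumed values: CHV stores m2 with 22 fractional
+ * bits, so with refclk = 19200 kHz, n = 1, m1 = 2, m2 = 140 << 22 and
+ * p1 = 2, p2 = 1 the above gives vco = 19200 * 2 * 140 = 5376000 kHz,
+ * a fast dot clock of 5376000 / 2 = 2688000 kHz and a returned pixel
+ * clock of 2688000 / 5 = 537600 kHz.
+ */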
+
+#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
+
+/*
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+                              const struct intel_limit *limit,
+                              const struct dpll *clock)
+{
+       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+               INTELPllInvalid("n out of range\n");
+       if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
+               INTELPllInvalid("p1 out of range\n");
+       if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
+               INTELPllInvalid("m2 out of range\n");
+       if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
+               INTELPllInvalid("m1 out of range\n");
+
+       if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+           !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+               if (clock->m1 <= clock->m2)
+                       INTELPllInvalid("m1 <= m2\n");
+
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+           !IS_GEN9_LP(dev_priv)) {
+               if (clock->p < limit->p.min || limit->p.max < clock->p)
+                       INTELPllInvalid("p out of range\n");
+               if (clock->m < limit->m.min || limit->m.max < clock->m)
+                       INTELPllInvalid("m out of range\n");
+       }
+
+       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+               INTELPllInvalid("vco out of range\n");
+       /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+        * connector, etc., rather than just a single range.
+        */
+       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+               INTELPllInvalid("dot out of range\n");
+
+       return true;
+}
+
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+                  const struct intel_crtc_state *crtc_state,
+                  int target)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               /*
+                * For LVDS just rely on its current settings for dual-channel.
+                * We haven't figured out how to reliably set up different
+                * single/dual channel state, if we even can.
+                */
+               if (intel_is_dual_link_lvds(dev_priv))
+                       return limit->p2.p2_fast;
+               else
+                       return limit->p2.p2_slow;
+       } else {
+               if (target < limit->p2.dot_limit)
+                       return limit->p2.p2_slow;
+               else
+                       return limit->p2.p2_fast;
+       }
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+                   struct intel_crtc_state *crtc_state,
+                   int target, int refclk, struct dpll *match_clock,
+                   struct dpll *best_clock)
+{
+       struct drm_device *dev = crtc_state->base.crtc->dev;
+       struct dpll clock;
+       int err = target;
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+            clock.m1++) {
+               for (clock.m2 = limit->m2.min;
+                    clock.m2 <= limit->m2.max; clock.m2++) {
+                       if (clock.m2 >= clock.m1)
+                               break;
+                       for (clock.n = limit->n.min;
+                            clock.n <= limit->n.max; clock.n++) {
+                               for (clock.p1 = limit->p1.min;
+                                       clock.p1 <= limit->p1.max; clock.p1++) {
+                                       int this_err;
+
+                                       i9xx_calc_dpll_params(refclk, &clock);
+                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                                               limit,
+                                                               &clock))
+                                               continue;
+                                       if (match_clock &&
+                                           clock.p != match_clock->p)
+                                               continue;
+
+                                       this_err = abs(clock.dot - target);
+                                       if (this_err < err) {
+                                               *best_clock = clock;
+                                               err = this_err;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+                  struct intel_crtc_state *crtc_state,
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
+{
+       struct drm_device *dev = crtc_state->base.crtc->dev;
+       struct dpll clock;
+       int err = target;
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+            clock.m1++) {
+               for (clock.m2 = limit->m2.min;
+                    clock.m2 <= limit->m2.max; clock.m2++) {
+                       for (clock.n = limit->n.min;
+                            clock.n <= limit->n.max; clock.n++) {
+                               for (clock.p1 = limit->p1.min;
+                                       clock.p1 <= limit->p1.max; clock.p1++) {
+                                       int this_err;
+
+                                       pnv_calc_dpll_params(refclk, &clock);
+                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                                               limit,
+                                                               &clock))
+                                               continue;
+                                       if (match_clock &&
+                                           clock.p != match_clock->p)
+                                               continue;
+
+                                       this_err = abs(clock.dot - target);
+                                       if (this_err < err) {
+                                               *best_clock = clock;
+                                               err = this_err;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+                  struct intel_crtc_state *crtc_state,
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
+{
+       struct drm_device *dev = crtc_state->base.crtc->dev;
+       struct dpll clock;
+       int max_n;
+       bool found = false;
+       /* approximately equals target * 0.00585 */
+       int err_most = (target >> 8) + (target >> 9);
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+       max_n = limit->n.max;
+       /* based on hardware requirement, prefer smaller n for better precision */
+       for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+               /* based on hardware requirement, prefer larger m1,m2 */
+               for (clock.m1 = limit->m1.max;
+                    clock.m1 >= limit->m1.min; clock.m1--) {
+                       for (clock.m2 = limit->m2.max;
+                            clock.m2 >= limit->m2.min; clock.m2--) {
+                               for (clock.p1 = limit->p1.max;
+                                    clock.p1 >= limit->p1.min; clock.p1--) {
+                                       int this_err;
+
+                                       i9xx_calc_dpll_params(refclk, &clock);
+                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                                               limit,
+                                                               &clock))
+                                               continue;
+
+                                       this_err = abs(clock.dot - target);
+                                       if (this_err < err_most) {
+                                               *best_clock = clock;
+                                               err_most = this_err;
+                                               max_n = clock.n;
+                                               found = true;
+                                       }
+                               }
+                       }
+               }
+       }
+       return found;
+}
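+
+/*
+ * Example with an assumed target: err_most = (target >> 8) + (target >> 9)
+ * allows roughly 0.585% deviation, e.g. for target = 270000 kHz that is
+ * 1054 + 527 = 1581 kHz.
+ */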
+
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return the calculated error.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+                              const struct dpll *calculated_clock,
+                              const struct dpll *best_clock,
+                              unsigned int best_error_ppm,
+                              unsigned int *error_ppm)
+{
+       /*
+        * For CHV ignore the error and consider only the P value.
+        * Prefer a bigger P value based on HW requirements.
+        */
+       if (IS_CHERRYVIEW(to_i915(dev))) {
+               *error_ppm = 0;
+
+               return calculated_clock->p > best_clock->p;
+       }
+
+       if (WARN_ON_ONCE(!target_freq))
+               return false;
+
+       *error_ppm = div_u64(1000000ULL *
+                               abs(target_freq - calculated_clock->dot),
+                            target_freq);
+       /*
+        * Prefer a better P value over a better (smaller) error if the error
+        * is small. Ensure this preference for future configurations too by
+        * setting the error to 0.
+        */
+       if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+               *error_ppm = 0;
+
+               return true;
+       }
+
+       return *error_ppm + 10 < best_error_ppm;
+}
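+
+/*
+ * Example with assumed values: for target_freq = 742500 kHz (a 148500 kHz
+ * pixel clock in fast-clock units) and a calculated dot of 742400 kHz,
+ * error_ppm = 1000000 * 100 / 742500 = 134. A candidate under 100 ppm
+ * with a bigger P always wins; otherwise it must beat the previous best
+ * by more than 10 ppm.
+ */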
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+                  struct intel_crtc_state *crtc_state,
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct dpll clock;
+       unsigned int bestppm = 1000000;
+       /* min update 19.2 MHz */
+       int max_n = min(limit->n.max, refclk / 19200);
+       bool found = false;
+
+       target *= 5; /* fast clock */
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       /* based on hardware requirement, prefer smaller n for better precision */
+       for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+               for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+                       for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+                            clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+                               clock.p = clock.p1 * clock.p2;
+                               /* based on hardware requirement, prefer bigger m1,m2 values */
+                               for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+                                       unsigned int ppm;
+
+                                       clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+                                                                    refclk * clock.m1);
+
+                                       vlv_calc_dpll_params(refclk, &clock);
+
+                                       if (!intel_PLL_is_valid(to_i915(dev),
+                                                               limit,
+                                                               &clock))
+                                               continue;
+
+                                       if (!vlv_PLL_is_optimal(dev, target,
+                                                               &clock,
+                                                               best_clock,
+                                                               bestppm, &ppm))
+                                               continue;
+
+                                       *best_clock = clock;
+                                       bestppm = ppm;
+                                       found = true;
+                               }
+                       }
+               }
+       }
+
+       return found;
+}
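+
+/*
+ * Worked example with assumed values: for a 148500 kHz pixel clock the
+ * target becomes 742500 kHz (fast clock). With refclk = 19200 kHz, n = 1,
+ * p1 = 3, p2 = 2 (p = 6) and m1 = 2 the loop computes
+ * m2 = DIV_ROUND_CLOSEST(742500 * 6 * 1, 19200 * 2) = 116, which yields
+ * vco = 19200 * 2 * 116 = 4454400 kHz and a pixel clock of
+ * 4454400 / 6 / 5 = 148480 kHz (~134 ppm off the target).
+ */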
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+                  struct intel_crtc_state *crtc_state,
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       unsigned int best_error_ppm;
+       struct dpll clock;
+       u64 m2;
+       bool found = false;
+
+       memset(best_clock, 0, sizeof(*best_clock));
+       best_error_ppm = 1000000;
+
+       /*
+        * Based on the hardware doc, n is always set to 1 and m1 is always
+        * set to 2.  If we ever need to support a 200 MHz refclk, we will
+        * have to revisit this because n may no longer be 1.
+        */
+       clock.n = 1;
+       clock.m1 = 2;
+       target *= 5;    /* fast clock */
+
+       for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+               for (clock.p2 = limit->p2.p2_fast;
+                               clock.p2 >= limit->p2.p2_slow;
+                               clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+                       unsigned int error_ppm;
+
+                       clock.p = clock.p1 * clock.p2;
+
+                       m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+                                                  refclk * clock.m1);
+
+                       if (m2 > INT_MAX / clock.m1)
+                               continue;
+
+                       clock.m2 = m2;
+
+                       chv_calc_dpll_params(refclk, &clock);
+
+                       if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
+                               continue;
+
+                       if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+                                               best_error_ppm, &error_ppm))
+                               continue;
+
+                       *best_clock = clock;
+                       best_error_ppm = error_ppm;
+                       found = true;
+               }
+       }
+
+       return found;
+}
+
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+                       struct dpll *best_clock)
+{
+       int refclk = 100000;
+       const struct intel_limit *limit = &intel_limits_bxt;
+
+       return chv_find_best_dpll(limit, crtc_state,
+                                 crtc_state->port_clock, refclk,
+                                 NULL, best_clock);
+}
+
+bool intel_crtc_active(struct intel_crtc *crtc)
+{
+       /* Be paranoid as we can arrive here with only partial
+        * state retrieved from the hardware during setup.
+        *
+        * We can ditch the adjusted_mode.crtc_clock check as soon
+        * as Haswell has gained clock readout/fastboot support.
+        *
+        * We can ditch the crtc->primary->state->fb check as soon as we can
+        * properly reconstruct framebuffers.
+        *
+        * FIXME: The intel_crtc->active here should be switched to
+        * crtc->state->active once we have proper CRTC states wired up
+        * for atomic.
+        */
+       return crtc->active && crtc->base.primary->state->fb &&
+               crtc->config->base.adjusted_mode.crtc_clock;
+}
+
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+                                            enum pipe pipe)
+{
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+       return crtc->config->cpu_transcoder;
+}
+
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+                                   enum pipe pipe)
+{
+       i915_reg_t reg = PIPEDSL(pipe);
+       u32 line1, line2;
+       u32 line_mask;
+
+       if (IS_GEN(dev_priv, 2))
+               line_mask = DSL_LINEMASK_GEN2;
+       else
+               line_mask = DSL_LINEMASK_GEN3;
+
+       line1 = I915_READ(reg) & line_mask;
+       msleep(5);
+       line2 = I915_READ(reg) & line_mask;
+
+       return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* Wait for the display line to settle/start moving */
+       if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+               DRM_ERROR("pipe %c scanline %s wait timed out\n",
+                         pipe_name(pipe), onoff(state));
+}
+
+static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+       wait_for_pipe_scanline_moving(crtc, false);
+}
+
+static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+       wait_for_pipe_scanline_moving(crtc, true);
+}
+
+static void
+intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (INTEL_GEN(dev_priv) >= 4) {
+               enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+               i915_reg_t reg = PIPECONF(cpu_transcoder);
+
+               /* Wait for the Pipe State to go off */
+               if (intel_wait_for_register(&dev_priv->uncore,
+                                           reg, I965_PIPECONF_ACTIVE, 0,
+                                           100))
+                       WARN(1, "pipe_off wait timed out\n");
+       } else {
+               intel_wait_for_pipe_scanline_stopped(crtc);
+       }
+}
+
+/* Only for pre-ILK configs */
+void assert_pll(struct drm_i915_private *dev_priv,
+               enum pipe pipe, bool state)
+{
+       u32 val;
+       bool cur_state;
+
+       val = I915_READ(DPLL(pipe));
+       cur_state = !!(val & DPLL_VCO_ENABLE);
+       I915_STATE_WARN(cur_state != state,
+            "PLL state assertion failure (expected %s, current %s)\n",
+                       onoff(state), onoff(cur_state));
+}
+
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+       u32 val;
+       bool cur_state;
+
+       vlv_cck_get(dev_priv);
+       val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+       vlv_cck_put(dev_priv);
+
+       cur_state = val & DSI_PLL_VCO_EN;
+       I915_STATE_WARN(cur_state != state,
+            "DSI PLL state assertion failure (expected %s, current %s)\n",
+                       onoff(state), onoff(cur_state));
+}
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+                         enum pipe pipe, bool state)
+{
+       bool cur_state;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
+
+       if (HAS_DDI(dev_priv)) {
+               /* DDI does not have a specific FDI_TX register */
+               u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+               cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+       } else {
+               u32 val = I915_READ(FDI_TX_CTL(pipe));
+               cur_state = !!(val & FDI_TX_ENABLE);
+       }
+       I915_STATE_WARN(cur_state != state,
+            "FDI TX state assertion failure (expected %s, current %s)\n",
+                       onoff(state), onoff(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+                         enum pipe pipe, bool state)
+{
+       u32 val;
+       bool cur_state;
+
+       val = I915_READ(FDI_RX_CTL(pipe));
+       cur_state = !!(val & FDI_RX_ENABLE);
+       I915_STATE_WARN(cur_state != state,
+            "FDI RX state assertion failure (expected %s, current %s)\n",
+                       onoff(state), onoff(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+                                     enum pipe pipe)
+{
+       u32 val;
+
+       /* ILK FDI PLL is always enabled */
+       if (IS_GEN(dev_priv, 5))
+               return;
+
+       /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+       if (HAS_DDI(dev_priv))
+               return;
+
+       val = I915_READ(FDI_TX_CTL(pipe));
+       I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+                      enum pipe pipe, bool state)
+{
+       u32 val;
+       bool cur_state;
+
+       val = I915_READ(FDI_RX_CTL(pipe));
+       cur_state = !!(val & FDI_RX_PLL_ENABLE);
+       I915_STATE_WARN(cur_state != state,
+            "FDI RX PLL assertion failure (expected %s, current %s)\n",
+                       onoff(state), onoff(cur_state));
+}
+
+void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       i915_reg_t pp_reg;
+       u32 val;
+       enum pipe panel_pipe = INVALID_PIPE;
+       bool locked = true;
+
+       if (WARN_ON(HAS_DDI(dev_priv)))
+               return;
+
+       if (HAS_PCH_SPLIT(dev_priv)) {
+               u32 port_sel;
+
+               pp_reg = PP_CONTROL(0);
+               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+               switch (port_sel) {
+               case PANEL_PORT_SELECT_LVDS:
+                       intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPA:
+                       intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPC:
+                       intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPD:
+                       intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+                       break;
+               default:
+                       MISSING_CASE(port_sel);
+                       break;
+               }
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               /* presumably write lock depends on pipe, not port select */
+               pp_reg = PP_CONTROL(pipe);
+               panel_pipe = pipe;
+       } else {
+               u32 port_sel;
+
+               pp_reg = PP_CONTROL(0);
+               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+               WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+               intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+       }
+
+       val = I915_READ(pp_reg);
+       if (!(val & PANEL_POWER_ON) ||
+           ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
+               locked = false;
+
+       I915_STATE_WARN(panel_pipe == pipe && locked,
+            "panel assertion failure, pipe %c regs locked\n",
+            pipe_name(pipe));
+}
+
+void assert_pipe(struct drm_i915_private *dev_priv,
+                enum pipe pipe, bool state)
+{
+       bool cur_state;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+
+       /* we keep both pipes enabled on 830 */
+       if (IS_I830(dev_priv))
+               state = true;
+
+       power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (wakeref) {
+               u32 val = I915_READ(PIPECONF(cpu_transcoder));
+               cur_state = !!(val & PIPECONF_ENABLE);
+
+               intel_display_power_put(dev_priv, power_domain, wakeref);
+       } else {
+               cur_state = false;
+       }
+
+       I915_STATE_WARN(cur_state != state,
+            "pipe %c assertion failure (expected %s, current %s)\n",
+                       pipe_name(pipe), onoff(state), onoff(cur_state));
+}
+
+static void assert_plane(struct intel_plane *plane, bool state)
+{
+       enum pipe pipe;
+       bool cur_state;
+
+       cur_state = plane->get_hw_state(plane, &pipe);
+
+       I915_STATE_WARN(cur_state != state,
+                       "%s assertion failure (expected %s, current %s)\n",
+                       plane->base.name, onoff(state), onoff(cur_state));
+}
+
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
+
+static void assert_planes_disabled(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_plane *plane;
+
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+               assert_plane_disabled(plane);
+}
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+       if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+               drm_crtc_vblank_put(crtc);
+}
+
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+                                   enum pipe pipe)
+{
+       u32 val;
+       bool enabled;
+
+       val = I915_READ(PCH_TRANSCONF(pipe));
+       enabled = !!(val & TRANS_ENABLE);
+       I915_STATE_WARN(enabled,
+            "transcoder assertion failed, should be off on pipe %c but is still active\n",
+            pipe_name(pipe));
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+                                  enum pipe pipe, enum port port,
+                                  i915_reg_t dp_reg)
+{
+       enum pipe port_pipe;
+       bool state;
+
+       state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
+
+       I915_STATE_WARN(state && port_pipe == pipe,
+                       "PCH DP %c enabled on transcoder %c, should be disabled\n",
+                       port_name(port), pipe_name(pipe));
+
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+                       "IBX PCH DP %c still using transcoder B\n",
+                       port_name(port));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe, enum port port,
+                                    i915_reg_t hdmi_reg)
+{
+       enum pipe port_pipe;
+       bool state;
+
+       state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
+
+       I915_STATE_WARN(state && port_pipe == pipe,
+                       "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+                       port_name(port), pipe_name(pipe));
+
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+                       "IBX PCH HDMI %c still using transcoder B\n",
+                       port_name(port));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+                                     enum pipe pipe)
+{
+       enum pipe port_pipe;
+
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+
+       I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+                       port_pipe == pipe,
+                       "PCH VGA enabled on transcoder %c, should be disabled\n",
+                       pipe_name(pipe));
+
+       I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+                       port_pipe == pipe,
+                       "PCH LVDS enabled on transcoder %c, should be disabled\n",
+                       pipe_name(pipe));
+
+       /* PCH SDVOB is multiplexed with HDMIB */
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+}
+
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+       POSTING_READ(DPLL(pipe));
+       udelay(150);
+
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   DPLL(pipe),
+                                   DPLL_LOCK_VLV,
+                                   DPLL_LOCK_VLV,
+                                   1))
+               DRM_ERROR("DPLL %d failed to lock\n", pipe);
+}
+
+static void vlv_enable_pll(struct intel_crtc *crtc,
+                          const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       assert_pipe_disabled(dev_priv, pipe);
+
+       /* PLL is protected by panel, make sure we can write it */
+       assert_panel_unlocked(dev_priv, pipe);
+
+       if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+               _vlv_enable_pll(crtc, pipe_config);
+
+       I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+       POSTING_READ(DPLL_MD(pipe));
+}
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       enum dpio_channel port = vlv_pipe_to_channel(pipe);
+       u32 tmp;
+
+       vlv_dpio_get(dev_priv);
+
+       /* Re-enable the 10bit clock to the display controller */
+       tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+       tmp |= DPIO_DCLKP_EN;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+       vlv_dpio_put(dev_priv);
+
+       /*
+        * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+        */
+       udelay(1);
+
+       /* Enable PLL */
+       I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+
+       /* Check PLL is locked */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
+                                   1))
+               DRM_ERROR("PLL %d failed to lock\n", pipe);
+}
+
+static void chv_enable_pll(struct intel_crtc *crtc,
+                          const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       assert_pipe_disabled(dev_priv, pipe);
+
+       /* PLL is protected by panel, make sure we can write it */
+       assert_panel_unlocked(dev_priv, pipe);
+
+       if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+               _chv_enable_pll(crtc, pipe_config);
+
+       if (pipe != PIPE_A) {
+               /*
+                * WaPixelRepeatModeFixForC0:chv
+                *
+                * DPLLCMD is AWOL. Use chicken bits to propagate
+                * the value from DPLLBMD to either pipe B or C.
+                */
+               I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+               I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
+               I915_WRITE(CBR4_VLV, 0);
+               dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+               /*
+                * DPLLB VGA mode also seems to cause problems.
+                * We should always have it disabled.
+                */
+               WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+       } else {
+               I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+               POSTING_READ(DPLL_MD(pipe));
+       }
+}
+
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+{
+       if (IS_I830(dev_priv))
+               return false;
+
+       return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       i915_reg_t reg = DPLL(crtc->pipe);
+       u32 dpll = crtc_state->dpll_hw_state.dpll;
+       int i;
+
+       assert_pipe_disabled(dev_priv, crtc->pipe);
+
+       /* PLL is protected by panel, make sure we can write it */
+       if (i9xx_has_pps(dev_priv))
+               assert_panel_unlocked(dev_priv, crtc->pipe);
+
+       /*
+        * Apparently we need to have VGA mode enabled prior to changing
+        * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+        * dividers, even though the register value does change.
+        */
+       I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
+       I915_WRITE(reg, dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(reg);
+       udelay(150);
+
+       if (INTEL_GEN(dev_priv) >= 4) {
+               I915_WRITE(DPLL_MD(crtc->pipe),
+                          crtc_state->dpll_hw_state.dpll_md);
+       } else {
+               /* The pixel multiplier can only be updated once the
+                * DPLL is enabled and the clocks are stable.
+                *
+                * So write it again.
+                */
+               I915_WRITE(reg, dpll);
+       }
+
+       /* We do this three times for luck */
+       for (i = 0; i < 3; i++) {
+               I915_WRITE(reg, dpll);
+               POSTING_READ(reg);
+               udelay(150); /* wait for warmup */
+       }
+}
+
+static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* Don't disable pipe or pipe PLLs if needed */
+       if (IS_I830(dev_priv))
+               return;
+
+       /* Make sure the pipe isn't still relying on us */
+       assert_pipe_disabled(dev_priv, pipe);
+
+       I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
+       POSTING_READ(DPLL(pipe));
+}
+
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       u32 val;
+
+       /* Make sure the pipe isn't still relying on us */
+       assert_pipe_disabled(dev_priv, pipe);
+
+       val = DPLL_INTEGRATED_REF_CLK_VLV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+       if (pipe != PIPE_A)
+               val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+       I915_WRITE(DPLL(pipe), val);
+       POSTING_READ(DPLL(pipe));
+}
+
+static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       enum dpio_channel port = vlv_pipe_to_channel(pipe);
+       u32 val;
+
+       /* Make sure the pipe isn't still relying on us */
+       assert_pipe_disabled(dev_priv, pipe);
+
+       val = DPLL_SSC_REF_CLK_CHV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+       if (pipe != PIPE_A)
+               val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+       I915_WRITE(DPLL(pipe), val);
+       POSTING_READ(DPLL(pipe));
+
+       vlv_dpio_get(dev_priv);
+
+       /* Disable 10bit clock to display controller */
+       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+       val &= ~DPIO_DCLKP_EN;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+       vlv_dpio_put(dev_priv);
+}
+
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+                        struct intel_digital_port *dport,
+                        unsigned int expected_mask)
+{
+       u32 port_mask;
+       i915_reg_t dpll_reg;
+
+       switch (dport->base.port) {
+       case PORT_B:
+               port_mask = DPLL_PORTB_READY_MASK;
+               dpll_reg = DPLL(0);
+               break;
+       case PORT_C:
+               port_mask = DPLL_PORTC_READY_MASK;
+               dpll_reg = DPLL(0);
+               expected_mask <<= 4;
+               break;
+       case PORT_D:
+               port_mask = DPLL_PORTD_READY_MASK;
+               dpll_reg = DPIO_PHY_STATUS;
+               break;
+       default:
+               BUG();
+       }
+
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   dpll_reg, port_mask, expected_mask,
+                                   1000))
+               WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
+                    port_name(dport->base.port),
+                    I915_READ(dpll_reg) & port_mask, expected_mask);
+}
+
+static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 val, pipeconf_val;
+
+       /* Make sure PCH DPLL is enabled */
+       assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
+
+       /* FDI must be feeding us bits for PCH ports */
+       assert_fdi_tx_enabled(dev_priv, pipe);
+       assert_fdi_rx_enabled(dev_priv, pipe);
+
+       if (HAS_PCH_CPT(dev_priv)) {
+               /* Workaround: Set the timing override bit before enabling the
+                * pch transcoder. */
+               reg = TRANS_CHICKEN2(pipe);
+               val = I915_READ(reg);
+               val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+               I915_WRITE(reg, val);
+       }
+
+       reg = PCH_TRANSCONF(pipe);
+       val = I915_READ(reg);
+       pipeconf_val = I915_READ(PIPECONF(pipe));
+
+       if (HAS_PCH_IBX(dev_priv)) {
+               /*
+                * Make the BPC in the transcoder consistent with
+                * that in the pipeconf reg. For HDMI we must use 8bpc
+                * here for both 8bpc and 12bpc.
+                */
+               val &= ~PIPECONF_BPC_MASK;
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+                       val |= PIPECONF_8BPC;
+               else
+                       val |= pipeconf_val & PIPECONF_BPC_MASK;
+       }
+
+       val &= ~TRANS_INTERLACE_MASK;
+       if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+               if (HAS_PCH_IBX(dev_priv) &&
+                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+                       val |= TRANS_LEGACY_INTERLACED_ILK;
+               else
+                       val |= TRANS_INTERLACED;
+       } else {
+               val |= TRANS_PROGRESSIVE;
+       }
+
+       I915_WRITE(reg, val | TRANS_ENABLE);
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
+                                   100))
+               DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+}
+
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+                                     enum transcoder cpu_transcoder)
+{
+       u32 val, pipeconf_val;
+
+       /* FDI must be feeding us bits for PCH ports */
+       assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
+       assert_fdi_rx_enabled(dev_priv, PIPE_A);
+
+       /* Workaround: set timing override bit. */
+       val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+       val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+       I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+
+       val = TRANS_ENABLE;
+       pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+       if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+           PIPECONF_INTERLACED_ILK)
+               val |= TRANS_INTERLACED;
+       else
+               val |= TRANS_PROGRESSIVE;
+
+       I915_WRITE(LPT_TRANSCONF, val);
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   LPT_TRANSCONF,
+                                   TRANS_STATE_ENABLE,
+                                   TRANS_STATE_ENABLE,
+                                   100))
+               DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+                                           enum pipe pipe)
+{
+       i915_reg_t reg;
+       u32 val;
+
+       /* FDI relies on the transcoder */
+       assert_fdi_tx_disabled(dev_priv, pipe);
+       assert_fdi_rx_disabled(dev_priv, pipe);
+
+       /* Ports must be off as well */
+       assert_pch_ports_disabled(dev_priv, pipe);
+
+       reg = PCH_TRANSCONF(pipe);
+       val = I915_READ(reg);
+       val &= ~TRANS_ENABLE;
+       I915_WRITE(reg, val);
+       /* wait for PCH transcoder off, transcoder state */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   reg, TRANS_STATE_ENABLE, 0,
+                                   50))
+               DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+
+       if (HAS_PCH_CPT(dev_priv)) {
+               /* Workaround: Clear the timing override chicken bit again. */
+               reg = TRANS_CHICKEN2(pipe);
+               val = I915_READ(reg);
+               val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+               I915_WRITE(reg, val);
+       }
+}
+
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(LPT_TRANSCONF);
+       val &= ~TRANS_ENABLE;
+       I915_WRITE(LPT_TRANSCONF, val);
+       /* wait for PCH transcoder off, transcoder state */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
+                                   50))
+               DRM_ERROR("Failed to disable PCH transcoder\n");
+
+       /* Workaround: clear timing override bit. */
+       val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+       val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+       I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+}
+
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (HAS_PCH_LPT(dev_priv))
+               return PIPE_A;
+       else
+               return crtc->pipe;
+}
+
+static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       /*
+        * On i965gm the hardware frame counter reads
+        * zero when the TV encoder is enabled :(
+        */
+       if (IS_I965GM(dev_priv) &&
+           (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+               return 0;
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               return 0xffffffff; /* full 32 bit counter */
+       else if (INTEL_GEN(dev_priv) >= 3)
+               return 0xffffff; /* only 24 bits of frame count */
+       else
+               return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+       drm_crtc_set_max_vblank_count(&crtc->base,
+                                     intel_crtc_max_vblank_count(crtc_state));
+       drm_crtc_vblank_on(&crtc->base);
+}
+
+static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
+       enum pipe pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 val;
+
+       DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
+       assert_planes_disabled(crtc);
+
+       /*
+        * A pipe without a PLL won't actually be able to drive bits from
+        * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
+        * need the check.
+        */
+       if (HAS_GMCH(dev_priv)) {
+               if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
+                       assert_dsi_pll_enabled(dev_priv);
+               else
+                       assert_pll_enabled(dev_priv, pipe);
+       } else {
+               if (new_crtc_state->has_pch_encoder) {
+                       /* if driving the PCH, we need FDI enabled */
+                       assert_fdi_rx_pll_enabled(dev_priv,
+                                                 intel_crtc_pch_transcoder(crtc));
+                       assert_fdi_tx_pll_enabled(dev_priv,
+                                                 (enum pipe) cpu_transcoder);
+               }
+               /* FIXME: assert CPU port conditions for SNB+ */
+       }
+
+       trace_intel_pipe_enable(dev_priv, pipe);
+
+       reg = PIPECONF(cpu_transcoder);
+       val = I915_READ(reg);
+       if (val & PIPECONF_ENABLE) {
+               /* we keep both pipes enabled on 830 */
+               WARN_ON(!IS_I830(dev_priv));
+               return;
+       }
+
+       I915_WRITE(reg, val | PIPECONF_ENABLE);
+       POSTING_READ(reg);
+
+       /*
+        * Until the pipe starts PIPEDSL reads will return a stale value,
+        * which causes an apparent vblank timestamp jump when PIPEDSL
+        * resets to its proper value. That also messes up the frame count
+        * when it's derived from the timestamps. So let's wait for the
+        * pipe to start properly before we call drm_crtc_vblank_on()
+        */
+       if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
+               intel_wait_for_pipe_scanline_moving(crtc);
+}
+
+static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+       enum pipe pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 val;
+
+       DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
+       /*
+        * Make sure planes won't keep trying to pump pixels to us,
+        * or we might hang the display.
+        */
+       assert_planes_disabled(crtc);
+
+       trace_intel_pipe_disable(dev_priv, pipe);
+
+       reg = PIPECONF(cpu_transcoder);
+       val = I915_READ(reg);
+       if ((val & PIPECONF_ENABLE) == 0)
+               return;
+
+       /*
+        * Double wide has implications for planes
+        * so best keep it disabled when not needed.
+        */
+       if (old_crtc_state->double_wide)
+               val &= ~PIPECONF_DOUBLE_WIDE;
+
+       /* Don't disable pipe or pipe PLLs if needed */
+       if (!IS_I830(dev_priv))
+               val &= ~PIPECONF_ENABLE;
+
+       I915_WRITE(reg, val);
+       if ((val & PIPECONF_ENABLE) == 0)
+               intel_wait_for_pipe_off(old_crtc_state);
+}
+
+static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
+{
+       return IS_GEN(dev_priv, 2) ? 2048 : 4096;
+}
+
+static unsigned int
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(fb->dev);
+       unsigned int cpp = fb->format->cpp[color_plane];
+
+       switch (fb->modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+               return intel_tile_size(dev_priv);
+       case I915_FORMAT_MOD_X_TILED:
+               if (IS_GEN(dev_priv, 2))
+                       return 128;
+               else
+                       return 512;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+               if (color_plane == 1)
+                       return 128;
+               /* fall through */
+       case I915_FORMAT_MOD_Y_TILED:
+               if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
+                       return 128;
+               else
+                       return 512;
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               if (color_plane == 1)
+                       return 128;
+               /* fall through */
+       case I915_FORMAT_MOD_Yf_TILED:
+               switch (cpp) {
+               case 1:
+                       return 64;
+               case 2:
+               case 4:
+                       return 128;
+               case 8:
+               case 16:
+                       return 256;
+               default:
+                       MISSING_CASE(cpp);
+                       return cpp;
+               }
+               break;
+       default:
+               MISSING_CASE(fb->modifier);
+               return cpp;
+       }
+}
+
+static unsigned int
+intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+{
+       return intel_tile_size(to_i915(fb->dev)) /
+               intel_tile_width_bytes(fb, color_plane);
+}
+
+/* Return the tile dimensions in pixel units */
+static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
+                           unsigned int *tile_width,
+                           unsigned int *tile_height)
+{
+       unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
+       unsigned int cpp = fb->format->cpp[color_plane];
+
+       *tile_width = tile_width_bytes / cpp;
+       *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
+}
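+
+/*
+ * Example with assumed values: for an X-tiled framebuffer on a non-gen2
+ * platform the tile row is 512 bytes and the tile size 4096 bytes, so
+ * with cpp = 4 (e.g. XRGB8888) intel_tile_dims() reports a 128x8 pixel
+ * tile.
+ */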
+
+unsigned int
+intel_fb_align_height(const struct drm_framebuffer *fb,
+                     int color_plane, unsigned int height)
+{
+       unsigned int tile_height = intel_tile_height(fb, color_plane);
+
+       return ALIGN(height, tile_height);
+}
+
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
+{
+       unsigned int size = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
+               size += rot_info->plane[i].width * rot_info->plane[i].height;
+
+       return size;
+}
+
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
+{
+       unsigned int size = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
+               size += rem_info->plane[i].width * rem_info->plane[i].height;
+
+       return size;
+}
+
+static void
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
+                       const struct drm_framebuffer *fb,
+                       unsigned int rotation)
+{
+       view->type = I915_GGTT_VIEW_NORMAL;
+       if (drm_rotation_90_or_270(rotation)) {
+               view->type = I915_GGTT_VIEW_ROTATED;
+               view->rotated = to_intel_framebuffer(fb)->rot_info;
+       }
+}
+
+static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
+{
+       if (IS_I830(dev_priv))
+               return 16 * 1024;
+       else if (IS_I85X(dev_priv))
+               return 256;
+       else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+               return 32;
+       else
+               return 4 * 1024;
+}
+
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) >= 9)
+               return 256 * 1024;
+       else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
+                IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return 128 * 1024;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               return 4 * 1024;
+       else
+               return 0;
+}
+
+static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+                                        int color_plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+       /* AUX_DIST needs only 4K alignment */
+       if (color_plane == 1)
+               return 4096;
+
+       switch (fb->modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+               return intel_linear_alignment(dev_priv);
+       case I915_FORMAT_MOD_X_TILED:
+               if (INTEL_GEN(dev_priv) >= 9)
+                       return 256 * 1024;
+               return 0;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               return 1 * 1024 * 1024;
+       default:
+               MISSING_CASE(fb->modifier);
+               return 0;
+       }
+}
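+
+/*
+ * Example: a Y-tiled or Yf-tiled color plane needs 1 MiB alignment, while
+ * its CCS aux plane (color_plane == 1) only needs 4 KiB; linear surfaces
+ * fall back to intel_linear_alignment(), e.g. 256 KiB on gen9+.
+ */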
+
+static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+       return INTEL_GEN(dev_priv) < 4 ||
+               (plane->has_fbc &&
+                plane_state->view.type == I915_GGTT_VIEW_NORMAL);
+}
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+                          const struct i915_ggtt_view *view,
+                          bool uses_fence,
+                          unsigned long *out_flags)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       intel_wakeref_t wakeref;
+       struct i915_vma *vma;
+       unsigned int pinctl;
+       u32 alignment;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       alignment = intel_surf_alignment(fb, 0);
+
+       /* Note that the w/a also requires 64 PTEs of padding following the
+        * bo. We currently fill all unused PTEs with the shadow page, so we
+        * should always have valid PTEs following the scanout, preventing
+        * the VT-d warning.
+        */
+       if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
+               alignment = 256 * 1024;
+
+       /*
+        * Global GTT PTE registers are special registers which actually forward
+        * writes to a chunk of system memory, which means that there is no risk
+        * that the register values disappear as soon as we call
+        * intel_runtime_pm_put(), so it is correct to wrap only the
+        * pin/unpin/fence and not more.
+        */
+       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+       i915_gem_object_lock(obj);
+
+       atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+       pinctl = 0;
+
+       /* Valleyview is definitely limited to scanning out the first
+        * 512MiB. Let's presume this behaviour was inherited from the
+        * g4x display engine and that all earlier gens are similarly
+        * limited. Testing suggests that it is a little more
+        * complicated than this. For example, Cherryview appears quite
+        * happy to scan out from anywhere within its global aperture.
+        */
+       if (HAS_GMCH(dev_priv))
+               pinctl |= PIN_MAPPABLE;
+
+       vma = i915_gem_object_pin_to_display_plane(obj,
+                                                  alignment, view, pinctl);
+       if (IS_ERR(vma))
+               goto err;
+
+       if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+               int ret;
+
+               /* Install a fence for tiled scan-out. Pre-i965 always needs a
+                * fence, whereas 965+ only requires a fence if using
+                * framebuffer compression.  For simplicity, we always install
+                * a fence when possible, as the cost is not that onerous.
+                *
+                * If we fail to fence the tiled scanout, then either the
+                * modeset will reject the change (which is highly unlikely as
+                * the affected systems, all but one, do not have unmappable
+                * space) or we will not be able to enable full powersaving
+                * techniques (also likely not to apply due to various limits
+                * FBC and the like impose on the size of the buffer, which
+                * presumably we violated anyway with this unmappable buffer).
+                * Anyway, it is presumably better to stumble onwards with
+                * something and try to run the system in a "less than optimal"
+                * mode that matches the user configuration.
+                */
+               ret = i915_vma_pin_fence(vma);
+               if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+                       i915_gem_object_unpin_from_display_plane(vma);
+                       vma = ERR_PTR(ret);
+                       goto err;
+               }
+
+               if (ret == 0 && vma->fence)
+                       *out_flags |= PLANE_HAS_FENCE;
+       }
+
+       i915_vma_get(vma);
+err:
+       atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+       i915_gem_object_unlock(obj);
+       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+       return vma;
+}
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+{
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+       i915_gem_object_lock(vma->obj);
+       if (flags & PLANE_HAS_FENCE)
+               i915_vma_unpin_fence(vma);
+       i915_gem_object_unpin_from_display_plane(vma);
+       i915_gem_object_unlock(vma->obj);
+
+       i915_vma_put(vma);
+}
+
+static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
+                         unsigned int rotation)
+{
+       if (drm_rotation_90_or_270(rotation))
+               return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
+       else
+               return fb->pitches[color_plane];
+}
+
+/*
+ * Convert the x/y offsets into a linear offset.
+ * Only valid with 0/180 degree rotation, which is fine since linear
+ * offset is only used with linear buffers on pre-hsw and tiled buffers
+ * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
+ */
+u32 intel_fb_xy_to_linear(int x, int y,
+                         const struct intel_plane_state *state,
+                         int color_plane)
+{
+       const struct drm_framebuffer *fb = state->base.fb;
+       unsigned int cpp = fb->format->cpp[color_plane];
+       unsigned int pitch = state->color_plane[color_plane].stride;
+
+       return y * pitch + x * cpp;
+}
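+
+/*
+ * E.g. for XRGB8888 (cpp = 4) with a 4096 byte stride, x = 10, y = 3
+ * yields 3 * 4096 + 10 * 4 = 12328 bytes from the start of the plane.
+ */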
+
+/*
+ * Add the x/y offsets derived from fb->offsets[] to the user
+ * specified plane src x/y offsets. The resulting x/y offsets
+ * specify the start of scanout from the beginning of the gtt mapping.
+ */
+void intel_add_fb_offsets(int *x, int *y,
+                         const struct intel_plane_state *state,
+                         int color_plane)
+{
+       *x += state->color_plane[color_plane].x;
+       *y += state->color_plane[color_plane].y;
+}
+
+static u32 intel_adjust_tile_offset(int *x, int *y,
+                                   unsigned int tile_width,
+                                   unsigned int tile_height,
+                                   unsigned int tile_size,
+                                   unsigned int pitch_tiles,
+                                   u32 old_offset,
+                                   u32 new_offset)
+{
+       unsigned int pitch_pixels = pitch_tiles * tile_width;
+       unsigned int tiles;
+
+       WARN_ON(old_offset & (tile_size - 1));
+       WARN_ON(new_offset & (tile_size - 1));
+       WARN_ON(new_offset > old_offset);
+
+       tiles = (old_offset - new_offset) / tile_size;
+
+       *y += tiles / pitch_tiles * tile_height;
+       *x += tiles % pitch_tiles * tile_width;
+
+       /* minimize x in case it got needlessly big */
+       *y += *x / pitch_pixels * tile_height;
+       *x %= pitch_pixels;
+
+       return new_offset;
+}
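+
+/*
+ * E.g. assuming X-tiling (128x8 pixel tiles of 4096 bytes at 4 bpp) and
+ * pitch_tiles = 8: moving from old_offset = 3 * 4096 to new_offset = 0,
+ * starting at x = y = 0, ends up at x = 3 * 128 = 384, y = 0.
+ */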
+
+static bool is_surface_linear(u64 modifier, int color_plane)
+{
+       return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
+static u32 intel_adjust_aligned_offset(int *x, int *y,
+                                      const struct drm_framebuffer *fb,
+                                      int color_plane,
+                                      unsigned int rotation,
+                                      unsigned int pitch,
+                                      u32 old_offset, u32 new_offset)
+{
+       struct drm_i915_private *dev_priv = to_i915(fb->dev);
+       unsigned int cpp = fb->format->cpp[color_plane];
+
+       WARN_ON(new_offset > old_offset);
+
+       if (!is_surface_linear(fb->modifier, color_plane)) {
+               unsigned int tile_size, tile_width, tile_height;
+               unsigned int pitch_tiles;
+
+               tile_size = intel_tile_size(dev_priv);
+               intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+               if (drm_rotation_90_or_270(rotation)) {
+                       pitch_tiles = pitch / tile_height;
+                       swap(tile_width, tile_height);
+               } else {
+                       pitch_tiles = pitch / (tile_width * cpp);
+               }
+
+               intel_adjust_tile_offset(x, y, tile_width, tile_height,
+                                        tile_size, pitch_tiles,
+                                        old_offset, new_offset);
+       } else {
+               old_offset += *y * pitch + *x * cpp;
+
+               *y = (old_offset - new_offset) / pitch;
+               *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+       }
+
+       return new_offset;
+}
+
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+                                            const struct intel_plane_state *state,
+                                            int color_plane,
+                                            u32 old_offset, u32 new_offset)
+{
+       return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
+                                          state->base.rotation,
+                                          state->color_plane[color_plane].stride,
+                                          old_offset, new_offset);
+}
+
+/*
+ * Computes the aligned offset to the base tile and adjusts
+ * x, y. Bytes per pixel is assumed to be a power of two.
+ *
+ * In the 90/270 rotated case, x and y are assumed
+ * to be already rotated to match the rotated GTT view, and
+ * pitch is the tile_height aligned framebuffer height.
+ *
+ * This function is used when computing the derived information
+ * under intel_framebuffer, so using any of that information
+ * here is not allowed. Anything under drm_framebuffer can be
+ * used. This is why the user has to pass in the pitch since it
+ * is specified in the rotated orientation.
+ */
+static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
+                                       int *x, int *y,
+                                       const struct drm_framebuffer *fb,
+                                       int color_plane,
+                                       unsigned int pitch,
+                                       unsigned int rotation,
+                                       u32 alignment)
+{
+       unsigned int cpp = fb->format->cpp[color_plane];
+       u32 offset, offset_aligned;
+
+       if (alignment)
+               alignment--;
+
+       if (!is_surface_linear(fb->modifier, color_plane)) {
+               unsigned int tile_size, tile_width, tile_height;
+               unsigned int tile_rows, tiles, pitch_tiles;
+
+               tile_size = intel_tile_size(dev_priv);
+               intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+               if (drm_rotation_90_or_270(rotation)) {
+                       pitch_tiles = pitch / tile_height;
+                       swap(tile_width, tile_height);
+               } else {
+                       pitch_tiles = pitch / (tile_width * cpp);
+               }
+
+               tile_rows = *y / tile_height;
+               *y %= tile_height;
+
+               tiles = *x / tile_width;
+               *x %= tile_width;
+
+               offset = (tile_rows * pitch_tiles + tiles) * tile_size;
+               offset_aligned = offset & ~alignment;
+
+               intel_adjust_tile_offset(x, y, tile_width, tile_height,
+                                        tile_size, pitch_tiles,
+                                        offset, offset_aligned);
+       } else {
+               offset = *y * pitch + *x * cpp;
+               offset_aligned = offset & ~alignment;
+
+               *y = (offset & alignment) / pitch;
+               *x = ((offset & alignment) - *y * pitch) / cpp;
+       }
+
+       return offset_aligned;
+}
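+
+/*
+ * E.g. assuming Y-tiling (32x32 pixel tiles at 4 bpp), pitch = 4096 bytes
+ * (pitch_tiles = 32), x = 40, y = 35 and alignment = 256K: the raw tile
+ * offset is (1 * 32 + 1) * 4096 = 135168 bytes, which is below the
+ * alignment, so offset_aligned is 0 and the whole distance is pushed
+ * back into the x/y offsets, leaving x = 40, y = 35 again.
+ */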
+
+static u32 intel_plane_compute_aligned_offset(int *x, int *y,
+                                             const struct intel_plane_state *state,
+                                             int color_plane)
+{
+       struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+       const struct drm_framebuffer *fb = state->base.fb;
+       unsigned int rotation = state->base.rotation;
+       int pitch = state->color_plane[color_plane].stride;
+       u32 alignment;
+
+       if (intel_plane->id == PLANE_CURSOR)
+               alignment = intel_cursor_alignment(dev_priv);
+       else
+               alignment = intel_surf_alignment(fb, color_plane);
+
+       return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
+                                           pitch, rotation, alignment);
+}
+
+/* Convert the fb->offset[] into x/y offsets */
+static int intel_fb_offset_to_xy(int *x, int *y,
+                                const struct drm_framebuffer *fb,
+                                int color_plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(fb->dev);
+       unsigned int height;
+
+       if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
+           fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+               DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+                             fb->offsets[color_plane], color_plane);
+               return -EINVAL;
+       }
+
+       height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+       height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+       /* Catch potential overflows early */
+       if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+                           fb->offsets[color_plane])) {
+               DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+                             fb->offsets[color_plane], fb->pitches[color_plane],
+                             color_plane);
+               return -ERANGE;
+       }
+
+       *x = 0;
+       *y = 0;
+
+       intel_adjust_aligned_offset(x, y,
+                                   fb, color_plane, DRM_MODE_ROTATE_0,
+                                   fb->pitches[color_plane],
+                                   fb->offsets[color_plane], 0);
+
+       return 0;
+}
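+
+/*
+ * E.g. a linear plane with a 4096 byte stride and cpp = 4 whose
+ * fb->offsets[] entry is 8232 decomposes into y = 2, x = 10
+ * (8232 = 2 * 4096 + 10 * 4).
+ */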
+
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+       switch (fb_modifier) {
+       case I915_FORMAT_MOD_X_TILED:
+               return I915_TILING_X;
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+               return I915_TILING_Y;
+       default:
+               return I915_TILING_NONE;
+       }
+}
+
+/*
+ * From the Sky Lake PRM:
+ * "The Color Control Surface (CCS) contains the compression status of
+ *  the cache-line pairs. The compression state of the cache-line pair
+ *  is specified by 2 bits in the CCS. Each CCS cache-line represents
+ *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
+ *  cache-line-pairs. CCS is always Y tiled."
+ *
+ * Since cache-line pairs refer to horizontally adjacent cache lines,
+ * each cache line in the CCS corresponds to an area of 32x16 cache
+ * lines on the main surface. Since each pixel is 4 bytes, this gives
+ * us a ratio of one byte in the CCS for each 8x16 pixels in the
+ * main surface.
+ */
+static const struct drm_format_info ccs_formats[] = {
+       { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+       { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+       { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+       { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+};
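+
+/*
+ * The cpp = { 4, 1 }, hsub = 8, vsub = 16 values above encode that ratio:
+ * one CCS byte covers an 8x16 pixel area of the 4 bpp main surface,
+ * i.e. 8 * 16 * 4 = 512 main surface bytes per CCS byte.
+ */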
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+                  int num_formats, u32 format)
+{
+       int i;
+
+       for (i = 0; i < num_formats; i++) {
+               if (formats[i].format == format)
+                       return &formats[i];
+       }
+
+       return NULL;
+}
+
+static const struct drm_format_info *
+intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+       switch (cmd->modifier[0]) {
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               return lookup_format_info(ccs_formats,
+                                         ARRAY_SIZE(ccs_formats),
+                                         cmd->pixel_format);
+       default:
+               return NULL;
+       }
+}
+
+bool is_ccs_modifier(u64 modifier)
+{
+       return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+              modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+}
+
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+                             u32 pixel_format, u64 modifier)
+{
+       struct intel_crtc *crtc;
+       struct intel_plane *plane;
+
+       /*
+        * We assume the primary plane for pipe A has
+        * the highest stride limits of them all.
+        */
+       crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+       plane = to_intel_plane(crtc->base.primary);
+
+       return plane->max_stride(plane, pixel_format, modifier,
+                                DRM_MODE_ROTATE_0);
+}
+
+static
+u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+                       u32 pixel_format, u64 modifier)
+{
+       /*
+        * Arbitrary limit for gen4+ chosen to match the
+        * render engine max stride.
+        *
+        * The new CCS hash mode makes remapping impossible.
+        */
+       if (!is_ccs_modifier(modifier)) {
+               if (INTEL_GEN(dev_priv) >= 7)
+                       return 256*1024;
+               else if (INTEL_GEN(dev_priv) >= 4)
+                       return 128*1024;
+       }
+
+       return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+}
+
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+       if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+               u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+                                                          fb->format->format,
+                                                          fb->modifier);
+
+               /*
+                * To make remapping with linear generally feasible
+                * we need the stride to be page aligned.
+                */
+               if (fb->pitches[color_plane] > max_stride)
+                       return intel_tile_size(dev_priv);
+               else
+                       return 64;
+       } else {
+               return intel_tile_width_bytes(fb, color_plane);
+       }
+}
+
+bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int i;
+
+       /* We don't want to deal with remapping with cursors */
+       if (plane->id == PLANE_CURSOR)
+               return false;
+
+       /*
+        * The display engine limits already match/exceed the
+        * render engine limits, so not much point in remapping.
+        * Would also need to deal with the fence POT alignment
+        * and gen2 2KiB GTT tile size.
+        */
+       if (INTEL_GEN(dev_priv) < 4)
+               return false;
+
+       /*
+        * The new CCS hash mode isn't compatible with remapping as
+        * the virtual address of the pages affects the compressed data.
+        */
+       if (is_ccs_modifier(fb->modifier))
+               return false;
+
+       /* Linear needs a page aligned stride for remapping */
+       if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+               unsigned int alignment = intel_tile_size(dev_priv) - 1;
+
+               for (i = 0; i < fb->format->num_planes; i++) {
+                       if (fb->pitches[i] & alignment)
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       u32 stride, max_stride;
+
+       /*
+        * No remapping for invisible planes since we don't have
+        * an actual source viewport to remap.
+        */
+       if (!plane_state->base.visible)
+               return false;
+
+       if (!intel_plane_can_remap(plane_state))
+               return false;
+
+       /*
+        * FIXME: aux plane limits on gen9+ are
+        * unclear in Bspec; no checking for now.
+        */
+       stride = intel_fb_pitch(fb, 0, rotation);
+       max_stride = plane->max_stride(plane, fb->format->format,
+                                      fb->modifier, rotation);
+
+       return stride > max_stride;
+}
+
+static int
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+                  struct drm_framebuffer *fb)
+{
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       u32 gtt_offset_rotated = 0;
+       unsigned int max_size = 0;
+       int i, num_planes = fb->format->num_planes;
+       unsigned int tile_size = intel_tile_size(dev_priv);
+
+       for (i = 0; i < num_planes; i++) {
+               unsigned int width, height;
+               unsigned int cpp, size;
+               u32 offset;
+               int x, y;
+               int ret;
+
+               cpp = fb->format->cpp[i];
+               width = drm_framebuffer_plane_width(fb->width, fb, i);
+               height = drm_framebuffer_plane_height(fb->height, fb, i);
+
+               ret = intel_fb_offset_to_xy(&x, &y, fb, i);
+               if (ret) {
+                       DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
+                                     i, fb->offsets[i]);
+                       return ret;
+               }
+
+               if (is_ccs_modifier(fb->modifier) && i == 1) {
+                       int hsub = fb->format->hsub;
+                       int vsub = fb->format->vsub;
+                       int tile_width, tile_height;
+                       int main_x, main_y;
+                       int ccs_x, ccs_y;
+
+                       intel_tile_dims(fb, i, &tile_width, &tile_height);
+                       tile_width *= hsub;
+                       tile_height *= vsub;
+
+                       ccs_x = (x * hsub) % tile_width;
+                       ccs_y = (y * vsub) % tile_height;
+                       main_x = intel_fb->normal[0].x % tile_width;
+                       main_y = intel_fb->normal[0].y % tile_height;
+
+                       /*
+                        * CCS doesn't have its own x/y offset register, so the intra CCS tile
+                        * x/y offsets must match between CCS and the main surface.
+                        */
+                       if (main_x != ccs_x || main_y != ccs_y) {
+                               DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+                                             main_x, main_y,
+                                             ccs_x, ccs_y,
+                                             intel_fb->normal[0].x,
+                                             intel_fb->normal[0].y,
+                                             x, y);
+                               return -EINVAL;
+                       }
+               }
+
+               /*
+                * The fence (if used) is aligned to the start of the object
+                * so having the framebuffer wrap around across the edge of the
+                * fenced region doesn't really work. We have no API to configure
+                * the fence start offset within the object (nor could we probably
+                * on gen2/3). So it's easier if we just require that the
+                * fb layout agrees with the fence layout. We already check that the
+                * fb stride matches the fence stride elsewhere.
+                */
+               if (i == 0 && i915_gem_object_is_tiled(obj) &&
+                   (x + width) * cpp > fb->pitches[i]) {
+                       DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
+                                     i, fb->offsets[i]);
+                       return -EINVAL;
+               }
+
+               /*
+                * First pixel of the framebuffer from
+                * the start of the normal gtt mapping.
+                */
+               intel_fb->normal[i].x = x;
+               intel_fb->normal[i].y = y;
+
+               offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
+                                                     fb->pitches[i],
+                                                     DRM_MODE_ROTATE_0,
+                                                     tile_size);
+               offset /= tile_size;
+
+               if (!is_surface_linear(fb->modifier, i)) {
+                       unsigned int tile_width, tile_height;
+                       unsigned int pitch_tiles;
+                       struct drm_rect r;
+
+                       intel_tile_dims(fb, i, &tile_width, &tile_height);
+
+                       rot_info->plane[i].offset = offset;
+                       rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
+                       rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+                       rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+                       intel_fb->rotated[i].pitch =
+                               rot_info->plane[i].height * tile_height;
+
+                       /* how many tiles does this plane need */
+                       size = rot_info->plane[i].stride * rot_info->plane[i].height;
+                       /*
+                        * If the plane isn't horizontally tile aligned,
+                        * we need one more tile.
+                        */
+                       if (x != 0)
+                               size++;
+
+                       /* rotate the x/y offsets to match the GTT view */
+                       r.x1 = x;
+                       r.y1 = y;
+                       r.x2 = x + width;
+                       r.y2 = y + height;
+                       drm_rect_rotate(&r,
+                                       rot_info->plane[i].width * tile_width,
+                                       rot_info->plane[i].height * tile_height,
+                                       DRM_MODE_ROTATE_270);
+                       x = r.x1;
+                       y = r.y1;
+
+                       /* rotate the tile dimensions to match the GTT view */
+                       pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
+                       swap(tile_width, tile_height);
+
+                       /*
+                        * We only keep the x/y offsets, so push all of the
+                        * gtt offset into the x/y offsets.
+                        */
+                       intel_adjust_tile_offset(&x, &y,
+                                                tile_width, tile_height,
+                                                tile_size, pitch_tiles,
+                                                gtt_offset_rotated * tile_size, 0);
+
+                       gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
+
+                       /*
+                        * First pixel of the framebuffer from
+                        * the start of the rotated gtt mapping.
+                        */
+                       intel_fb->rotated[i].x = x;
+                       intel_fb->rotated[i].y = y;
+               } else {
+                       size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
+                                           x * cpp, tile_size);
+               }
+
+               /* how many tiles in total needed in the bo */
+               max_size = max(max_size, offset + size);
+       }
+
+       if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+               DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
+                             mul_u32_u32(max_size, tile_size), obj->base.size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void
+intel_plane_remap_gtt(struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct intel_rotation_info *info = &plane_state->view.rotated;
+       unsigned int rotation = plane_state->base.rotation;
+       int i, num_planes = fb->format->num_planes;
+       unsigned int tile_size = intel_tile_size(dev_priv);
+       unsigned int src_x, src_y;
+       unsigned int src_w, src_h;
+       u32 gtt_offset = 0;
+
+       memset(&plane_state->view, 0, sizeof(plane_state->view));
+       plane_state->view.type = drm_rotation_90_or_270(rotation) ?
+               I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
+
+       src_x = plane_state->base.src.x1 >> 16;
+       src_y = plane_state->base.src.y1 >> 16;
+       src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+       WARN_ON(is_ccs_modifier(fb->modifier));
+
+       /* Make src coordinates relative to the viewport */
+       drm_rect_translate(&plane_state->base.src,
+                          -(src_x << 16), -(src_y << 16));
+
+       /* Rotate src coordinates to match rotated GTT view */
+       if (drm_rotation_90_or_270(rotation))
+               drm_rect_rotate(&plane_state->base.src,
+                               src_w << 16, src_h << 16,
+                               DRM_MODE_ROTATE_270);
+
+       for (i = 0; i < num_planes; i++) {
+               unsigned int hsub = i ? fb->format->hsub : 1;
+               unsigned int vsub = i ? fb->format->vsub : 1;
+               unsigned int cpp = fb->format->cpp[i];
+               unsigned int tile_width, tile_height;
+               unsigned int width, height;
+               unsigned int pitch_tiles;
+               unsigned int x, y;
+               u32 offset;
+
+               intel_tile_dims(fb, i, &tile_width, &tile_height);
+
+               x = src_x / hsub;
+               y = src_y / vsub;
+               width = src_w / hsub;
+               height = src_h / vsub;
+
+               /*
+                * First pixel of the src viewport from the
+                * start of the normal gtt mapping.
+                */
+               x += intel_fb->normal[i].x;
+               y += intel_fb->normal[i].y;
+
+               offset = intel_compute_aligned_offset(dev_priv, &x, &y,
+                                                     fb, i, fb->pitches[i],
+                                                     DRM_MODE_ROTATE_0, tile_size);
+               offset /= tile_size;
+
+               info->plane[i].offset = offset;
+               info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
+                                                    tile_width * cpp);
+               info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+               info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+               if (drm_rotation_90_or_270(rotation)) {
+                       struct drm_rect r;
+
+                       /* rotate the x/y offsets to match the GTT view */
+                       r.x1 = x;
+                       r.y1 = y;
+                       r.x2 = x + width;
+                       r.y2 = y + height;
+                       drm_rect_rotate(&r,
+                                       info->plane[i].width * tile_width,
+                                       info->plane[i].height * tile_height,
+                                       DRM_MODE_ROTATE_270);
+                       x = r.x1;
+                       y = r.y1;
+
+                       pitch_tiles = info->plane[i].height;
+                       plane_state->color_plane[i].stride = pitch_tiles * tile_height;
+
+                       /* rotate the tile dimensions to match the GTT view */
+                       swap(tile_width, tile_height);
+               } else {
+                       pitch_tiles = info->plane[i].width;
+                       plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
+               }
+
+               /*
+                * We only keep the x/y offsets, so push all of the
+                * gtt offset into the x/y offsets.
+                */
+               intel_adjust_tile_offset(&x, &y,
+                                        tile_width, tile_height,
+                                        tile_size, pitch_tiles,
+                                        gtt_offset * tile_size, 0);
+
+               gtt_offset += info->plane[i].width * info->plane[i].height;
+
+               plane_state->color_plane[i].offset = 0;
+               plane_state->color_plane[i].x = x;
+               plane_state->color_plane[i].y = y;
+       }
+}
+
+static int
+intel_plane_compute_gtt(struct intel_plane_state *plane_state)
+{
+       const struct intel_framebuffer *fb =
+               to_intel_framebuffer(plane_state->base.fb);
+       unsigned int rotation = plane_state->base.rotation;
+       int i, num_planes;
+
+       if (!fb)
+               return 0;
+
+       num_planes = fb->base.format->num_planes;
+
+       if (intel_plane_needs_remap(plane_state)) {
+               intel_plane_remap_gtt(plane_state);
+
+               /*
+                * Sometimes even remapping can't overcome
+                * the stride limitations :( This can happen with
+                * big plane sizes and suitably misaligned
+                * offsets.
+                */
+               return intel_plane_check_stride(plane_state);
+       }
+
+       intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
+
+       for (i = 0; i < num_planes; i++) {
+               plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
+               plane_state->color_plane[i].offset = 0;
+
+               if (drm_rotation_90_or_270(rotation)) {
+                       plane_state->color_plane[i].x = fb->rotated[i].x;
+                       plane_state->color_plane[i].y = fb->rotated[i].y;
+               } else {
+                       plane_state->color_plane[i].x = fb->normal[i].x;
+                       plane_state->color_plane[i].y = fb->normal[i].y;
+               }
+       }
+
+       /* Rotate src coordinates to match rotated GTT view */
+       if (drm_rotation_90_or_270(rotation))
+               drm_rect_rotate(&plane_state->base.src,
+                               fb->base.width << 16, fb->base.height << 16,
+                               DRM_MODE_ROTATE_270);
+
+       return intel_plane_check_stride(plane_state);
+}
+
+static int i9xx_format_to_fourcc(int format)
+{
+       switch (format) {
+       case DISPPLANE_8BPP:
+               return DRM_FORMAT_C8;
+       case DISPPLANE_BGRX555:
+               return DRM_FORMAT_XRGB1555;
+       case DISPPLANE_BGRX565:
+               return DRM_FORMAT_RGB565;
+       default:
+       case DISPPLANE_BGRX888:
+               return DRM_FORMAT_XRGB8888;
+       case DISPPLANE_RGBX888:
+               return DRM_FORMAT_XBGR8888;
+       case DISPPLANE_BGRX101010:
+               return DRM_FORMAT_XRGB2101010;
+       case DISPPLANE_RGBX101010:
+               return DRM_FORMAT_XBGR2101010;
+       }
+}
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+       switch (format) {
+       case PLANE_CTL_FORMAT_RGB_565:
+               return DRM_FORMAT_RGB565;
+       case PLANE_CTL_FORMAT_NV12:
+               return DRM_FORMAT_NV12;
+       case PLANE_CTL_FORMAT_P010:
+               return DRM_FORMAT_P010;
+       case PLANE_CTL_FORMAT_P012:
+               return DRM_FORMAT_P012;
+       case PLANE_CTL_FORMAT_P016:
+               return DRM_FORMAT_P016;
+       case PLANE_CTL_FORMAT_Y210:
+               return DRM_FORMAT_Y210;
+       case PLANE_CTL_FORMAT_Y212:
+               return DRM_FORMAT_Y212;
+       case PLANE_CTL_FORMAT_Y216:
+               return DRM_FORMAT_Y216;
+       case PLANE_CTL_FORMAT_Y410:
+               return DRM_FORMAT_XVYU2101010;
+       case PLANE_CTL_FORMAT_Y412:
+               return DRM_FORMAT_XVYU12_16161616;
+       case PLANE_CTL_FORMAT_Y416:
+               return DRM_FORMAT_XVYU16161616;
+       default:
+       case PLANE_CTL_FORMAT_XRGB_8888:
+               if (rgb_order) {
+                       if (alpha)
+                               return DRM_FORMAT_ABGR8888;
+                       else
+                               return DRM_FORMAT_XBGR8888;
+               } else {
+                       if (alpha)
+                               return DRM_FORMAT_ARGB8888;
+                       else
+                               return DRM_FORMAT_XRGB8888;
+               }
+       case PLANE_CTL_FORMAT_XRGB_2101010:
+               if (rgb_order)
+                       return DRM_FORMAT_XBGR2101010;
+               else
+                       return DRM_FORMAT_XRGB2101010;
+       case PLANE_CTL_FORMAT_XRGB_16161616F:
+               if (rgb_order) {
+                       if (alpha)
+                               return DRM_FORMAT_ABGR16161616F;
+                       else
+                               return DRM_FORMAT_XBGR16161616F;
+               } else {
+                       if (alpha)
+                               return DRM_FORMAT_ARGB16161616F;
+                       else
+                               return DRM_FORMAT_XRGB16161616F;
+               }
+       }
+}
+
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+                             struct intel_initial_plane_config *plane_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_gem_object *obj = NULL;
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+       struct drm_framebuffer *fb = &plane_config->fb->base;
+       u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+       u32 size_aligned = round_up(plane_config->base + plane_config->size,
+                                   PAGE_SIZE);
+
+       size_aligned -= base_aligned;
+
+       if (plane_config->size == 0)
+               return false;
+
+       /* If the FB is too big, just don't use it since fbdev is not very
+        * important and we should probably use that space with FBC or other
+        * features. */
+       if (size_aligned * 2 > dev_priv->stolen_usable_size)
+               return false;
+
+       switch (fb->modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+               break;
+       default:
+               DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
+                                fb->modifier);
+               return false;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
+                                                            base_aligned,
+                                                            base_aligned,
+                                                            size_aligned);
+       mutex_unlock(&dev->struct_mutex);
+       if (!obj)
+               return false;
+
+       switch (plane_config->tiling) {
+       case I915_TILING_NONE:
+               break;
+       case I915_TILING_X:
+       case I915_TILING_Y:
+               obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
+               break;
+       default:
+               MISSING_CASE(plane_config->tiling);
+               return false;
+       }
+
+       mode_cmd.pixel_format = fb->format->format;
+       mode_cmd.width = fb->width;
+       mode_cmd.height = fb->height;
+       mode_cmd.pitches[0] = fb->pitches[0];
+       mode_cmd.modifier[0] = fb->modifier;
+       mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+       if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
+               DRM_DEBUG_KMS("intel fb init failed\n");
+               goto out_unref_obj;
+       }
+
+       DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
+       return true;
+
+out_unref_obj:
+       i915_gem_object_put(obj);
+       return false;
+}
+
+static void
+intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+                       struct intel_plane_state *plane_state,
+                       bool visible)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+
+       plane_state->base.visible = visible;
+
+       if (visible)
+               crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
+       else
+               crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
+}
+
+static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       struct drm_plane *plane;
+
+       /*
+        * Active_planes aliases if multiple "primary" or cursor planes
+        * have been used on the same (or wrong) pipe. plane_mask uses
+        * unique ids, hence we can use that to reconstruct active_planes.
+        */
+       crtc_state->active_planes = 0;
+
+       drm_for_each_plane_mask(plane, &dev_priv->drm,
+                               crtc_state->base.plane_mask)
+               crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+}
+
+static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+                                        struct intel_plane *plane)
+{
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(plane->base.state);
+
+       DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+                     plane->base.base.id, plane->base.name,
+                     crtc->base.base.id, crtc->base.name);
+
+       intel_set_plane_visible(crtc_state, plane_state, false);
+       fixup_active_planes(crtc_state);
+       crtc_state->data_rate[plane->id] = 0;
+
+       if (plane->id == PLANE_PRIMARY)
+               intel_pre_disable_primary_noatomic(&crtc->base);
+
+       intel_disable_plane(plane, crtc_state);
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+                            struct intel_initial_plane_config *plane_config)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_crtc *c;
+       struct drm_i915_gem_object *obj;
+       struct drm_plane *primary = intel_crtc->base.primary;
+       struct drm_plane_state *plane_state = primary->state;
+       struct intel_plane *intel_plane = to_intel_plane(primary);
+       struct intel_plane_state *intel_state =
+               to_intel_plane_state(plane_state);
+       struct drm_framebuffer *fb;
+
+       if (!plane_config->fb)
+               return;
+
+       if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+               fb = &plane_config->fb->base;
+               goto valid_fb;
+       }
+
+       kfree(plane_config->fb);
+
+       /*
+        * Failed to alloc the obj; check to see if we should share
+        * an fb with another CRTC instead.
+        */
+       for_each_crtc(dev, c) {
+               struct intel_plane_state *state;
+
+               if (c == &intel_crtc->base)
+                       continue;
+
+               if (!to_intel_crtc(c)->active)
+                       continue;
+
+               state = to_intel_plane_state(c->primary->state);
+               if (!state->vma)
+                       continue;
+
+               if (intel_plane_ggtt_offset(state) == plane_config->base) {
+                       fb = state->base.fb;
+                       drm_framebuffer_get(fb);
+                       goto valid_fb;
+               }
+       }
+
+       /*
+        * We've failed to reconstruct the BIOS FB.  Current display state
+        * indicates that the primary plane is visible, but has a NULL FB,
+        * which will lead to problems later if we don't fix it up.  The
+        * simplest solution is to just disable the primary plane now and
+        * pretend the BIOS never had it enabled.
+        */
+       intel_plane_disable_noatomic(intel_crtc, intel_plane);
+
+       return;
+
+valid_fb:
+       intel_state->base.rotation = plane_config->rotation;
+       intel_fill_fb_ggtt_view(&intel_state->view, fb,
+                               intel_state->base.rotation);
+       intel_state->color_plane[0].stride =
+               intel_fb_pitch(fb, 0, intel_state->base.rotation);
+
+       mutex_lock(&dev->struct_mutex);
+       intel_state->vma =
+               intel_pin_and_fence_fb_obj(fb,
+                                          &intel_state->view,
+                                          intel_plane_uses_fence(intel_state),
+                                          &intel_state->flags);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(intel_state->vma)) {
+               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+               intel_state->vma = NULL;
+               drm_framebuffer_put(fb);
+               return;
+       }
+
+       obj = intel_fb_obj(fb);
+       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
+       plane_state->src_x = 0;
+       plane_state->src_y = 0;
+       plane_state->src_w = fb->width << 16;
+       plane_state->src_h = fb->height << 16;
+
+       plane_state->crtc_x = 0;
+       plane_state->crtc_y = 0;
+       plane_state->crtc_w = fb->width;
+       plane_state->crtc_h = fb->height;
+
+       intel_state->base.src = drm_plane_state_src(plane_state);
+       intel_state->base.dst = drm_plane_state_dest(plane_state);
+
+       if (i915_gem_object_is_tiled(obj))
+               dev_priv->preserve_bios_swizzle = true;
+
+       plane_state->fb = fb;
+       plane_state->crtc = &intel_crtc->base;
+
+       atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+                 &obj->frontbuffer_bits);
+}
+
+static int skl_max_plane_width(const struct drm_framebuffer *fb,
+                              int color_plane,
+                              unsigned int rotation)
+{
+       int cpp = fb->format->cpp[color_plane];
+
+       switch (fb->modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               return 4096;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               /* FIXME AUX plane? */
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               if (cpp == 8)
+                       return 2048;
+               else
+                       return 4096;
+       default:
+               MISSING_CASE(fb->modifier);
+               return 2048;
+       }
+}
+
+static int glk_max_plane_width(const struct drm_framebuffer *fb,
+                              int color_plane,
+                              unsigned int rotation)
+{
+       int cpp = fb->format->cpp[color_plane];
+
+       switch (fb->modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               if (cpp == 8)
+                       return 4096;
+               else
+                       return 5120;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               /* FIXME AUX plane? */
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               if (cpp == 8)
+                       return 2048;
+               else
+                       return 5120;
+       default:
+               MISSING_CASE(fb->modifier);
+               return 2048;
+       }
+}
+
+static int icl_max_plane_width(const struct drm_framebuffer *fb,
+                              int color_plane,
+                              unsigned int rotation)
+{
+       return 5120;
+}
+
+static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+                                          int main_x, int main_y, u32 main_offset)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int hsub = fb->format->hsub;
+       int vsub = fb->format->vsub;
+       int aux_x = plane_state->color_plane[1].x;
+       int aux_y = plane_state->color_plane[1].y;
+       u32 aux_offset = plane_state->color_plane[1].offset;
+       u32 alignment = intel_surf_alignment(fb, 1);
+
+       while (aux_offset >= main_offset && aux_y <= main_y) {
+               int x, y;
+
+               if (aux_x == main_x && aux_y == main_y)
+                       break;
+
+               if (aux_offset == 0)
+                       break;
+
+               x = aux_x / hsub;
+               y = aux_y / vsub;
+               aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
+                                                              aux_offset, aux_offset - alignment);
+               aux_x = x * hsub + aux_x % hsub;
+               aux_y = y * vsub + aux_y % vsub;
+       }
+
+       if (aux_x != main_x || aux_y != main_y)
+               return false;
+
+       plane_state->color_plane[1].offset = aux_offset;
+       plane_state->color_plane[1].x = aux_x;
+       plane_state->color_plane[1].y = aux_y;
+
+       return true;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int x = plane_state->base.src.x1 >> 16;
+       int y = plane_state->base.src.y1 >> 16;
+       int w = drm_rect_width(&plane_state->base.src) >> 16;
+       int h = drm_rect_height(&plane_state->base.src) >> 16;
+       int max_width;
+       int max_height = 4096;
+       u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               max_width = icl_max_plane_width(fb, 0, rotation);
+       else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               max_width = glk_max_plane_width(fb, 0, rotation);
+       else
+               max_width = skl_max_plane_width(fb, 0, rotation);
+
+       if (w > max_width || h > max_height) {
+               DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+                             w, h, max_width, max_height);
+               return -EINVAL;
+       }
+
+       intel_add_fb_offsets(&x, &y, plane_state, 0);
+       offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
+       alignment = intel_surf_alignment(fb, 0);
+
+       /*
+        * AUX surface offset is specified as the distance from the
+        * main surface offset, and it must be non-negative. Make
+        * sure that is what we will get.
+        */
+       if (offset > aux_offset)
+               offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+                                                          offset, aux_offset & ~(alignment - 1));
+
+       /*
+        * When using an X-tiled surface, the plane blows up
+        * if the x offset + width exceeds the stride.
+        *
+        * TODO: linear and Y-tiled seem fine, Yf untested.
+        */
+       if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
+               int cpp = fb->format->cpp[0];
+
+               while ((x + w) * cpp > plane_state->color_plane[0].stride) {
+                       if (offset == 0) {
+                               DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
+                               return -EINVAL;
+                       }
+
+                       offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+                                                                  offset, offset - alignment);
+               }
+       }
+
+       /*
+        * CCS AUX surface doesn't have its own x/y offsets, so we must make sure
+        * they match with the main surface x/y offsets.
+        */
+       if (is_ccs_modifier(fb->modifier)) {
+               while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+                       if (offset == 0)
+                               break;
+
+                       offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+                                                                  offset, offset - alignment);
+               }
+
+               if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
+                       DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+                       return -EINVAL;
+               }
+       }
+
+       plane_state->color_plane[0].offset = offset;
+       plane_state->color_plane[0].x = x;
+       plane_state->color_plane[0].y = y;
+
+       /*
+        * Put the final coordinates back so that the src
+        * coordinate checks will see the right values.
+        */
+       drm_rect_translate(&plane_state->base.src,
+                          (x << 16) - plane_state->base.src.x1,
+                          (y << 16) - plane_state->base.src.y1);
+
+       return 0;
+}
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int max_width = skl_max_plane_width(fb, 1, rotation);
+       int max_height = 4096;
+       int x = plane_state->base.src.x1 >> 17;
+       int y = plane_state->base.src.y1 >> 17;
+       int w = drm_rect_width(&plane_state->base.src) >> 17;
+       int h = drm_rect_height(&plane_state->base.src) >> 17;
+       u32 offset;
+
+       intel_add_fb_offsets(&x, &y, plane_state, 1);
+       offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+
+       /* FIXME not quite sure how/if these apply to the chroma plane */
+       if (w > max_width || h > max_height) {
+               DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
+                             w, h, max_width, max_height);
+               return -EINVAL;
+       }
+
+       plane_state->color_plane[1].offset = offset;
+       plane_state->color_plane[1].x = x;
+       plane_state->color_plane[1].y = y;
+
+       return 0;
+}
+
+static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int src_x = plane_state->base.src.x1 >> 16;
+       int src_y = plane_state->base.src.y1 >> 16;
+       int hsub = fb->format->hsub;
+       int vsub = fb->format->vsub;
+       int x = src_x / hsub;
+       int y = src_y / vsub;
+       u32 offset;
+
+       intel_add_fb_offsets(&x, &y, plane_state, 1);
+       offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+
+       plane_state->color_plane[1].offset = offset;
+       plane_state->color_plane[1].x = x * hsub + src_x % hsub;
+       plane_state->color_plane[1].y = y * vsub + src_y % vsub;
+
+       return 0;
+}
+
+int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int ret;
+
+       ret = intel_plane_compute_gtt(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       /*
+        * Handle the AUX surface first since
+        * the main surface setup depends on it.
+        */
+       if (is_planar_yuv_format(fb->format->format)) {
+               ret = skl_check_nv12_aux_surface(plane_state);
+               if (ret)
+                       return ret;
+       } else if (is_ccs_modifier(fb->modifier)) {
+               ret = skl_check_ccs_aux_surface(plane_state);
+               if (ret)
+                       return ret;
+       } else {
+               plane_state->color_plane[1].offset = ~0xfff;
+               plane_state->color_plane[1].x = 0;
+               plane_state->color_plane[1].y = 0;
+       }
+
+       ret = skl_check_main_surface(plane_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+                     u32 pixel_format, u64 modifier,
+                     unsigned int rotation)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
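+       /*
+        * Maximum primary plane stride in bytes: non-GMCH platforms allow
+        * 32k; older GMCH platforms allow progressively less, with
+        * X-tiling halving the limit on gen3/4.
+        */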
+       if (!HAS_GMCH(dev_priv)) {
+               return 32*1024;
+       } else if (INTEL_GEN(dev_priv) >= 4) {
+               if (modifier == I915_FORMAT_MOD_X_TILED)
+                       return 16*1024;
+               else
+                       return 32*1024;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               if (modifier == I915_FORMAT_MOD_X_TILED)
+                       return 8*1024;
+               else
+                       return 16*1024;
+       } else {
+               if (plane->i9xx_plane == PLANE_C)
+                       return 4*1024;
+               else
+                       return 8*1024;
+       }
+}
+
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 dspcntr = 0;
+
+       if (crtc_state->gamma_enable)
+               dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+       if (crtc_state->csc_enable)
+               dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+       if (INTEL_GEN(dev_priv) < 5)
+               dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+       return dspcntr;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       u32 dspcntr;
+
+       dspcntr = DISPLAY_PLANE_ENABLE;
+
+       if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+           IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+               dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+       switch (fb->format->format) {
+       case DRM_FORMAT_C8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case DRM_FORMAT_XRGB1555:
+               dspcntr |= DISPPLANE_BGRX555;
+               break;
+       case DRM_FORMAT_RGB565:
+               dspcntr |= DISPPLANE_BGRX565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               dspcntr |= DISPPLANE_BGRX888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               dspcntr |= DISPPLANE_RGBX888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+               dspcntr |= DISPPLANE_BGRX101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               dspcntr |= DISPPLANE_RGBX101010;
+               break;
+       default:
+               MISSING_CASE(fb->format->format);
+               return 0;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 4 &&
+           fb->modifier == I915_FORMAT_MOD_X_TILED)
+               dspcntr |= DISPPLANE_TILED;
+
+       if (rotation & DRM_MODE_ROTATE_180)
+               dspcntr |= DISPPLANE_ROTATE_180;
+
+       if (rotation & DRM_MODE_REFLECT_X)
+               dspcntr |= DISPPLANE_MIRROR;
+
+       return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       int src_x, src_y;
+       u32 offset;
+       int ret;
+
+       ret = intel_plane_compute_gtt(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       src_x = plane_state->base.src.x1 >> 16;
+       src_y = plane_state->base.src.y1 >> 16;
+
+       intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+       if (INTEL_GEN(dev_priv) >= 4)
+               offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+                                                           plane_state, 0);
+       else
+               offset = 0;
+
+       /*
+        * Put the final coordinates back so that the src
+        * coordinate checks will see the right values.
+        */
+       drm_rect_translate(&plane_state->base.src,
+                          (src_x << 16) - plane_state->base.src.x1,
+                          (src_y << 16) - plane_state->base.src.y1);
+
+       /* HSW/BDW do this automagically in hardware */
+       if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+               unsigned int rotation = plane_state->base.rotation;
+               int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+               int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
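+               /*
+                * With 180 degree rotation or X mirroring the hardware
+                * scans out from the opposite edge, so point the offset
+                * at the last pixel/line of the source.
+                */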
+               if (rotation & DRM_MODE_ROTATE_180) {
+                       src_x += src_w - 1;
+                       src_y += src_h - 1;
+               } else if (rotation & DRM_MODE_REFLECT_X) {
+                       src_x += src_w - 1;
+               }
+       }
+
+       plane_state->color_plane[0].offset = offset;
+       plane_state->color_plane[0].x = src_x;
+       plane_state->color_plane[0].y = src_y;
+
+       return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+                struct intel_plane_state *plane_state)
+{
+       int ret;
+
+       ret = chv_plane_check_rotation(plane_state);
+       if (ret)
+               return ret;
+
+       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+                                                 &crtc_state->base,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 false, true);
+       if (ret)
+               return ret;
+
+       ret = i9xx_check_plane_surface(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       ret = intel_plane_check_src_coordinates(plane_state);
+       if (ret)
+               return ret;
+
+       plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+       return 0;
+}
+
+static void i9xx_update_plane(struct intel_plane *plane,
+                             const struct intel_crtc_state *crtc_state,
+                             const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+       u32 linear_offset;
+       int x = plane_state->color_plane[0].x;
+       int y = plane_state->color_plane[0].y;
+       unsigned long irqflags;
+       u32 dspaddr_offset;
+       u32 dspcntr;
+
+       dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
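+       /*
+        * Gen4+ takes the aligned surface offset via DSPSURF with the
+        * residual x/y going into DSPTILEOFF/DSPLINOFF; older platforms
+        * only have a single linear byte offset (DSPADDR).
+        */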
+       if (INTEL_GEN(dev_priv) >= 4)
+               dspaddr_offset = plane_state->color_plane[0].offset;
+       else
+               dspaddr_offset = linear_offset;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
+       if (INTEL_GEN(dev_priv) < 4) {
+               /*
+                * pipesrc and dspsize control the size that is scaled from,
+                * which should always be the user's requested size.
+                */
+               I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+               I915_WRITE_FW(DSPSIZE(i9xx_plane),
+                             ((crtc_state->pipe_src_h - 1) << 16) |
+                             (crtc_state->pipe_src_w - 1));
+       } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+               I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+               I915_WRITE_FW(PRIMSIZE(i9xx_plane),
+                             ((crtc_state->pipe_src_h - 1) << 16) |
+                             (crtc_state->pipe_src_w - 1));
+               I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
+       }
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
+       } else if (INTEL_GEN(dev_priv) >= 4) {
+               I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+               I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+       }
+
+       /*
+        * The control register self-arms if the plane was previously
+        * disabled. Try to make the plane enable atomic by writing
+        * the control register just before the surface register.
+        */
+       I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+       if (INTEL_GEN(dev_priv) >= 4)
+               I915_WRITE_FW(DSPSURF(i9xx_plane),
+                             intel_plane_ggtt_offset(plane_state) +
+                             dspaddr_offset);
+       else
+               I915_WRITE_FW(DSPADDR(i9xx_plane),
+                             intel_plane_ggtt_offset(plane_state) +
+                             dspaddr_offset);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_plane(struct intel_plane *plane,
+                              const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+       unsigned long irqflags;
+       u32 dspcntr;
+
+       /*
+        * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+        * enable on ilk+ affect the pipe bottom color as
+        * well, so we must configure them even if the plane
+        * is disabled.
+        *
+        * On pre-g4x there is no way to gamma correct the
+        * pipe bottom color, but we keep doing this anyway so
+        * that the crtc state readout works correctly.
+        */
+       dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+       if (INTEL_GEN(dev_priv) >= 4)
+               I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
+       else
+               I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+                                   enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+       intel_wakeref_t wakeref;
+       bool ret;
+       u32 val;
+
+       /*
+        * Not 100% correct for planes that can move between pipes,
+        * but that's only the case for gen2-4 which don't have any
+        * display power wells.
+        */
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(DSPCNTR(i9xx_plane));
+
+       ret = val & DISPLAY_PLANE_ENABLE;
+
+       if (INTEL_GEN(dev_priv) >= 5)
+               *pipe = plane->pipe;
+       else
+               *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+                       DISPPLANE_SEL_PIPE_SHIFT;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+       I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+       I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+}
+
+/*
+ * This function detaches (i.e. unbinds) unused scalers in hardware.
+ */
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       int i;
+
+       /* loop through and disable scalers that aren't in use */
+       for (i = 0; i < intel_crtc->num_scalers; i++) {
+               if (!scaler_state->scalers[i].in_use)
+                       skl_detach_scaler(intel_crtc, i);
+       }
+}
+
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+                                         int color_plane, unsigned int rotation)
+{
+       /*
+        * The stride is expressed either as a multiple of 64-byte chunks
+        * for linear buffers or as a number of tiles for tiled buffers.
+        */
+       if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+               return 64;
+       else if (drm_rotation_90_or_270(rotation))
+               return intel_tile_height(fb, color_plane);
+       else
+               return intel_tile_width_bytes(fb, color_plane);
+}
+
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+                    int color_plane)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       u32 stride = plane_state->color_plane[color_plane].stride;
+
+       if (color_plane >= fb->format->num_planes)
+               return 0;
+
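+       /*
+        * Convert the byte stride into hardware stride units: 64-byte
+        * chunks for linear buffers, tiles otherwise.
+        */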
+       return stride / skl_plane_stride_mult(fb, color_plane, rotation);
+}
+
+static u32 skl_plane_ctl_format(u32 pixel_format)
+{
+       switch (pixel_format) {
+       case DRM_FORMAT_C8:
+               return PLANE_CTL_FORMAT_INDEXED;
+       case DRM_FORMAT_RGB565:
+               return PLANE_CTL_FORMAT_RGB_565;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               return PLANE_CTL_FORMAT_XRGB_8888;
+       case DRM_FORMAT_XRGB2101010:
+               return PLANE_CTL_FORMAT_XRGB_2101010;
+       case DRM_FORMAT_XBGR2101010:
+               return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+       case DRM_FORMAT_XBGR16161616F:
+       case DRM_FORMAT_ABGR16161616F:
+               return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+       case DRM_FORMAT_XRGB16161616F:
+       case DRM_FORMAT_ARGB16161616F:
+               return PLANE_CTL_FORMAT_XRGB_16161616F;
+       case DRM_FORMAT_YUYV:
+               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+       case DRM_FORMAT_YVYU:
+               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+       case DRM_FORMAT_UYVY:
+               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+       case DRM_FORMAT_VYUY:
+               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+       case DRM_FORMAT_NV12:
+               return PLANE_CTL_FORMAT_NV12;
+       case DRM_FORMAT_P010:
+               return PLANE_CTL_FORMAT_P010;
+       case DRM_FORMAT_P012:
+               return PLANE_CTL_FORMAT_P012;
+       case DRM_FORMAT_P016:
+               return PLANE_CTL_FORMAT_P016;
+       case DRM_FORMAT_Y210:
+               return PLANE_CTL_FORMAT_Y210;
+       case DRM_FORMAT_Y212:
+               return PLANE_CTL_FORMAT_Y212;
+       case DRM_FORMAT_Y216:
+               return PLANE_CTL_FORMAT_Y216;
+       case DRM_FORMAT_XVYU2101010:
+               return PLANE_CTL_FORMAT_Y410;
+       case DRM_FORMAT_XVYU12_16161616:
+               return PLANE_CTL_FORMAT_Y412;
+       case DRM_FORMAT_XVYU16161616:
+               return PLANE_CTL_FORMAT_Y416;
+       default:
+               MISSING_CASE(pixel_format);
+       }
+
+       return 0;
+}
+
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+       if (!plane_state->base.fb->format->has_alpha)
+               return PLANE_CTL_ALPHA_DISABLE;
+
+       switch (plane_state->base.pixel_blend_mode) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+               return PLANE_CTL_ALPHA_DISABLE;
+       case DRM_MODE_BLEND_PREMULTI:
+               return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+       case DRM_MODE_BLEND_COVERAGE:
+               return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
+       default:
+               MISSING_CASE(plane_state->base.pixel_blend_mode);
+               return PLANE_CTL_ALPHA_DISABLE;
+       }
+}
+
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+       if (!plane_state->base.fb->format->has_alpha)
+               return PLANE_COLOR_ALPHA_DISABLE;
+
+       switch (plane_state->base.pixel_blend_mode) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+               return PLANE_COLOR_ALPHA_DISABLE;
+       case DRM_MODE_BLEND_PREMULTI:
+               return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+       case DRM_MODE_BLEND_COVERAGE:
+               return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
+       default:
+               MISSING_CASE(plane_state->base.pixel_blend_mode);
+               return PLANE_COLOR_ALPHA_DISABLE;
+       }
+}
+
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
+{
+       switch (fb_modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+               break;
+       case I915_FORMAT_MOD_X_TILED:
+               return PLANE_CTL_TILED_X;
+       case I915_FORMAT_MOD_Y_TILED:
+               return PLANE_CTL_TILED_Y;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+               return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+       case I915_FORMAT_MOD_Yf_TILED:
+               return PLANE_CTL_TILED_YF;
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+       default:
+               MISSING_CASE(fb_modifier);
+       }
+
+       return 0;
+}
+
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
+{
+       switch (rotate) {
+       case DRM_MODE_ROTATE_0:
+               break;
+       /*
+        * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with Xrandr,
+        * while i915 HW rotation is clockwise; that's why the values are swapped.
+        */
+       case DRM_MODE_ROTATE_90:
+               return PLANE_CTL_ROTATE_270;
+       case DRM_MODE_ROTATE_180:
+               return PLANE_CTL_ROTATE_180;
+       case DRM_MODE_ROTATE_270:
+               return PLANE_CTL_ROTATE_90;
+       default:
+               MISSING_CASE(rotate);
+       }
+
+       return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+       switch (reflect) {
+       case 0:
+               break;
+       case DRM_MODE_REFLECT_X:
+               return PLANE_CTL_FLIP_HORIZONTAL;
+       case DRM_MODE_REFLECT_Y:
+       default:
+               MISSING_CASE(reflect);
+       }
+
+       return 0;
+}
+
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 plane_ctl = 0;
+
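+       /*
+        * On GLK and gen10+ these bits have moved out of PLANE_CTL;
+        * see glk_plane_color_ctl_crtc().
+        */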
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               return plane_ctl;
+
+       if (crtc_state->gamma_enable)
+               plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+       if (crtc_state->csc_enable)
+               plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+       return plane_ctl;
+}
+
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+                 const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       u32 plane_ctl;
+
+       plane_ctl = PLANE_CTL_ENABLE;
+
+       if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+               plane_ctl |= skl_plane_ctl_alpha(plane_state);
+               plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+               if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+                       plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+               if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+                       plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
+       }
+
+       plane_ctl |= skl_plane_ctl_format(fb->format->format);
+       plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+       plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+       if (INTEL_GEN(dev_priv) >= 10)
+               plane_ctl |= cnl_plane_ctl_flip(rotation &
+                                               DRM_MODE_REFLECT_MASK);
+
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+       return plane_ctl;
+}
+
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 plane_color_ctl = 0;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               return plane_color_ctl;
+
+       if (crtc_state->gamma_enable)
+               plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+       if (crtc_state->csc_enable)
+               plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+
+       return plane_color_ctl;
+}
+
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+                       const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       u32 plane_color_ctl = 0;
+
+       plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+       plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
+
+       if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
+               if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+                       plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
+               else
+                       plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
+
+               if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+                       plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+       } else if (fb->format->is_yuv) {
+               plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+       }
+
+       return plane_color_ctl;
+}
+
+static int
+__intel_display_resume(struct drm_device *dev,
+                      struct drm_atomic_state *state,
+                      struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int i, ret;
+
+       intel_modeset_setup_hw_state(dev, ctx);
+       i915_redisable_vga(to_i915(dev));
+
+       if (!state)
+               return 0;
+
+       /*
+        * We've duplicated the state; pointers to the old state are invalid.
+        *
+        * Don't attempt to use the old state until we commit the duplicated state.
+        */
+       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+               /*
+                * Force recalculation even if we restore
+                * current state. With fast modeset this may not result
+                * in a modeset when the state is compatible.
+                */
+               crtc_state->mode_changed = true;
+       }
+
+       /* ignore any reset values/BIOS leftovers in the WM registers */
+       if (!HAS_GMCH(to_i915(dev)))
+               to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+       ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+
+       WARN_ON(ret == -EDEADLK);
+       return ret;
+}
+
+static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+{
+       return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+               intel_has_gpu_reset(dev_priv));
+}
+
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+       struct drm_atomic_state *state;
+       int ret;
+
+       /* reset doesn't touch the display */
+       if (!i915_modparams.force_reset_modeset_test &&
+           !gpu_reset_clobbers_display(dev_priv))
+               return;
+
+       /* We have a modeset vs reset deadlock, defensively unbreak it. */
+       set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+       wake_up_all(&dev_priv->gpu_error.wait_queue);
+
+       if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
+               DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+               i915_gem_set_wedged(dev_priv);
+       }
+
+       /*
+        * Need mode_config.mutex so that we don't
+        * trample ongoing ->detect() and whatnot.
+        */
+       mutex_lock(&dev->mode_config.mutex);
+       drm_modeset_acquire_init(ctx, 0);
+       while (1) {
+               ret = drm_modeset_lock_all_ctx(dev, ctx);
+               if (ret != -EDEADLK)
+                       break;
+
+               drm_modeset_backoff(ctx);
+       }
+       /*
+        * Disabling the crtcs gracefully seems nicer. Also the
+        * g33 docs say we should at least disable all the planes.
+        */
+       state = drm_atomic_helper_duplicate_state(dev, ctx);
+       if (IS_ERR(state)) {
+               ret = PTR_ERR(state);
+               DRM_ERROR("Duplicating state failed with %i\n", ret);
+               return;
+       }
+
+       ret = drm_atomic_helper_disable_all(dev, ctx);
+       if (ret) {
+               DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+               drm_atomic_state_put(state);
+               return;
+       }
+
+       dev_priv->modeset_restore_state = state;
+       state->acquire_ctx = ctx;
+}
+
+void intel_finish_reset(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+       struct drm_atomic_state *state;
+       int ret;
+
+       /* reset doesn't touch the display */
+       if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+               return;
+
+       state = fetch_and_zero(&dev_priv->modeset_restore_state);
+       if (!state)
+               goto unlock;
+
+       /* reset doesn't touch the display */
+       if (!gpu_reset_clobbers_display(dev_priv)) {
+               /* for testing only restore the display */
+               ret = __intel_display_resume(dev, state, ctx);
+               if (ret)
+                       DRM_ERROR("Restoring old state failed with %i\n", ret);
+       } else {
+               /*
+                * The display has been reset as well,
+                * so we need a full re-initialization.
+                */
+               intel_pps_unlock_regs_wa(dev_priv);
+               intel_modeset_init_hw(dev);
+               intel_init_clock_gating(dev_priv);
+
+               spin_lock_irq(&dev_priv->irq_lock);
+               if (dev_priv->display.hpd_irq_setup)
+                       dev_priv->display.hpd_irq_setup(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
+
+               ret = __intel_display_resume(dev, state, ctx);
+               if (ret)
+                       DRM_ERROR("Restoring old state failed with %i\n", ret);
+
+               intel_hpd_init(dev_priv);
+       }
+
+       drm_atomic_state_put(state);
+unlock:
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+       mutex_unlock(&dev->mode_config.mutex);
+
+       clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+}
+
+static void icl_set_pipe_chicken(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 tmp;
+
+       tmp = I915_READ(PIPE_CHICKEN(pipe));
+
+       /*
+        * Display WA #1153: icl
+        * enable hardware to bypass the alpha math
+        * and rounding for per-pixel values 00 and 0xff
+        */
+       tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
+       /*
+        * Display WA #1605353570: icl
+        * Set the pixel rounding bit to 1 to allow framebuffer pixels
+        * to pass through the pipe unmodified.
+        */
+       tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
+       I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+}
+
+static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
+                                    const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
+       crtc->base.mode = new_crtc_state->base.mode;
+
+       /*
+        * Update pipe size and adjust fitter if needed: the reason for this is
+        * that in compute_mode_changes we check the native mode (not the pfit
+        * mode) to see if we can flip rather than do a full mode set. In the
+        * fastboot case, we'll flip, but if we don't update the pipesrc and
+        * pfit state, we'll end up with a big fb scanned out into the wrong
+        * sized surface.
+        */
+
+       I915_WRITE(PIPESRC(crtc->pipe),
+                  ((new_crtc_state->pipe_src_w - 1) << 16) |
+                  (new_crtc_state->pipe_src_h - 1));
+
+       /* on skylake this is done by detaching scalers */
+       if (INTEL_GEN(dev_priv) >= 9) {
+               skl_detach_scalers(new_crtc_state);
+
+               if (new_crtc_state->pch_pfit.enabled)
+                       skylake_pfit_enable(new_crtc_state);
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               if (new_crtc_state->pch_pfit.enabled)
+                       ironlake_pfit_enable(new_crtc_state);
+               else if (old_crtc_state->pch_pfit.enabled)
+                       ironlake_pfit_disable(old_crtc_state);
+       }
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_set_pipe_chicken(crtc);
+}
+
+static void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 temp;
+
+       /* enable normal train */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (IS_IVYBRIDGE(dev_priv)) {
+               temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+               temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+       }
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev_priv)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_NONE;
+       }
+       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+       /* wait one idle pattern time */
+       POSTING_READ(reg);
+       udelay(1000);
+
+       /* IVB wants error correction enabled */
+       if (IS_IVYBRIDGE(dev_priv))
+               I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+                          FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct intel_crtc *crtc,
+                                   const struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 temp, tries;
+
+       /* FDI needs bits from the pipe, so the pipe must be enabled first */
+       assert_pipe_enabled(dev_priv, pipe);
+
+       /*
+        * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+        * for the train result.
+        */
+       reg = FDI_RX_IMR(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(reg, temp);
+       I915_READ(reg);
+       udelay(150);
+
+       /* enable CPU FDI TX and PCH FDI RX */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_DP_PORT_WIDTH_MASK;
+       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+       POSTING_READ(reg);
+       udelay(150);
+
+       /* Ironlake workaround, enable clock pointer after FDI enable */
+       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+                  FDI_RX_PHASE_SYNC_POINTER_EN);
+
+       reg = FDI_RX_IIR(pipe);
+       for (tries = 0; tries < 5; tries++) {
+               temp = I915_READ(reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+               if ((temp & FDI_RX_BIT_LOCK)) {
+                       DRM_DEBUG_KMS("FDI train 1 done.\n");
+                       I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+                       break;
+               }
+       }
+       if (tries == 5)
+               DRM_ERROR("FDI train 1 fail!\n");
+
+       /* Train 2 */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
+       udelay(150);
+
+       reg = FDI_RX_IIR(pipe);
+       for (tries = 0; tries < 5; tries++) {
+               temp = I915_READ(reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+               if (temp & FDI_RX_SYMBOL_LOCK) {
+                       I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+                       DRM_DEBUG_KMS("FDI train 2 done.\n");
+                       break;
+               }
+       }
+       if (tries == 5)
+               DRM_ERROR("FDI train 2 fail!\n");
+
+       DRM_DEBUG_KMS("FDI train done\n");
+}
+
+static const int snb_b_fdi_train_param[] = {
+       FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+       FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+       FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+       FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+                               const struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 temp, i, retry;
+
+       /*
+        * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+        * for the train result.
+        */
+       reg = FDI_RX_IMR(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
+       udelay(150);
+
+       /* enable CPU FDI TX and PCH FDI RX */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_DP_PORT_WIDTH_MASK;
+       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+       /* SNB-B */
+       temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+       I915_WRITE(FDI_RX_MISC(pipe),
+                  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev_priv)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+       }
+       I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+       POSTING_READ(reg);
+       udelay(150);
+
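+       /*
+        * Step through the four vswing/pre-emphasis levels until the
+        * receiver reports bit lock.
+        */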
+       for (i = 0; i < 4; i++) {
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[i];
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
+               udelay(500);
+
+               for (retry = 0; retry < 5; retry++) {
+                       reg = FDI_RX_IIR(pipe);
+                       temp = I915_READ(reg);
+                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+                       if (temp & FDI_RX_BIT_LOCK) {
+                               I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+                               DRM_DEBUG_KMS("FDI train 1 done.\n");
+                               break;
+                       }
+                       udelay(50);
+               }
+               if (retry < 5)
+                       break;
+       }
+       if (i == 4)
+               DRM_ERROR("FDI train 1 fail!\n");
+
+       /* Train 2 */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       if (IS_GEN(dev_priv, 6)) {
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               /* SNB-B */
+               temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       }
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev_priv)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_2;
+       }
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
+       udelay(150);
+
+       for (i = 0; i < 4; i++) {
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[i];
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
+               udelay(500);
+
+               for (retry = 0; retry < 5; retry++) {
+                       reg = FDI_RX_IIR(pipe);
+                       temp = I915_READ(reg);
+                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+                       if (temp & FDI_RX_SYMBOL_LOCK) {
+                               I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+                               DRM_DEBUG_KMS("FDI train 2 done.\n");
+                               break;
+                       }
+                       udelay(50);
+               }
+               if (retry < 5)
+                       break;
+       }
+       if (i == 4)
+               DRM_ERROR("FDI train 2 fail!\n");
+
+       DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+                                     const struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = crtc->pipe;
+       i915_reg_t reg;
+       u32 temp, i, j;
+
+       /*
+        * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+        * for the train result.
+        */
+       reg = FDI_RX_IMR(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
+       udelay(150);
+
+       DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+                     I915_READ(FDI_RX_IIR(pipe)));
+
+       /* Try each vswing and preemphasis setting twice before moving on */
+       for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+               /* disable first in case we need to retry */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+               temp &= ~FDI_TX_ENABLE;
+               I915_WRITE(reg, temp);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_AUTO;
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp &= ~FDI_RX_ENABLE;
+               I915_WRITE(reg, temp);
+
+               /* enable CPU FDI TX and PCH FDI RX */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_DP_PORT_WIDTH_MASK;
+               temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+               temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[j/2];
+               temp |= FDI_COMPOSITE_SYNC;
+               I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+               I915_WRITE(FDI_RX_MISC(pipe),
+                          FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               temp |= FDI_COMPOSITE_SYNC;
+               I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+               POSTING_READ(reg);
+               udelay(1); /* should be 0.5us */
+
+               for (i = 0; i < 4; i++) {
+                       reg = FDI_RX_IIR(pipe);
+                       temp = I915_READ(reg);
+                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+                       if (temp & FDI_RX_BIT_LOCK ||
+                           (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+                               I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+                               DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+                                             i);
+                               break;
+                       }
+                       udelay(1); /* should be 0.5us */
+               }
+               if (i == 4) {
+                       DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+                       continue;
+               }
+
+               /* Train 2 */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+               I915_WRITE(reg, temp);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
+               udelay(2); /* should be 1.5us */
+
+               for (i = 0; i < 4; i++) {
+                       reg = FDI_RX_IIR(pipe);
+                       temp = I915_READ(reg);
+                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+                       if (temp & FDI_RX_SYMBOL_LOCK ||
+                           (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+                               I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+                               DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+                                             i);
+                               goto train_done;
+                       }
+                       udelay(2); /* should be 1.5us */
+               }
+               if (i == 4)
+                       DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+       }
+
+train_done:
+       DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       int pipe = intel_crtc->pipe;
+       i915_reg_t reg;
+       u32 temp;
+
+       /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+       I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+       POSTING_READ(reg);
+       udelay(200);
+
+       /* Switch from Rawclk to PCDclk */
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp | FDI_PCDCLK);
+
+       POSTING_READ(reg);
+       udelay(200);
+
+       /* Enable CPU FDI TX PLL, always on for Ironlake */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+               I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+               POSTING_READ(reg);
+               udelay(100);
+       }
+}
+
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = intel_crtc->pipe;
+       i915_reg_t reg;
+       u32 temp;
+
+       /* Switch from PCDclk to Rawclk */
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+       /* Disable CPU FDI TX PLL */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+       POSTING_READ(reg);
+       udelay(100);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+       /* Wait for the clocks to turn off. */
+       POSTING_READ(reg);
+       udelay(100);
+}
+
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       i915_reg_t reg;
+       u32 temp;
+
+       /* disable CPU FDI tx and PCH FDI rx */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+       POSTING_READ(reg);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~(0x7 << 16);
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+       I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+       POSTING_READ(reg);
+       udelay(100);
+
+       /* Ironlake workaround, disable clock pointer after downing FDI */
+       if (HAS_PCH_IBX(dev_priv))
+               I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+       /* still set train pattern 1 */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev_priv)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+       }
+       /* BPC in FDI rx is consistent with that in PIPECONF */
+       temp &= ~(0x07 << 16);
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
+       udelay(100);
+}
+
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
+{
+       struct drm_crtc *crtc;
+       bool cleanup_done;
+
+       drm_for_each_crtc(crtc, &dev_priv->drm) {
+               struct drm_crtc_commit *commit;
+
+               spin_lock(&crtc->commit_lock);
+               commit = list_first_entry_or_null(&crtc->commit_list,
+                                                 struct drm_crtc_commit, commit_entry);
+               cleanup_done = commit ?
+                       try_wait_for_completion(&commit->cleanup_done) : true;
+               spin_unlock(&crtc->commit_lock);
+
+               if (cleanup_done)
+                       continue;
+
+               drm_crtc_wait_one_vblank(crtc);
+
+               return true;
+       }
+
+       return false;
+}
+
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+{
+       u32 temp;
+
+       I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+       temp |= SBI_SSCCTL_DISABLE;
+       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int clock = crtc_state->base.adjusted_mode.crtc_clock;
+       u32 divsel, phaseinc, auxdiv, phasedir = 0;
+       u32 temp;
+
+       lpt_disable_iclkip(dev_priv);
+
+       /*
+        * The iCLK virtual clock root frequency is in MHz, but the
+        * adjusted_mode->crtc_clock is in kHz. To get the divisors, it is
+        * necessary to divide one by the other, so we convert the virtual
+        * clock precision to kHz here for higher precision.
+        */
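+       /*
+        * Pick the smallest auxdiv for which the integer divisor still
+        * fits in the 7-bit DIVSEL field.
+        */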
+       for (auxdiv = 0; auxdiv < 2; auxdiv++) {
+               u32 iclk_virtual_root_freq = 172800 * 1000;
+               u32 iclk_pi_range = 64;
+               u32 desired_divisor;
+
+               desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+                                                   clock << auxdiv);
+               divsel = (desired_divisor / iclk_pi_range) - 2;
+               phaseinc = desired_divisor % iclk_pi_range;
+
+               /*
+                * Near 20MHz is a corner case which is
+                * out of range for the 7-bit divisor
+                */
+               if (divsel <= 0x7f)
+                       break;
+       }
+
+       /* This should not happen with any sane values */
+       WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+               ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+       WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+               ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+       DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+                       clock,
+                       auxdiv,
+                       divsel,
+                       phasedir,
+                       phaseinc);
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* Program SSCDIVINTPHASE6 */
+       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+       temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+       temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+       temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+       temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+       temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+       temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+       /* Program SSCAUXDIV */
+       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+       temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+       temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+       intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+       /* Enable modulator and associated divider */
+       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+       temp &= ~SBI_SSCCTL_DISABLE;
+       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+       mutex_unlock(&dev_priv->sb_lock);
+
+       /* Wait for initialization time */
+       udelay(24);
+
+       I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
+int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+{
+       u32 divsel, phaseinc, auxdiv;
+       u32 iclk_virtual_root_freq = 172800 * 1000;
+       u32 iclk_pi_range = 64;
+       u32 desired_divisor;
+       u32 temp;
+
+       if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+               return 0;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+       if (temp & SBI_SSCCTL_DISABLE) {
+               mutex_unlock(&dev_priv->sb_lock);
+               return 0;
+       }
+
+       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+       divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+               SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
+       phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+               SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
+
+       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+       auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+               SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
+
+       mutex_unlock(&dev_priv->sb_lock);
+
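+       /*
+        * Invert the divisor programming done by lpt_program_iclkip()
+        * to recover the pixel clock.
+        */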
+       desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
+
+       return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+                                desired_divisor << auxdiv);
+}
+
+static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+                                               enum pipe pch_transcoder)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
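+       /* Copy the CPU transcoder timings verbatim to the PCH transcoder */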
+       I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
+                  I915_READ(HTOTAL(cpu_transcoder)));
+       I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
+                  I915_READ(HBLANK(cpu_transcoder)));
+       I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
+                  I915_READ(HSYNC(cpu_transcoder)));
+
+       I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
+                  I915_READ(VTOTAL(cpu_transcoder)));
+       I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
+                  I915_READ(VBLANK(cpu_transcoder)));
+       I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
+                  I915_READ(VSYNC(cpu_transcoder)));
+       I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+                  I915_READ(VSYNCSHIFT(cpu_transcoder)));
+}
+
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+{
+       u32 temp;
+
+       temp = I915_READ(SOUTH_CHICKEN1);
+       if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
+               return;
+
+       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+       temp &= ~FDI_BC_BIFURCATION_SELECT;
+       if (enable)
+               temp |= FDI_BC_BIFURCATION_SELECT;
+
+       DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
+       I915_WRITE(SOUTH_CHICKEN1, temp);
+       POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
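+       /*
+        * FDI B/C lane bifurcation splits the lanes between pipes B and C:
+        * pipe B must have it disabled to use more than two lanes, while
+        * pipe C always requires it.
+        */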
+       switch (crtc->pipe) {
+       case PIPE_A:
+               break;
+       case PIPE_B:
+               if (crtc_state->fdi_lanes > 2)
+                       cpt_set_fdi_bc_bifurcation(dev_priv, false);
+               else
+                       cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+               break;
+       case PIPE_C:
+               cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+               break;
+       default:
+               BUG();
+       }
+}
+
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+                          const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_connector_state *connector_state;
+       const struct drm_connector *connector;
+       struct intel_encoder *encoder = NULL;
+       int num_encoders = 0;
+       int i;
+
+       for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               num_encoders++;
+       }
+
+       WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+            num_encoders, pipe_name(crtc->pipe));
+
+       return encoder;
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ *   - PCH PLLs
+ *   - FDI training & RX/TX
+ *   - update transcoder timings
+ *   - DP transcoding bits
+ *   - transcoder
+ */
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+                               const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = crtc->pipe;
+       u32 temp;
+
+       assert_pch_transcoder_disabled(dev_priv, pipe);
+
+       if (IS_IVYBRIDGE(dev_priv))
+               ivybridge_update_fdi_bc_bifurcation(crtc_state);
+
+       /* Write the TU size bits before fdi link training, so that error
+        * detection works. */
+       I915_WRITE(FDI_RX_TUSIZE1(pipe),
+                  I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+       /* For PCH output, train the FDI link */
+       dev_priv->display.fdi_link_train(crtc, crtc_state);
+
+       /* We need to program the right clock selection before writing the pixel
+        * multiplier into the DPLL. */
+       if (HAS_PCH_CPT(dev_priv)) {
+               u32 sel;
+
+               temp = I915_READ(PCH_DPLL_SEL);
+               temp |= TRANS_DPLL_ENABLE(pipe);
+               sel = TRANS_DPLLB_SEL(pipe);
+               if (crtc_state->shared_dpll ==
+                   intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
+                       temp |= sel;
+               else
+                       temp &= ~sel;
+               I915_WRITE(PCH_DPLL_SEL, temp);
+       }
+
+       /* XXX: PCH PLLs can be enabled any time before we enable the PCH
+        * transcoder, and we actually should do this to not upset any PCH
+        * transcoder that already uses the clock when we share it.
+        *
+        * Note that enable_shared_dpll tries to do the right thing, but
+        * get_shared_dpll unconditionally resets the pll - we need that to have
+        * the right LVDS enable sequence. */
+       intel_enable_shared_dpll(crtc_state);
+
+       /* set transcoder timing, panel must allow it */
+       assert_panel_unlocked(dev_priv, pipe);
+       ironlake_pch_transcoder_set_timings(crtc_state, pipe);
+
+       intel_fdi_normal_train(crtc);
+
+       /* For PCH DP, enable TRANS_DP_CTL */
+       if (HAS_PCH_CPT(dev_priv) &&
+           intel_crtc_has_dp_encoder(crtc_state)) {
+               const struct drm_display_mode *adjusted_mode =
+                       &crtc_state->base.adjusted_mode;
+               u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+               i915_reg_t reg = TRANS_DP_CTL(pipe);
+               enum port port;
+
+               temp = I915_READ(reg);
+               temp &= ~(TRANS_DP_PORT_SEL_MASK |
+                         TRANS_DP_SYNC_MASK |
+                         TRANS_DP_BPC_MASK);
+               temp |= TRANS_DP_OUTPUT_ENABLE;
+               temp |= bpc << 9; /* same format but at 11:9 */
+
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+                       temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+                       temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+               port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+               WARN_ON(port < PORT_B || port > PORT_D);
+               temp |= TRANS_DP_PORT_SEL(port);
+
+               I915_WRITE(reg, temp);
+       }
+
+       ironlake_enable_pch_transcoder(crtc_state);
+}
+
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+                          const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+       assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+
+       lpt_program_iclkip(crtc_state);
+
+       /* Set transcoder timing. */
+       ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
+
+       lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       i915_reg_t dslreg = PIPEDSL(pipe);
+       u32 temp;
+
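+       /*
+        * Sample the pipe scanline counter (PIPEDSL) and check that it is
+        * advancing after the mode set; complain if it appears stuck.
+        */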
+       temp = I915_READ(dslreg);
+       udelay(500);
+       if (wait_for(I915_READ(dslreg) != temp, 5)) {
+               if (wait_for(I915_READ(dslreg) != temp, 5))
+                       DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+       }
+}
+
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | |     1.5 (initial phase)
+ * | |     |
+ * v v     v
+ * | s | s | s | s |
+ * |       d       |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | |     0.0
+ * | |     |
+ * v v     v
+ * |       s       |
+ * | d | d | d | d |
+ */
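+/*
+ * Purely illustrative example: for a luma/RGB plane (sub == 1, not cosited)
+ * downscaling 2:1, scale == 0x20000 in .16 fixed point, so
+ * phase = -0x8000 + 0x20000 / 2 = 0x8000, i.e. an initial phase of +0.5,
+ * consistent with the diagrams above.
+ */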
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+{
+       int phase = -0x8000;
+       u16 trip = 0;
+
+       if (chroma_cosited)
+               phase += (sub - 1) * 0x8000 / sub;
+
+       phase += scale / (2 * sub);
+
+       /*
+        * Hardware initial phase limited to [-0.5:1.5].
+        * Since the max hardware scale factor is 3.0, we
+        * should never actually exceed 1.0 here.
+        */
+       WARN_ON(phase < -0x8000 || phase > 0x18000);
+
+       if (phase < 0)
+               phase = 0x10000 + phase;
+       else
+               trip = PS_PHASE_TRIP;
+
+       return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
+
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
+
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+                 unsigned int scaler_user, int *scaler_id,
+                 int src_w, int src_h, int dst_w, int dst_h,
+                 const struct drm_format_info *format, bool need_scaler)
+{
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
+       if (src_w != dst_w || src_h != dst_h)
+               need_scaler = true;
+
+       /*
+        * Scaling/fitting not supported in IF-ID mode in GEN9+
+        * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+        * Once NV12 is enabled, handle it here while allocating scaler
+        * for NV12.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+           need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+               return -EINVAL;
+       }
+
+       /*
+        * If the plane is being disabled, the scaler is no longer required, or
+        * force detach is requested:
+        *  - free the scaler bound to this plane/crtc
+        *  - in order to do this, update crtc->scaler_usage
+        *
+        * Here the scaler state in crtc_state is set free so that the
+        * scaler can be assigned to another user. The actual register
+        * update to free the scaler is done in plane/panel-fit programming.
+        * For this purpose crtc/plane_state->scaler_id isn't reset here.
+        */
+       if (force_detach || !need_scaler) {
+               if (*scaler_id >= 0) {
+                       scaler_state->scaler_users &= ~(1 << scaler_user);
+                       scaler_state->scalers[*scaler_id].in_use = 0;
+
+                       DRM_DEBUG_KMS("scaler_user index %u.%u: "
+                               "Staged freeing scaler id %d scaler_users = 0x%x\n",
+                               intel_crtc->pipe, scaler_user, *scaler_id,
+                               scaler_state->scaler_users);
+                       *scaler_id = -1;
+               }
+               return 0;
+       }
+
+       if (format && is_planar_yuv_format(format->format) &&
+           (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
+               DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
+               return -EINVAL;
+       }
+
+       /* range checks */
+       if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+           dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+           (INTEL_GEN(dev_priv) >= 11 &&
+            (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+             dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+           (INTEL_GEN(dev_priv) < 11 &&
+            (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+             dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
+               DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
+                       "size is out of scaler range\n",
+                       intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
+               return -EINVAL;
+       }
+
+       /* mark this plane as a scaler user in crtc_state */
+       scaler_state->scaler_users |= (1 << scaler_user);
+       DRM_DEBUG_KMS("scaler_user index %u.%u: "
+               "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+               intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+               scaler_state->scaler_users);
+
+       return 0;
+}
+
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc state containing the scaler state to update
+ *
+ * Return:
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+       const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+       bool need_scaler = false;
+
+       if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+               need_scaler = true;
+
+       return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+                                &state->scaler_state.scaler_id,
+                                state->pipe_src_w, state->pipe_src_h,
+                                adjusted_mode->crtc_hdisplay,
+                                adjusted_mode->crtc_vdisplay, NULL, need_scaler);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ * @crtc_state: crtc state containing the scaler state to update
+ * @plane_state: atomic plane state to update
+ *
+ * Return:
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+                                  struct intel_plane_state *plane_state)
+{
+       struct intel_plane *intel_plane =
+               to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       int ret;
+       bool force_detach = !fb || !plane_state->base.visible;
+       bool need_scaler = false;
+
+       /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+       if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+           fb && is_planar_yuv_format(fb->format->format))
+               need_scaler = true;
+
+       ret = skl_update_scaler(crtc_state, force_detach,
+                               drm_plane_index(&intel_plane->base),
+                               &plane_state->scaler_id,
+                               drm_rect_width(&plane_state->base.src) >> 16,
+                               drm_rect_height(&plane_state->base.src) >> 16,
+                               drm_rect_width(&plane_state->base.dst),
+                               drm_rect_height(&plane_state->base.dst),
+                               fb ? fb->format : NULL, need_scaler);
+
+       if (ret || plane_state->scaler_id < 0)
+               return ret;
+
+       /* check colorkey */
+       if (plane_state->ckey.flags) {
+               DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+                             intel_plane->base.base.id,
+                             intel_plane->base.name);
+               return -EINVAL;
+       }
+
+       /* Check src format */
+       switch (fb->format->format) {
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_XBGR16161616F:
+       case DRM_FORMAT_ABGR16161616F:
+       case DRM_FORMAT_XRGB16161616F:
+       case DRM_FORMAT_ARGB16161616F:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_P010:
+       case DRM_FORMAT_P012:
+       case DRM_FORMAT_P016:
+       case DRM_FORMAT_Y210:
+       case DRM_FORMAT_Y212:
+       case DRM_FORMAT_Y216:
+       case DRM_FORMAT_XVYU2101010:
+       case DRM_FORMAT_XVYU12_16161616:
+       case DRM_FORMAT_XVYU16161616:
+               break;
+       default:
+               DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+                             intel_plane->base.base.id, intel_plane->base.name,
+                             fb->base.id, fb->format->format);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+       int i;
+
+       for (i = 0; i < crtc->num_scalers; i++)
+               skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       const struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+
+       if (crtc_state->pch_pfit.enabled) {
+               u16 uv_rgb_hphase, uv_rgb_vphase;
+               int pfit_w, pfit_h, hscale, vscale;
+               int id;
+
+               if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
+                       return;
+
+               pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
+               pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+
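+               /*
+                * Scale factors as .16 fixed point ratios of the pipe source
+                * size to the panel fitter window size.
+                */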
+               hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
+               vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+
+               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+               id = scaler_state->scaler_id;
+               I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+                       PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+               I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+               I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+               I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
+               I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
+       }
+}
+
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int pipe = crtc->pipe;
+
+       if (crtc_state->pch_pfit.enabled) {
+               /* Force use of hard-coded filter coefficients
+                * as some pre-programmed values are broken,
+                * e.g. x201.
+                */
+               if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+                                                PF_PIPE_SEL_IVB(pipe));
+               else
+                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+               I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
+               I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
+       }
+}
+
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       if (!crtc_state->ips_enabled)
+               return;
+
+       /*
+        * We can only enable IPS after we enable a plane and wait for a vblank.
+        * This function is called from post_plane_update, which is run after
+        * a vblank wait.
+        */
+       WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+
+       if (IS_BROADWELL(dev_priv)) {
+               WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+                                               IPS_ENABLE | IPS_PCODE_CONTROL));
+               /* Quoting Art Runyan: "it's not safe to expect any particular
+                * value in IPS_CTL bit 31 after enabling IPS through the
+                * mailbox." Moreover, the mailbox may return a bogus state,
+                * so we need to just enable it and continue on.
+                */
+       } else {
+               I915_WRITE(IPS_CTL, IPS_ENABLE);
+               /* The bit only becomes 1 in the next vblank, so this wait here
+                * is essentially intel_wait_for_vblank. If we don't have this
+                * and don't wait for vblanks until the end of crtc_enable, then
+                * the HW state readout code will complain that the expected
+                * IPS_CTL value is not the one we read. */
+               if (intel_wait_for_register(&dev_priv->uncore,
+                                           IPS_CTL, IPS_ENABLE, IPS_ENABLE,
+                                           50))
+                       DRM_ERROR("Timed out waiting for IPS enable\n");
+       }
+}
+
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       if (!crtc_state->ips_enabled)
+               return;
+
+       if (IS_BROADWELL(dev_priv)) {
+               WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+               /*
+                * Wait for PCODE to finish disabling IPS. The BSpec specified
+                * 42ms timeout value leads to occasional timeouts so use 100ms
+                * instead.
+                */
+               if (intel_wait_for_register(&dev_priv->uncore,
+                                           IPS_CTL, IPS_ENABLE, 0,
+                                           100))
+                       DRM_ERROR("Timed out waiting for IPS disable\n");
+       } else {
+               I915_WRITE(IPS_CTL, 0);
+               POSTING_READ(IPS_CTL);
+       }
+
+       /* We need to wait for a vblank before we can disable the plane. */
+       intel_wait_for_vblank(dev_priv, crtc->pipe);
+}
+
+static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
+{
+       if (intel_crtc->overlay) {
+               struct drm_device *dev = intel_crtc->base.dev;
+
+               mutex_lock(&dev->struct_mutex);
+               (void) intel_overlay_switch_off(intel_crtc->overlay);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       /* Let userspace switch the overlay on again. In most cases userspace
+        * has to recompute where to put it anyway.
+        */
+}
+
+/**
+ * intel_post_enable_primary - Perform operations after enabling primary plane
+ * @crtc: the CRTC whose primary plane was just enabled
+ * @new_crtc_state: the enabling state
+ *
+ * Performs potentially sleeping operations that must be done after the primary
+ * plane is enabled, such as updating FBC and IPS.  Note that this may be
+ * called due to an explicit primary plane update, or due to an implicit
+ * re-enable that is caused when a sprite plane is updated to no longer
+ * completely hide the primary plane.
+ */
+static void
+intel_post_enable_primary(struct drm_crtc *crtc,
+                         const struct intel_crtc_state *new_crtc_state)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       /*
+        * Gen2 reports pipe underruns whenever all planes are disabled.
+        * So don't enable underrun reporting before at least some planes
+        * are enabled.
+        * FIXME: Need to fix the logic to work when we turn off all planes
+        * but leave the pipe running.
+        */
+       if (IS_GEN(dev_priv, 2))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+       /* Underruns don't always raise interrupts, so check manually. */
+       intel_check_cpu_fifo_underruns(dev_priv);
+       intel_check_pch_fifo_underruns(dev_priv);
+}
+
+/* FIXME get rid of this and use pre_plane_update */
+static void
+intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       /*
+        * Gen2 reports pipe underruns whenever all planes are disabled.
+        * So disable underrun reporting before all the planes get disabled.
+        */
+       if (IS_GEN(dev_priv, 2))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+       hsw_disable_ips(to_intel_crtc_state(crtc->state));
+
+       /*
+        * Vblank time updates from the shadow to live plane control register
+        * are blocked if the memory self-refresh mode is active at that
+        * moment. So to make sure the plane gets truly disabled, disable
+        * first the self-refresh mode. The self-refresh enable bit in turn
+        * will be checked/applied by the HW only at the next frame start
+        * event which is after the vblank start event, so we need to have a
+        * wait-for-vblank between disabling the plane and the pipe.
+        */
+       if (HAS_GMCH(dev_priv) &&
+           intel_set_memory_cxsr(dev_priv, false))
+               intel_wait_for_vblank(dev_priv, pipe);
+}
+
+static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
+                                      const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (!old_crtc_state->ips_enabled)
+               return false;
+
+       if (needs_modeset(&new_crtc_state->base))
+               return true;
+
+       /*
+        * Workaround: Do not read or write the pipe palette/gamma data while
+        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+        *
+        * Disable IPS before we program the LUT.
+        */
+       if (IS_HASWELL(dev_priv) &&
+           (new_crtc_state->base.color_mgmt_changed ||
+            new_crtc_state->update_pipe) &&
+           new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+               return true;
+
+       return !new_crtc_state->ips_enabled;
+}
+
+static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
+                                      const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (!new_crtc_state->ips_enabled)
+               return false;
+
+       if (needs_modeset(&new_crtc_state->base))
+               return true;
+
+       /*
+        * Workaround: Do not read or write the pipe palette/gamma data while
+        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+        *
+        * Re-enable IPS after the LUT has been programmed.
+        */
+       if (IS_HASWELL(dev_priv) &&
+           (new_crtc_state->base.color_mgmt_changed ||
+            new_crtc_state->update_pipe) &&
+           new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+               return true;
+
+       /*
+        * We can't read out IPS on broadwell, so assume the worst and
+        * forcibly enable IPS on the first fastset.
+        */
+       if (new_crtc_state->update_pipe &&
+           old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+               return true;
+
+       return !old_crtc_state->ips_enabled;
+}
+
+static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
+                         const struct intel_crtc_state *crtc_state)
+{
+       if (!crtc_state->nv12_planes)
+               return false;
+
+       /* WA Display #0827: Gen9:all */
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
+               return true;
+
+       return false;
+}
+
+static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
+                              const struct intel_crtc_state *crtc_state)
+{
+       /* Wa_2006604312:icl */
+       if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+               return true;
+
+       return false;
+}
+
+static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *old_state = old_crtc_state->base.state;
+       struct intel_crtc_state *pipe_config =
+               intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
+                                               crtc);
+       struct drm_plane *primary = crtc->base.primary;
+       struct drm_plane_state *old_primary_state =
+               drm_atomic_get_old_plane_state(old_state, primary);
+
+       intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
+
+       if (pipe_config->update_wm_post && pipe_config->base.active)
+               intel_update_watermarks(crtc);
+
+       if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
+               hsw_enable_ips(pipe_config);
+
+       if (old_primary_state) {
+               struct drm_plane_state *new_primary_state =
+                       drm_atomic_get_new_plane_state(old_state, primary);
+
+               intel_fbc_post_update(crtc);
+
+               if (new_primary_state->visible &&
+                   (needs_modeset(&pipe_config->base) ||
+                    !old_primary_state->visible))
+                       intel_post_enable_primary(&crtc->base, pipe_config);
+       }
+
+       if (needs_nv12_wa(dev_priv, old_crtc_state) &&
+           !needs_nv12_wa(dev_priv, pipe_config))
+               skl_wa_827(dev_priv, crtc->pipe, false);
+
+       if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
+           !needs_scalerclk_wa(dev_priv, pipe_config))
+               icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+}
+
+static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
+                                  struct intel_crtc_state *pipe_config)
+{
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *old_state = old_crtc_state->base.state;
+       struct drm_plane *primary = crtc->base.primary;
+       struct drm_plane_state *old_primary_state =
+               drm_atomic_get_old_plane_state(old_state, primary);
+       bool modeset = needs_modeset(&pipe_config->base);
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+
+       if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+               hsw_disable_ips(old_crtc_state);
+
+       if (old_primary_state) {
+               struct intel_plane_state *new_primary_state =
+                       intel_atomic_get_new_plane_state(old_intel_state,
+                                                        to_intel_plane(primary));
+
+               intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
+               /*
+                * Gen2 reports pipe underruns whenever all planes are disabled.
+                * So disable underrun reporting before all the planes get disabled.
+                */
+               if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
+                   (modeset || !new_primary_state->base.visible))
+                       intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+       }
+
+       /* Display WA 827 */
+       if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
+           needs_nv12_wa(dev_priv, pipe_config))
+               skl_wa_827(dev_priv, crtc->pipe, true);
+
+       /* Wa_2006604312:icl */
+       if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
+           needs_scalerclk_wa(dev_priv, pipe_config))
+               icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+
+       /*
+        * Vblank time updates from the shadow to live plane control register
+        * are blocked if the memory self-refresh mode is active at that
+        * moment. So to make sure the plane gets truly disabled, disable
+        * first the self-refresh mode. The self-refresh enable bit in turn
+        * will be checked/applied by the HW only at the next frame start
+        * event which is after the vblank start event, so we need to have a
+        * wait-for-vblank between disabling the plane and the pipe.
+        */
+       if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
+           pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+               intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+       /*
+        * IVB workaround: must disable low power watermarks for at least
+        * one frame before enabling scaling.  LP watermarks can be re-enabled
+        * when scaling is disabled.
+        *
+        * WaCxSRDisabledForSpriteScaling:ivb
+        */
+       if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
+           old_crtc_state->base.active)
+               intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+       /*
+        * If we're doing a modeset, we're done.  No need to do any pre-vblank
+        * watermark programming here.
+        */
+       if (needs_modeset(&pipe_config->base))
+               return;
+
+       /*
+        * For platforms that support atomic watermarks, program the
+        * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
+        * will be the intermediate values that are safe for both pre- and
+        * post- vblank; when vblank happens, the 'active' values will be set
+        * to the final 'target' values and we'll do this again to get the
+        * optimal watermarks.  For gen9+ platforms, the values we program here
+        * will be the final target values which will get automatically latched
+        * at vblank time; no further programming will be necessary.
+        *
+        * If a platform hasn't been transitioned to atomic watermarks yet,
+        * we'll continue to update watermarks the old way, if flags tell
+        * us to.
+        */
+       if (dev_priv->display.initial_watermarks != NULL)
+               dev_priv->display.initial_watermarks(old_intel_state,
+                                                    pipe_config);
+       else if (pipe_config->update_wm_pre)
+               intel_update_watermarks(crtc);
+}
+
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+                                     struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       unsigned int update_mask = new_crtc_state->update_planes;
+       const struct intel_plane_state *old_plane_state;
+       struct intel_plane *plane;
+       unsigned fb_bits = 0;
+       int i;
+
+       intel_crtc_dpms_overlay_disable(crtc);
+
+       for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+               if (crtc->pipe != plane->pipe ||
+                   !(update_mask & BIT(plane->id)))
+                       continue;
+
+               intel_disable_plane(plane, new_crtc_state);
+
+               if (old_plane_state->base.visible)
+                       fb_bits |= plane->frontbuffer_bit;
+       }
+
+       intel_frontbuffer_flip(dev_priv, fb_bits);
+}
+
+static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+                                         struct intel_crtc_state *crtc_state,
+                                         struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state,
+                                     struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->pre_enable)
+                       encoder->pre_enable(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_encoders_enable(struct drm_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->enable)
+                       encoder->enable(encoder, crtc_state, conn_state);
+               intel_opregion_notify_encoder(encoder, true);
+       }
+}
+
+static void intel_encoders_disable(struct drm_crtc *crtc,
+                                  struct intel_crtc_state *old_crtc_state,
+                                  struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               intel_opregion_notify_encoder(encoder, false);
+               if (encoder->disable)
+                       encoder->disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void intel_encoders_post_disable(struct drm_crtc *crtc,
+                                       struct intel_crtc_state *old_crtc_state,
+                                       struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->post_disable)
+                       encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+                                           struct intel_crtc_state *old_crtc_state,
+                                           struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->post_pll_disable)
+                       encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+                                      struct intel_crtc_state *crtc_state,
+                                      struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->update_pipe)
+                       encoder->update_pipe(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+       plane->disable_plane(plane, crtc_state);
+}
+
+static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
+                                struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+
+       if (WARN_ON(intel_crtc->active))
+               return;
+
+       /*
+        * Sometimes spurious CPU pipe underruns happen during FDI
+        * training, at least with VGA+HDMI cloning. Suppress them.
+        *
+        * On ILK we get occasional spurious CPU pipe underruns
+        * between eDP port A enable and vdd enable. Also PCH port
+        * enable seems to result in the occasional CPU pipe underrun.
+        *
+        * Spurious PCH underruns also occur during PCH enabling.
+        */
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+
+       if (pipe_config->has_pch_encoder)
+               intel_prepare_shared_dpll(pipe_config);
+
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
+
+       intel_set_pipe_timings(pipe_config);
+       intel_set_pipe_src_size(pipe_config);
+
+       if (pipe_config->has_pch_encoder) {
+               intel_cpu_transcoder_set_m_n(pipe_config,
+                                            &pipe_config->fdi_m_n, NULL);
+       }
+
+       ironlake_set_pipeconf(pipe_config);
+
+       intel_crtc->active = true;
+
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
+       if (pipe_config->has_pch_encoder) {
+               /* Note: FDI PLL enabling _must_ be done before we enable the
+                * cpu pipes, hence this is separate from all the other fdi/pch
+                * enabling. */
+               ironlake_fdi_pll_enable(pipe_config);
+       } else {
+               assert_fdi_tx_disabled(dev_priv, pipe);
+               assert_fdi_rx_disabled(dev_priv, pipe);
+       }
+
+       ironlake_pfit_enable(pipe_config);
+
+       /*
+        * On ILK+ LUT must be loaded before the pipe is running but with
+        * clocks enabled
+        */
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
+       /* update DSPCNTR to configure gamma for pipe bottom color */
+       intel_disable_primary_plane(pipe_config);
+
+       if (dev_priv->display.initial_watermarks != NULL)
+               dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+       intel_enable_pipe(pipe_config);
+
+       if (pipe_config->has_pch_encoder)
+               ironlake_pch_enable(old_intel_state, pipe_config);
+
+       assert_vblank_disabled(crtc);
+       intel_crtc_vblank_on(pipe_config);
+
+       intel_encoders_enable(crtc, pipe_config, old_state);
+
+       if (HAS_PCH_CPT(dev_priv))
+               cpt_verify_modeset(dev, intel_crtc->pipe);
+
+       /*
+        * Must wait for vblank to avoid spurious PCH FIFO underruns.
+        * And a second vblank wait is needed at least on ILK with
+        * some interlaced HDMI modes. Let's do the double wait always
+        * in case there are more corner cases we don't know about.
+        */
+       if (pipe_config->has_pch_encoder) {
+               intel_wait_for_vblank(dev_priv, pipe);
+               intel_wait_for_vblank(dev_priv, pipe);
+       }
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+}
+
+/* IPS only exists on ULT machines and is tied to pipe A. */
+static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+       return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+}
+
+static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
+                                           enum pipe pipe, bool apply)
+{
+       u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+       u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+       if (apply)
+               val |= mask;
+       else
+               val &= ~mask;
+
+       I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+}
+
+static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 val;
+
+       val = MBUS_DBOX_A_CREDIT(2);
+       val |= MBUS_DBOX_BW_CREDIT(1);
+       val |= MBUS_DBOX_B_CREDIT(8);
+
+       I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
+static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
+                               struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       bool psl_clkgate_wa;
+
+       if (WARN_ON(intel_crtc->active))
+               return;
+
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+
+       if (pipe_config->shared_dpll)
+               intel_enable_shared_dpll(pipe_config);
+
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
+
+       if (!transcoder_is_dsi(cpu_transcoder))
+               intel_set_pipe_timings(pipe_config);
+
+       intel_set_pipe_src_size(pipe_config);
+
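+       /*
+        * Note: PIPE_MULT is programmed with the pixel multiplier minus one,
+        * hence the "- 1" below.
+        */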
+       if (cpu_transcoder != TRANSCODER_EDP &&
+           !transcoder_is_dsi(cpu_transcoder)) {
+               I915_WRITE(PIPE_MULT(cpu_transcoder),
+                          pipe_config->pixel_multiplier - 1);
+       }
+
+       if (pipe_config->has_pch_encoder) {
+               intel_cpu_transcoder_set_m_n(pipe_config,
+                                            &pipe_config->fdi_m_n, NULL);
+       }
+
+       if (!transcoder_is_dsi(cpu_transcoder))
+               haswell_set_pipeconf(pipe_config);
+
+       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+               bdw_set_pipemisc(pipe_config);
+
+       intel_crtc->active = true;
+
+       /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
+       psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+                        pipe_config->pch_pfit.enabled;
+       if (psl_clkgate_wa)
+               glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               skylake_pfit_enable(pipe_config);
+       else
+               ironlake_pfit_enable(pipe_config);
+
+       /*
+        * On ILK+ LUT must be loaded before the pipe is running but with
+        * clocks enabled
+        */
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
+       /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+       if (INTEL_GEN(dev_priv) < 9)
+               intel_disable_primary_plane(pipe_config);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_set_pipe_chicken(intel_crtc);
+
+       intel_ddi_set_pipe_settings(pipe_config);
+       if (!transcoder_is_dsi(cpu_transcoder))
+               intel_ddi_enable_transcoder_func(pipe_config);
+
+       if (dev_priv->display.initial_watermarks != NULL)
+               dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_pipe_mbus_enable(intel_crtc);
+
+       /* XXX: Do the pipe assertions at the right place for BXT DSI. */
+       if (!transcoder_is_dsi(cpu_transcoder))
+               intel_enable_pipe(pipe_config);
+
+       if (pipe_config->has_pch_encoder)
+               lpt_pch_enable(old_intel_state, pipe_config);
+
+       if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
+               intel_ddi_set_vc_payload_alloc(pipe_config, true);
+
+       assert_vblank_disabled(crtc);
+       intel_crtc_vblank_on(pipe_config);
+
+       intel_encoders_enable(crtc, pipe_config, old_state);
+
+       if (psl_clkgate_wa) {
+               intel_wait_for_vblank(dev_priv, pipe);
+               glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
+       }
+
+       /* If we change the relative order between pipe/planes enabling, we need
+        * to change the workaround. */
+       hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+       if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+               intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+               intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+       }
+}
+
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* To avoid upsetting the power well on haswell, only disable the pfit if
+        * it's in use. The hw state code will make sure we get this right. */
+       if (old_crtc_state->pch_pfit.enabled) {
+               I915_WRITE(PF_CTL(pipe), 0);
+               I915_WRITE(PF_WIN_POS(pipe), 0);
+               I915_WRITE(PF_WIN_SZ(pipe), 0);
+       }
+}
+
+static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                                 struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       /*
+        * Sometimes spurious CPU pipe underruns happen when the
+        * pipe is already disabled, but FDI RX/TX is still enabled.
+        * Happens at least with VGA+HDMI cloning. Suppress them.
+        */
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
+
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
+       intel_disable_pipe(old_crtc_state);
+
+       ironlake_pfit_disable(old_crtc_state);
+
+       if (old_crtc_state->has_pch_encoder)
+               ironlake_fdi_disable(crtc);
+
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+
+       if (old_crtc_state->has_pch_encoder) {
+               ironlake_disable_pch_transcoder(dev_priv, pipe);
+
+               if (HAS_PCH_CPT(dev_priv)) {
+                       i915_reg_t reg;
+                       u32 temp;
+
+                       /* disable TRANS_DP_CTL */
+                       reg = TRANS_DP_CTL(pipe);
+                       temp = I915_READ(reg);
+                       temp &= ~(TRANS_DP_OUTPUT_ENABLE |
+                                 TRANS_DP_PORT_SEL_MASK);
+                       temp |= TRANS_DP_PORT_SEL_NONE;
+                       I915_WRITE(reg, temp);
+
+                       /* disable DPLL_SEL */
+                       temp = I915_READ(PCH_DPLL_SEL);
+                       temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
+                       I915_WRITE(PCH_DPLL_SEL, temp);
+               }
+
+               ironlake_fdi_pll_disable(intel_crtc);
+       }
+
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+}
+
+static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                                struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
+
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
+       /* XXX: Do the pipe assertions at the right place for BXT DSI. */
+       if (!transcoder_is_dsi(cpu_transcoder))
+               intel_disable_pipe(old_crtc_state);
+
+       if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+               intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
+
+       if (!transcoder_is_dsi(cpu_transcoder))
+               intel_ddi_disable_transcoder_func(old_crtc_state);
+
+       intel_dsc_disable(old_crtc_state);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               skylake_scaler_disable(intel_crtc);
+       else
+               ironlake_pfit_disable(old_crtc_state);
+
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+
+       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+}
+
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (!crtc_state->gmch_pfit.control)
+               return;
+
+       /*
+        * The panel fitter should only be adjusted whilst the pipe is disabled,
+        * according to register description and PRM.
+        */
+       WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+       assert_pipe_disabled(dev_priv, crtc->pipe);
+
+       I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
+       I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
+
+       /* Border color in case we don't scale up to the full screen. Black by
+        * default, change to something else for debugging. */
+       I915_WRITE(BCLRPAT(crtc->pipe), 0);
+}
+
+bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (port == PORT_NONE)
+               return false;
+
+       if (IS_ELKHARTLAKE(dev_priv))
+               return port <= PORT_C;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               return port <= PORT_B;
+
+       return false;
+}
+
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
+               return port >= PORT_C && port <= PORT_F;
+
+       return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (!intel_port_is_tc(dev_priv, port))
+               return PORT_TC_NONE;
+
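+       /* TC ports are numbered from PORT_C upwards (see intel_port_is_tc()). */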
+       return port - PORT_C;
+}
+
+enum intel_display_power_domain intel_port_to_power_domain(enum port port)
+{
+       switch (port) {
+       case PORT_A:
+               return POWER_DOMAIN_PORT_DDI_A_LANES;
+       case PORT_B:
+               return POWER_DOMAIN_PORT_DDI_B_LANES;
+       case PORT_C:
+               return POWER_DOMAIN_PORT_DDI_C_LANES;
+       case PORT_D:
+               return POWER_DOMAIN_PORT_DDI_D_LANES;
+       case PORT_E:
+               return POWER_DOMAIN_PORT_DDI_E_LANES;
+       case PORT_F:
+               return POWER_DOMAIN_PORT_DDI_F_LANES;
+       default:
+               MISSING_CASE(port);
+               return POWER_DOMAIN_PORT_OTHER;
+       }
+}
+
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+       switch (dig_port->aux_ch) {
+       case AUX_CH_A:
+               return POWER_DOMAIN_AUX_A;
+       case AUX_CH_B:
+               return POWER_DOMAIN_AUX_B;
+       case AUX_CH_C:
+               return POWER_DOMAIN_AUX_C;
+       case AUX_CH_D:
+               return POWER_DOMAIN_AUX_D;
+       case AUX_CH_E:
+               return POWER_DOMAIN_AUX_E;
+       case AUX_CH_F:
+               return POWER_DOMAIN_AUX_F;
+       default:
+               MISSING_CASE(dig_port->aux_ch);
+               return POWER_DOMAIN_AUX_A;
+       }
+}
+
+static u64 get_crtc_power_domains(struct drm_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_encoder *encoder;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u64 mask;
+       enum transcoder transcoder = crtc_state->cpu_transcoder;
+
+       if (!crtc_state->base.active)
+               return 0;
+
+       mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
+       mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
+       if (crtc_state->pch_pfit.enabled ||
+           crtc_state->pch_pfit.force_thru)
+               mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+       drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+               struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+               mask |= BIT_ULL(intel_encoder->power_domain);
+       }
+
+       if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+               mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
+
+       if (crtc_state->shared_dpll)
+               mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
+
+       return mask;
+}
+
+static u64
+modeset_get_crtc_power_domains(struct drm_crtc *crtc,
+                              struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum intel_display_power_domain domain;
+       u64 domains, new_domains, old_domains;
+
+       old_domains = intel_crtc->enabled_power_domains;
+       intel_crtc->enabled_power_domains = new_domains =
+               get_crtc_power_domains(crtc, crtc_state);
+
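+       /* Grab a reference for each newly required domain; domains that are
+        * no longer needed are returned so the caller can drop them once the
+        * new state has taken effect (see modeset_put_power_domains()). */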
+       domains = new_domains & ~old_domains;
+
+       for_each_power_domain(domain, domains)
+               intel_display_power_get(dev_priv, domain);
+
+       return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+                                     u64 domains)
+{
+       enum intel_display_power_domain domain;
+
+       for_each_power_domain(domain, domains)
+               intel_display_power_put_unchecked(dev_priv, domain);
+}
+
+static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+                                  struct drm_atomic_state *old_state)
+{
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       if (WARN_ON(intel_crtc->active))
+               return;
+
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
+
+       intel_set_pipe_timings(pipe_config);
+       intel_set_pipe_src_size(pipe_config);
+
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+               I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+               I915_WRITE(CHV_CANVAS(pipe), 0);
+       }
+
+       i9xx_set_pipeconf(pipe_config);
+
+       intel_crtc->active = true;
+
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+
+       if (IS_CHERRYVIEW(dev_priv)) {
+               chv_prepare_pll(intel_crtc, pipe_config);
+               chv_enable_pll(intel_crtc, pipe_config);
+       } else {
+               vlv_prepare_pll(intel_crtc, pipe_config);
+               vlv_enable_pll(intel_crtc, pipe_config);
+       }
+
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
+       i9xx_pfit_enable(pipe_config);
+
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
+       /* update DSPCNTR to configure gamma for pipe bottom color */
+       intel_disable_primary_plane(pipe_config);
+
+       dev_priv->display.initial_watermarks(old_intel_state,
+                                            pipe_config);
+       intel_enable_pipe(pipe_config);
+
+       assert_vblank_disabled(crtc);
+       intel_crtc_vblank_on(pipe_config);
+
+       intel_encoders_enable(crtc, pipe_config, old_state);
+}
+
+static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
+       I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
+}
+
+static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+                            struct drm_atomic_state *old_state)
+{
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
+
+       if (WARN_ON(intel_crtc->active))
+               return;
+
+       i9xx_set_pll_dividers(pipe_config);
+
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               intel_dp_set_m_n(pipe_config, M1_N1);
+
+       intel_set_pipe_timings(pipe_config);
+       intel_set_pipe_src_size(pipe_config);
+
+       i9xx_set_pipeconf(pipe_config);
+
+       intel_crtc->active = true;
+
+       if (!IS_GEN(dev_priv, 2))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
+       i9xx_enable_pll(intel_crtc, pipe_config);
+
+       i9xx_pfit_enable(pipe_config);
+
+       intel_color_load_luts(pipe_config);
+       intel_color_commit(pipe_config);
+       /* update DSPCNTR to configure gamma for pipe bottom color */
+       intel_disable_primary_plane(pipe_config);
+
+       if (dev_priv->display.initial_watermarks != NULL)
+               dev_priv->display.initial_watermarks(old_intel_state,
+                                                    pipe_config);
+       else
+               intel_update_watermarks(intel_crtc);
+       intel_enable_pipe(pipe_config);
+
+       assert_vblank_disabled(crtc);
+       intel_crtc_vblank_on(pipe_config);
+
+       intel_encoders_enable(crtc, pipe_config, old_state);
+}
+
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (!old_crtc_state->gmch_pfit.control)
+               return;
+
+       assert_pipe_disabled(dev_priv, crtc->pipe);
+
+       DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
+                     I915_READ(PFIT_CONTROL));
+       I915_WRITE(PFIT_CONTROL, 0);
+}
+
+static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                             struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       /*
+        * On gen2 planes are double buffered but the pipe isn't, so we must
+        * wait for planes to fully turn off before disabling the pipe.
+        */
+       if (IS_GEN(dev_priv, 2))
+               intel_wait_for_vblank(dev_priv, pipe);
+
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
+
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
+       intel_disable_pipe(old_crtc_state);
+
+       i9xx_pfit_disable(old_crtc_state);
+
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+
+       if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
+               if (IS_CHERRYVIEW(dev_priv))
+                       chv_disable_pll(dev_priv, pipe);
+               else if (IS_VALLEYVIEW(dev_priv))
+                       vlv_disable_pll(dev_priv, pipe);
+               else
+                       i9xx_disable_pll(old_crtc_state);
+       }
+
+       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+
+       if (!IS_GEN(dev_priv, 2))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+       if (!dev_priv->display.initial_watermarks)
+               intel_update_watermarks(intel_crtc);
+
+       /* clock the pipe down to 640x480@60 to potentially save power */
+       if (IS_I830(dev_priv))
+               i830_enable_pipe(dev_priv, pipe);
+}
+
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+                                       struct drm_modeset_acquire_ctx *ctx)
+{
+       struct intel_encoder *encoder;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_bw_state *bw_state =
+               to_intel_bw_state(dev_priv->bw_obj.state);
+       enum intel_display_power_domain domain;
+       struct intel_plane *plane;
+       u64 domains;
+       struct drm_atomic_state *state;
+       struct intel_crtc_state *crtc_state;
+       int ret;
+
+       if (!intel_crtc->active)
+               return;
+
+       for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+               const struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
+
+               if (plane_state->base.visible)
+                       intel_plane_disable_noatomic(intel_crtc, plane);
+       }
+
+       state = drm_atomic_state_alloc(crtc->dev);
+       if (!state) {
+               DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                             crtc->base.id, crtc->name);
+               return;
+       }
+
+       state->acquire_ctx = ctx;
+
+       /* Everything's already locked, -EDEADLK can't happen. */
+       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+       ret = drm_atomic_add_affected_connectors(state, crtc);
+
+       WARN_ON(IS_ERR(crtc_state) || ret);
+
+       dev_priv->display.crtc_disable(crtc_state, state);
+
+       drm_atomic_state_put(state);
+
+       DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+                     crtc->base.id, crtc->name);
+
+       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
+       crtc->state->active = false;
+       intel_crtc->active = false;
+       crtc->enabled = false;
+       crtc->state->connector_mask = 0;
+       crtc->state->encoder_mask = 0;
+
+       for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+               encoder->base.crtc = NULL;
+
+       intel_fbc_disable(intel_crtc);
+       intel_update_watermarks(intel_crtc);
+       intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
+
+       domains = intel_crtc->enabled_power_domains;
+       for_each_power_domain(domain, domains)
+               intel_display_power_put_unchecked(dev_priv, domain);
+       intel_crtc->enabled_power_domains = 0;
+
+       dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+       dev_priv->min_cdclk[intel_crtc->pipe] = 0;
+       dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+
+       bw_state->data_rate[intel_crtc->pipe] = 0;
+       bw_state->num_active_planes[intel_crtc->pipe] = 0;
+}
+
+/*
+ * Turn all CRTCs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
+ */
+int intel_display_suspend(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *state;
+       int ret;
+
+       state = drm_atomic_helper_suspend(dev);
+       ret = PTR_ERR_OR_ZERO(state);
+       if (ret)
+               DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+       else
+               dev_priv->modeset_restore_state = state;
+       return ret;
+}
+
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+       drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
+}
+
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+                                        struct drm_connector_state *conn_state)
+{
+       struct intel_connector *connector = to_intel_connector(conn_state->connector);
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.base.id,
+                     connector->base.name);
+
+       if (connector->get_hw_state(connector)) {
+               struct intel_encoder *encoder = connector->encoder;
+
+               I915_STATE_WARN(!crtc_state,
+                        "connector enabled without attached crtc\n");
+
+               if (!crtc_state)
+                       return;
+
+               I915_STATE_WARN(!crtc_state->active,
+                     "connector is active, but attached crtc isn't\n");
+
+               if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
+                       return;
+
+               I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
+                       "atomic encoder doesn't match attached encoder\n");
+
+               I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
+                       "attached encoder crtc differs from connector crtc\n");
+       } else {
+               I915_STATE_WARN(crtc_state && crtc_state->active,
+                       "attached crtc is active, but connector isn't\n");
+               I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
+                       "best encoder set without crtc!\n");
+       }
+}
+
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+       if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+               return crtc_state->fdi_lanes;
+
+       return 0;
+}
+
+static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+                                    struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *state = pipe_config->base.state;
+       struct intel_crtc *other_crtc;
+       struct intel_crtc_state *other_crtc_state;
+
+       DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
+                     pipe_name(pipe), pipe_config->fdi_lanes);
+       if (pipe_config->fdi_lanes > 4) {
+               DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
+                             pipe_name(pipe), pipe_config->fdi_lanes);
+               return -EINVAL;
+       }
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               if (pipe_config->fdi_lanes > 2) {
+                       DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
+                                     pipe_config->fdi_lanes);
+                       return -EINVAL;
+               } else {
+                       return 0;
+               }
+       }
+
+       if (INTEL_INFO(dev_priv)->num_pipes == 2)
+               return 0;
+
+       /* Ivybridge 3 pipe is really complicated */
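+       /* Pipes B and C share the FDI lanes: pipe B may use more than two
+        * lanes only while pipe C is unused, and pipe C is limited to two
+        * lanes and only while pipe B uses at most two. */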
+       switch (pipe) {
+       case PIPE_A:
+               return 0;
+       case PIPE_B:
+               if (pipe_config->fdi_lanes <= 2)
+                       return 0;
+
+               other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+               other_crtc_state =
+                       intel_atomic_get_crtc_state(state, other_crtc);
+               if (IS_ERR(other_crtc_state))
+                       return PTR_ERR(other_crtc_state);
+
+               if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+                       DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+                                     pipe_name(pipe), pipe_config->fdi_lanes);
+                       return -EINVAL;
+               }
+               return 0;
+       case PIPE_C:
+               if (pipe_config->fdi_lanes > 2) {
+                       DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
+                                     pipe_name(pipe), pipe_config->fdi_lanes);
+                       return -EINVAL;
+               }
+
+               other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+               other_crtc_state =
+                       intel_atomic_get_crtc_state(state, other_crtc);
+               if (IS_ERR(other_crtc_state))
+                       return PTR_ERR(other_crtc_state);
+
+               if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+                       DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+                       return -EINVAL;
+               }
+               return 0;
+       default:
+               BUG();
+       }
+}
+
+#define RETRY 1
+static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
+                                      struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int lane, link_bw, fdi_dotclock, ret;
+       bool needs_recompute = false;
+
+retry:
+       /* FDI is a binary signal running at ~2.7GHz, encoding
+        * each output octet as 10 bits. The actual frequency
+        * is stored as a divider into a 100MHz clock, and the
+        * mode pixel clock is stored in units of 1KHz.
+        * Hence the bw of each lane in terms of the mode signal
+        * is:
+        */
+       link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
+
+       fdi_dotclock = adjusted_mode->crtc_clock;
+
+       lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
+                                          pipe_config->pipe_bpp);
+
+       pipe_config->fdi_lanes = lane;
+
+       intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+                              link_bw, &pipe_config->fdi_m_n, false);
+
+       ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+       if (ret == -EDEADLK)
+               return ret;
+
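+       /* If the lane check fails, drop 2 bits per component (6 bpp total)
+        * and retry, down to a floor of 6 bpc. */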
+       if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+               pipe_config->pipe_bpp -= 2*3;
+               DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
+                             pipe_config->pipe_bpp);
+               needs_recompute = true;
+               pipe_config->bw_constrained = true;
+
+               goto retry;
+       }
+
+       if (needs_recompute)
+               return RETRY;
+
+       return ret;
+}
+
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       /* IPS only exists on ULT machines and is tied to pipe A. */
+       if (!hsw_crtc_supports_ips(crtc))
+               return false;
+
+       if (!i915_modparams.enable_ips)
+               return false;
+
+       if (crtc_state->pipe_bpp > 24)
+               return false;
+
+       /*
+        * We compare against max which means we must take
+        * the increased cdclk requirement into account when
+        * calculating the new cdclk.
+        *
+        * Should measure whether using a lower cdclk w/o IPS would be preferable.
+        */
+       if (IS_BROADWELL(dev_priv) &&
+           crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
+               return false;
+
+       return true;
+}
+
+static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->base.crtc->dev);
+       struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(crtc_state->base.state);
+
+       if (!hsw_crtc_state_ips_capable(crtc_state))
+               return false;
+
+       /*
+        * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+        * enabled and disabled dynamically based on package C states,
+        * user space can't make reliable use of the CRCs, so let's just
+        * completely disable it.
+        */
+       if (crtc_state->crc_enabled)
+               return false;
+
+       /* IPS should be fine as long as at least one plane is enabled. */
+       if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
+               return false;
+
+       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+       if (IS_BROADWELL(dev_priv) &&
+           crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
+               return false;
+
+       return true;
+}
+
+static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
+{
+       const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       /* GDG double wide on either pipe, otherwise pipe A only */
+       return INTEL_GEN(dev_priv) < 4 &&
+               (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+}
+
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+{
+       u32 pixel_rate;
+
+       pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+
+       /*
+        * We only use IF-ID interlacing. If we ever use
+        * PF-ID we'll need to adjust the pixel_rate here.
+        */
+
+       if (pipe_config->pch_pfit.enabled) {
+               u64 pipe_w, pipe_h, pfit_w, pfit_h;
+               u32 pfit_size = pipe_config->pch_pfit.size;
+
+               pipe_w = pipe_config->pipe_src_w;
+               pipe_h = pipe_config->pipe_src_h;
+
+               pfit_w = (pfit_size >> 16) & 0xFFFF;
+               pfit_h = pfit_size & 0xFFFF;
+               if (pipe_w < pfit_w)
+                       pipe_w = pfit_w;
+               if (pipe_h < pfit_h)
+                       pipe_h = pfit_h;
+
+               if (WARN_ON(!pfit_w || !pfit_h))
+                       return pixel_rate;
+
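+               /* Scale by the source/destination area ratio: downscaling
+                * through the panel fitter raises the effective pixel rate,
+                * while the clamping above keeps the ratio at >= 1 when
+                * upscaling. */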
+               pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
+                                    pfit_w * pfit_h);
+       }
+
+       return pixel_rate;
+}
+
+static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       if (HAS_GMCH(dev_priv))
+               /* FIXME calculate proper pipe pixel rate for GMCH pfit */
+               crtc_state->pixel_rate =
+                       crtc_state->base.adjusted_mode.crtc_clock;
+       else
+               crtc_state->pixel_rate =
+                       ilk_pipe_pixel_rate(crtc_state);
+}
+
+static int intel_crtc_compute_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int clock_limit = dev_priv->max_dotclk_freq;
+
+       if (INTEL_GEN(dev_priv) < 4) {
+               clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+
+               /*
+                * Enable double wide mode when the dot clock
+                * is > 90% of the (display) core speed.
+                */
+               if (intel_crtc_supports_double_wide(crtc) &&
+                   adjusted_mode->crtc_clock > clock_limit) {
+                       clock_limit = dev_priv->max_dotclk_freq;
+                       pipe_config->double_wide = true;
+               }
+       }
+
+       if (adjusted_mode->crtc_clock > clock_limit) {
+               DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+                             adjusted_mode->crtc_clock, clock_limit,
+                             yesno(pipe_config->double_wide));
+               return -EINVAL;
+       }
+
+       if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+            pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
+            pipe_config->base.ctm) {
+               /*
+                * There is only one pipe CSC unit per pipe, and we need that
+                * for output conversion from RGB->YCBCR. So if CTM is already
+                * applied we can't support YCBCR420 output.
+                */
+               DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Pipe horizontal size must be even in:
+        * - DVO ganged mode
+        * - LVDS dual channel mode
+        * - Double wide pipe
+        */
+       if (pipe_config->pipe_src_w & 1) {
+               if (pipe_config->double_wide) {
+                       DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
+                       return -EINVAL;
+               }
+
+               if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+                   intel_is_dual_link_lvds(dev_priv)) {
+                       DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* Cantiga+ cannot handle modes with a hsync front porch of 0.
+        * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+        */
+       if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+               adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
+               return -EINVAL;
+
+       intel_crtc_compute_pixel_rate(pipe_config);
+
+       if (pipe_config->has_pch_encoder)
+               return ironlake_fdi_compute_config(crtc, pipe_config);
+
+       return 0;
+}
+
+static void
+intel_reduce_m_n_ratio(u32 *num, u32 *den)
+{
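+       /* Halve both values until they fit in the M/N register fields; the
+        * ratio is preserved up to rounding. */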
+       while (*num > DATA_LINK_M_N_MASK ||
+              *den > DATA_LINK_M_N_MASK) {
+               *num >>= 1;
+               *den >>= 1;
+       }
+}
+
+static void compute_m_n(unsigned int m, unsigned int n,
+                       u32 *ret_m, u32 *ret_n,
+                       bool constant_n)
+{
+       /*
+        * Several DP dongles in particular seem to be fussy about
+        * too large link M/N values. Use an N value of 0x8000, which
+        * such devices should accept. 0x8000 is the
+        * specified fixed N value for asynchronous clock mode,
+        * which the devices expect also in synchronous clock mode.
+        */
+       if (constant_n)
+               *ret_n = 0x8000;
+       else
+               *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+
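+       /* Scale M so that ret_m/ret_n approximates the requested m/n ratio. */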
+       *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
+       intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
+void
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
+                      int pixel_clock, int link_clock,
+                      struct intel_link_m_n *m_n,
+                      bool constant_n)
+{
+       m_n->tu = 64;
+
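+       /* Data M/N encodes (bits_per_pixel * pixel_clock) vs. the total link
+        * bandwidth (link_clock * nlanes * 8), link M/N encodes the pixel
+        * clock vs. the link clock. */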
+       compute_m_n(bits_per_pixel * pixel_clock,
+                   link_clock * nlanes * 8,
+                   &m_n->gmch_m, &m_n->gmch_n,
+                   constant_n);
+
+       compute_m_n(pixel_clock, link_clock,
+                   &m_n->link_m, &m_n->link_n,
+                   constant_n);
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+       if (i915_modparams.panel_use_ssc >= 0)
+               return i915_modparams.panel_use_ssc != 0;
+       return dev_priv->vbt.lvds_use_ssc
+               && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
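+/* Pineview packs 1 << n and m2 into the FP register, while the other i9xx
+ * platforms pack n, m1 and m2 as plain bitfields. */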
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+{
+       return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+       return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *crtc_state,
+                                    struct dpll *reduced_clock)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 fp, fp2 = 0;
+
+       if (IS_PINEVIEW(dev_priv)) {
+               fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+               if (reduced_clock)
+                       fp2 = pnv_dpll_compute_fp(reduced_clock);
+       } else {
+               fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+               if (reduced_clock)
+                       fp2 = i9xx_dpll_compute_fp(reduced_clock);
+       }
+
+       crtc_state->dpll_hw_state.fp0 = fp;
+
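+       /* FP1 carries the reduced-clock dividers (used only for LVDS with a
+        * reduced clock); otherwise it simply mirrors FP0. */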
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+           reduced_clock) {
+               crtc_state->dpll_hw_state.fp1 = fp2;
+       } else {
+               crtc_state->dpll_hw_state.fp1 = fp;
+       }
+}
+
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+                                enum pipe pipe)
+{
+       u32 reg_val;
+
+       /*
+        * PLLB opamp always calibrates to max value of 0x3f, force enable it
+        * and set it to a reasonable value instead.
+        */
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+       reg_val &= 0xffffff00;
+       reg_val |= 0x00000030;
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+       reg_val &= 0x00ffffff;
+       reg_val |= 0x8c000000;
+       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+       reg_val &= 0xffffff00;
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+       reg_val &= 0x00ffffff;
+       reg_val |= 0xb0000000;
+       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+}
+
+static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+                                        const struct intel_link_m_n *m_n)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+       I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+       I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+       I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+}
+
+static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+                                enum transcoder transcoder)
+{
+       if (IS_HASWELL(dev_priv))
+               return transcoder == TRANSCODER_EDP;
+
+       /*
+        * Strictly speaking some registers are available before
+        * gen7, but we only support DRRS on gen7+
+        */
+       return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
+}
+
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+                                        const struct intel_link_m_n *m_n,
+                                        const struct intel_link_m_n *m2_n2)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       enum transcoder transcoder = crtc_state->cpu_transcoder;
+
+       if (INTEL_GEN(dev_priv) >= 5) {
+               I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+               I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+               I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+               I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+               /*
+                * M2_N2 registers are set only if DRRS is supported
+                * (to make sure the registers are not unnecessarily accessed).
+                */
+               if (m2_n2 && crtc_state->has_drrs &&
+                   transcoder_has_m2_n2(dev_priv, transcoder)) {
+                       I915_WRITE(PIPE_DATA_M2(transcoder),
+                                       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+                       I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
+                       I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
+                       I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+               }
+       } else {
+               I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+               I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+               I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
+               I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+       }
+}
+
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
+{
+       const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+       if (m_n == M1_N1) {
+               dp_m_n = &crtc_state->dp_m_n;
+               dp_m2_n2 = &crtc_state->dp_m2_n2;
+       } else if (m_n == M2_N2) {
+               /*
+                * M2_N2 registers are not supported. Hence m2_n2 divider value
+                * needs to be programmed into M1_N1.
+                */
+               dp_m_n = &crtc_state->dp_m2_n2;
+       } else {
+               DRM_ERROR("Unsupported divider value\n");
+               return;
+       }
+
+       if (crtc_state->has_pch_encoder)
+               intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
+       else
+               intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
+}
+
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+                            struct intel_crtc_state *pipe_config)
+{
+       pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+       if (crtc->pipe != PIPE_A)
+               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+       /* DPLL not used with DSI, but still need the rest set up */
+       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+               pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+                       DPLL_EXT_BUFFER_ENABLE_VLV;
+
+       pipe_config->dpll_hw_state.dpll_md =
+               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_compute_dpll(struct intel_crtc *crtc,
+                            struct intel_crtc_state *pipe_config)
+{
+       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+       if (crtc->pipe != PIPE_A)
+               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+       /* DPLL not used with DSI, but still need the rest set up */
+       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+               pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+       pipe_config->dpll_hw_state.dpll_md =
+               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum pipe pipe = crtc->pipe;
+       u32 mdiv;
+       u32 bestn, bestm1, bestm2, bestp1, bestp2;
+       u32 coreclk, reg_val;
+
+       /* Enable Refclk */
+       I915_WRITE(DPLL(pipe),
+                  pipe_config->dpll_hw_state.dpll &
+                  ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+       /* No need to actually set up the DPLL with DSI */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
+       vlv_dpio_get(dev_priv);
+
+       bestn = pipe_config->dpll.n;
+       bestm1 = pipe_config->dpll.m1;
+       bestm2 = pipe_config->dpll.m2;
+       bestp1 = pipe_config->dpll.p1;
+       bestp2 = pipe_config->dpll.p2;
+
+       /* See eDP HDMI DPIO driver vbios notes doc */
+
+       /* PLL B needs special handling */
+       if (pipe == PIPE_B)
+               vlv_pllb_recal_opamp(dev_priv, pipe);
+
+       /* Set up Tx target for periodic Rcomp update */
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+
+       /* Disable target IRef on PLL */
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+       reg_val &= 0x00ffffff;
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+
+       /* Disable fast lock */
+       vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+
+       /* Set idtafcrecal before PLL is enabled */
+       mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+       mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+       mdiv |= ((bestn << DPIO_N_SHIFT));
+       mdiv |= (1 << DPIO_K_SHIFT);
+
+       /*
+        * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+        * but we don't support that).
+        * Note: don't use the DAC post divider as it seems unstable.
+        */
+       mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+       mdiv |= DPIO_ENABLE_CALIBRATION;
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+       /* Set HBR and RBR LPF coefficients */
+       if (pipe_config->port_clock == 162000 ||
+           intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+           intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+                                0x009f0003);
+       else
+               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+                                0x00d0000f);
+
+       if (intel_crtc_has_dp_encoder(pipe_config)) {
+               /* Use SSC source */
+               if (pipe == PIPE_A)
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+                                        0x0df40000);
+               else
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+                                        0x0df70000);
+       } else { /* HDMI or VGA */
+               /* Use bend source */
+               if (pipe == PIPE_A)
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+                                        0x0df70000);
+               else
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+                                        0x0df40000);
+       }
+
+       coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+       coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+       if (intel_crtc_has_dp_encoder(pipe_config))
+               coreclk |= 0x01000000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+
+       vlv_dpio_put(dev_priv);
+}
+
+static void chv_prepare_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum pipe pipe = crtc->pipe;
+       enum dpio_channel port = vlv_pipe_to_channel(pipe);
+       u32 loopfilter, tribuf_calcntr;
+       u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+       u32 dpio_val;
+       int vco;
+
+       /* Enable Refclk and SSC */
+       I915_WRITE(DPLL(pipe),
+                  pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+       /* No need to actually set up the DPLL with DSI */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
+       bestn = pipe_config->dpll.n;
+       bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+       bestm1 = pipe_config->dpll.m1;
+       bestm2 = pipe_config->dpll.m2 >> 22;
+       bestp1 = pipe_config->dpll.p1;
+       bestp2 = pipe_config->dpll.p2;
+       vco = pipe_config->dpll.vco;
+       dpio_val = 0;
+       loopfilter = 0;
+
+       vlv_dpio_get(dev_priv);
+
+       /* p1 and p2 divider */
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+                       5 << DPIO_CHV_S1_DIV_SHIFT |
+                       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+                       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+                       1 << DPIO_CHV_K_DIV_SHIFT);
+
+       /* Feedback post-divider - m2 */
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+       /* Feedback refclk divider - n and m1 */
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+                       DPIO_CHV_M1_DIV_BY_2 |
+                       1 << DPIO_CHV_N_DIV_SHIFT);
+
+       /* M2 fraction division */
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+       /* M2 fraction division enable */
+       dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+       dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+       dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+       if (bestm2_frac)
+               dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+       /* Program digital lock detect threshold */
+       dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+       dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+                                       DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+       dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+       if (!bestm2_frac)
+               dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+
+       /* Loop filter */
+       if (vco == 5400000) {
+               loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+               loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+               loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+               tribuf_calcntr = 0x9;
+       } else if (vco <= 6200000) {
+               loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+               loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+               loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+               tribuf_calcntr = 0x9;
+       } else if (vco <= 6480000) {
+               loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+               loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+               loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+               tribuf_calcntr = 0x8;
+       } else {
+               /* Not supported. Apply the same limits as in the max case */
+               loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+               loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+               loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+               tribuf_calcntr = 0;
+       }
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+
+       dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+       dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+       dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
+       /* AFC Recal */
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+                       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+                       DPIO_AFC_RECAL);
+
+       vlv_dpio_put(dev_priv);
+}
+
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+                    const struct dpll *dpll)
+{
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+       struct intel_crtc_state *pipe_config;
+
+       pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+       if (!pipe_config)
+               return -ENOMEM;
+
+       pipe_config->base.crtc = &crtc->base;
+       pipe_config->pixel_multiplier = 1;
+       pipe_config->dpll = *dpll;
+
+       if (IS_CHERRYVIEW(dev_priv)) {
+               chv_compute_dpll(crtc, pipe_config);
+               chv_prepare_pll(crtc, pipe_config);
+               chv_enable_pll(crtc, pipe_config);
+       } else {
+               vlv_compute_dpll(crtc, pipe_config);
+               vlv_prepare_pll(crtc, pipe_config);
+               vlv_enable_pll(crtc, pipe_config);
+       }
+
+       kfree(pipe_config);
+
+       return 0;
+}
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. To be used in cases where we need
+ * the PLL enabled even when @pipe is not going to be enabled.
+ */
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       if (IS_CHERRYVIEW(dev_priv))
+               chv_disable_pll(dev_priv, pipe);
+       else
+               vlv_disable_pll(dev_priv, pipe);
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+                             struct intel_crtc_state *crtc_state,
+                             struct dpll *reduced_clock)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 dpll;
+       struct dpll *clock = &crtc_state->dpll;
+
+       i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+       dpll = DPLL_VGA_MODE_DIS;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+               dpll |= DPLLB_MODE_LVDS;
+       else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+
+       if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+           IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+               dpll |= (crtc_state->pixel_multiplier - 1)
+                       << SDVO_MULTIPLIER_SHIFT_HIRES;
+       }
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+               dpll |= DPLL_SDVO_HIGH_SPEED;
+
+       if (intel_crtc_has_dp_encoder(crtc_state))
+               dpll |= DPLL_SDVO_HIGH_SPEED;
+
+       /* compute bitmask from p1 value */
+       if (IS_PINEVIEW(dev_priv))
+               dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+       else {
+               dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+               if (IS_G4X(dev_priv) && reduced_clock)
+                       dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+       }
+       switch (clock->p2) {
+       case 5:
+               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+               break;
+       case 7:
+               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+               break;
+       case 10:
+               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+               break;
+       case 14:
+               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+               break;
+       }
+       if (INTEL_GEN(dev_priv) >= 4)
+               dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+       if (crtc_state->sdvo_tv_clock)
+               dpll |= PLL_REF_INPUT_TVCLKINBC;
+       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+                intel_panel_use_ssc(dev_priv))
+               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+       else
+               dpll |= PLL_REF_INPUT_DREFCLK;
+
+       dpll |= DPLL_VCO_ENABLE;
+       crtc_state->dpll_hw_state.dpll = dpll;
+
+       if (INTEL_GEN(dev_priv) >= 4) {
+               u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+                       << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+               crtc_state->dpll_hw_state.dpll_md = dpll_md;
+       }
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+                             struct intel_crtc_state *crtc_state,
+                             struct dpll *reduced_clock)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 dpll;
+       struct dpll *clock = &crtc_state->dpll;
+
+       i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+       dpll = DPLL_VGA_MODE_DIS;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+       } else {
+               if (clock->p1 == 2)
+                       dpll |= PLL_P1_DIVIDE_BY_TWO;
+               else
+                       dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+               if (clock->p2 == 4)
+                       dpll |= PLL_P2_DIVIDE_BY_4;
+       }
+
+       /*
+        * Bspec:
+        * "[Almador Errata}: For the correct operation of the muxed DVO pins
+        *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+        *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+        *  Enable) must be set to “1” in both the DPLL A Control Register
+        *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+        *
+        * For simplicity we keep both bits always enabled in
+        * both DPLLs. The spec says we should disable the DVO 2X clock
+        * when not needed, but this seems to work fine in practice.
+        */
+       if (IS_I830(dev_priv) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+               dpll |= DPLL_DVO_2X_MODE;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+           intel_panel_use_ssc(dev_priv))
+               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+       else
+               dpll |= PLL_REF_INPUT_DREFCLK;
+
+       dpll |= DPLL_VCO_ENABLE;
+       crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+       u32 crtc_vtotal, crtc_vblank_end;
+       int vsyncshift = 0;
+
+       /* We need to be careful not to change the adjusted mode, for otherwise
+        * the hw state checker will get angry at the mismatch. */
+       crtc_vtotal = adjusted_mode->crtc_vtotal;
+       crtc_vblank_end = adjusted_mode->crtc_vblank_end;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               /* the chip adds 2 halflines automatically */
+               crtc_vtotal -= 1;
+               crtc_vblank_end -= 1;
+
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+                       vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
+               else
+                       vsyncshift = adjusted_mode->crtc_hsync_start -
+                               adjusted_mode->crtc_htotal / 2;
+               if (vsyncshift < 0)
+                       vsyncshift += adjusted_mode->crtc_htotal;
+       }
+
+       if (INTEL_GEN(dev_priv) > 3)
+               I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
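+       /* The timing registers take zero-based values: active/start - 1 in
+        * the low 16 bits and total/end - 1 in the high 16 bits. */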
+       I915_WRITE(HTOTAL(cpu_transcoder),
+                  (adjusted_mode->crtc_hdisplay - 1) |
+                  ((adjusted_mode->crtc_htotal - 1) << 16));
+       I915_WRITE(HBLANK(cpu_transcoder),
+                  (adjusted_mode->crtc_hblank_start - 1) |
+                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       I915_WRITE(HSYNC(cpu_transcoder),
+                  (adjusted_mode->crtc_hsync_start - 1) |
+                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+       I915_WRITE(VTOTAL(cpu_transcoder),
+                  (adjusted_mode->crtc_vdisplay - 1) |
+                  ((crtc_vtotal - 1) << 16));
+       I915_WRITE(VBLANK(cpu_transcoder),
+                  (adjusted_mode->crtc_vblank_start - 1) |
+                  ((crtc_vblank_end - 1) << 16));
+       I915_WRITE(VSYNC(cpu_transcoder),
+                  (adjusted_mode->crtc_vsync_start - 1) |
+                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+       /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+        * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+        * documented on the DDI_FUNC_CTL register description, EDP Input Select
+        * bits. */
+       if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
+           (pipe == PIPE_B || pipe == PIPE_C))
+               I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+}
+
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* pipesrc controls the size that is scaled from, which should
+        * always be the user's requested size.
+        */
+       I915_WRITE(PIPESRC(pipe),
+                  ((crtc_state->pipe_src_w - 1) << 16) |
+                  (crtc_state->pipe_src_h - 1));
+}
+
+static void intel_get_pipe_timings(struct intel_crtc *crtc,
+                                  struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+       u32 tmp;
+
+       tmp = I915_READ(HTOTAL(cpu_transcoder));
+       pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+       pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+
+       if (!transcoder_is_dsi(cpu_transcoder)) {
+               tmp = I915_READ(HBLANK(cpu_transcoder));
+               pipe_config->base.adjusted_mode.crtc_hblank_start =
+                                                       (tmp & 0xffff) + 1;
+               pipe_config->base.adjusted_mode.crtc_hblank_end =
+                                               ((tmp >> 16) & 0xffff) + 1;
+       }
+       tmp = I915_READ(HSYNC(cpu_transcoder));
+       pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+       pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+       tmp = I915_READ(VTOTAL(cpu_transcoder));
+       pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+       pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+
+       if (!transcoder_is_dsi(cpu_transcoder)) {
+               tmp = I915_READ(VBLANK(cpu_transcoder));
+               pipe_config->base.adjusted_mode.crtc_vblank_start =
+                                                       (tmp & 0xffff) + 1;
+               pipe_config->base.adjusted_mode.crtc_vblank_end =
+                                               ((tmp >> 16) & 0xffff) + 1;
+       }
+       tmp = I915_READ(VSYNC(cpu_transcoder));
+       pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+       pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+       if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+               pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+               pipe_config->base.adjusted_mode.crtc_vtotal += 1;
+               pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
+       }
+}
+
+static void intel_get_pipe_src_size(struct intel_crtc *crtc,
+                                   struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 tmp;
+
+       tmp = I915_READ(PIPESRC(crtc->pipe));
+       pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+       pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+       pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
+       pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
+}
+
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+                                struct intel_crtc_state *pipe_config)
+{
+       mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
+       mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
+       mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
+       mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
+
+       mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
+       mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
+       mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
+       mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
+
+       mode->flags = pipe_config->base.adjusted_mode.flags;
+       mode->type = DRM_MODE_TYPE_DRIVER;
+
+       mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+
+       mode->hsync = drm_mode_hsync(mode);
+       mode->vrefresh = drm_mode_vrefresh(mode);
+       drm_mode_set_name(mode);
+}
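+
+/*
+ * Example with assumed values: a progressive 1920x1080 mode with
+ * crtc_clock = 148500 kHz, htotal = 2200 and vtotal = 1125 ends up with
+ * drm_mode_vrefresh() = 148500 * 1000 / (2200 * 1125) = 60 Hz.
+ */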
+
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 pipeconf;
+
+       pipeconf = 0;
+
+       /* we keep both pipes enabled on 830 */
+       if (IS_I830(dev_priv))
+               pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+
+       if (crtc_state->double_wide)
+               pipeconf |= PIPECONF_DOUBLE_WIDE;
+
+       /* only g4x and later have fancy bpc/dither controls */
+       if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+           IS_CHERRYVIEW(dev_priv)) {
+               /* Bspec claims that we can't use dithering for 30bpp pipes. */
+               if (crtc_state->dither && crtc_state->pipe_bpp != 30)
+                       pipeconf |= PIPECONF_DITHER_EN |
+                                   PIPECONF_DITHER_TYPE_SP;
+
+               switch (crtc_state->pipe_bpp) {
+               case 18:
+                       pipeconf |= PIPECONF_6BPC;
+                       break;
+               case 24:
+                       pipeconf |= PIPECONF_8BPC;
+                       break;
+               case 30:
+                       pipeconf |= PIPECONF_10BPC;
+                       break;
+               default:
+                       /* Case prevented by intel_choose_pipe_bpp_dither. */
+                       BUG();
+               }
+       }
+
+       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+               if (INTEL_GEN(dev_priv) < 4 ||
+                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+                       pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+               else
+                       pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+       } else {
+               pipeconf |= PIPECONF_PROGRESSIVE;
+       }
+
+       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+            crtc_state->limited_color_range)
+               pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+
+       pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
+       I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
+       POSTING_READ(PIPECONF(crtc->pipe));
+}
+
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+                                  struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
+       int refclk = 48000;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               if (intel_panel_use_ssc(dev_priv)) {
+                       refclk = dev_priv->vbt.lvds_ssc_freq;
+                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+               }
+
+               limit = &intel_limits_i8xx_lvds;
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+               limit = &intel_limits_i8xx_dvo;
+       } else {
+               limit = &intel_limits_i8xx_dac;
+       }
+
+       if (!crtc_state->clock_set &&
+           !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                                refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+       return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct intel_limit *limit;
+       int refclk = 96000;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               if (intel_panel_use_ssc(dev_priv)) {
+                       refclk = dev_priv->vbt.lvds_ssc_freq;
+                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+               }
+
+               if (intel_is_dual_link_lvds(dev_priv))
+                       limit = &intel_limits_g4x_dual_channel_lvds;
+               else
+                       limit = &intel_limits_g4x_single_channel_lvds;
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+               limit = &intel_limits_g4x_hdmi;
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+               limit = &intel_limits_g4x_sdvo;
+       } else {
+               /* The option is for other outputs */
+               limit = &intel_limits_i9xx_sdvo;
+       }
+
+       if (!crtc_state->clock_set &&
+           !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                               refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+       return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
+       int refclk = 96000;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               if (intel_panel_use_ssc(dev_priv)) {
+                       refclk = dev_priv->vbt.lvds_ssc_freq;
+                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+               }
+
+               limit = &intel_limits_pineview_lvds;
+       } else {
+               limit = &intel_limits_pineview_sdvo;
+       }
+
+       if (!crtc_state->clock_set &&
+           !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                               refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+       return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+                                  struct intel_crtc_state *crtc_state)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
+       int refclk = 96000;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               if (intel_panel_use_ssc(dev_priv)) {
+                       refclk = dev_priv->vbt.lvds_ssc_freq;
+                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+               }
+
+               limit = &intel_limits_i9xx_lvds;
+       } else {
+               limit = &intel_limits_i9xx_sdvo;
+       }
+
+       if (!crtc_state->clock_set &&
+           !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                                refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+       return 0;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
+{
+       int refclk = 100000;
+       const struct intel_limit *limit = &intel_limits_chv;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (!crtc_state->clock_set &&
+           !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                               refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       chv_compute_dpll(crtc, crtc_state);
+
+       return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
+{
+       int refclk = 100000;
+       const struct intel_limit *limit = &intel_limits_vlv;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (!crtc_state->clock_set &&
+           !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                               refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       vlv_compute_dpll(crtc, crtc_state);
+
+       return 0;
+}
+
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+{
+       if (IS_I830(dev_priv))
+               return false;
+
+       return INTEL_GEN(dev_priv) >= 4 ||
+               IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
+static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+                                struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 tmp;
+
+       if (!i9xx_has_pfit(dev_priv))
+               return;
+
+       tmp = I915_READ(PFIT_CONTROL);
+       if (!(tmp & PFIT_ENABLE))
+               return;
+
+       /* Check whether the pfit is attached to our pipe. */
+       if (INTEL_GEN(dev_priv) < 4) {
+               if (crtc->pipe != PIPE_B)
+                       return;
+       } else {
+               if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+                       return;
+       }
+
+       pipe_config->gmch_pfit.control = tmp;
+       pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+}
+
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+                              struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = pipe_config->cpu_transcoder;
+       struct dpll clock;
+       u32 mdiv;
+       int refclk = 100000;
+
+       /* In case of DSI, DPLL will not be used */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
+       vlv_dpio_get(dev_priv);
+       mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
+       vlv_dpio_put(dev_priv);
+
+       clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+       clock.m2 = mdiv & DPIO_M2DIV_MASK;
+       clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+       clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+       clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+       pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+}
+
+static void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+                             struct intel_initial_plane_config *plane_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+       enum pipe pipe;
+       u32 val, base, offset;
+       int fourcc, pixel_format;
+       unsigned int aligned_height;
+       struct drm_framebuffer *fb;
+       struct intel_framebuffer *intel_fb;
+
+       if (!plane->get_hw_state(plane, &pipe))
+               return;
+
+       WARN_ON(pipe != crtc->pipe);
+
+       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+       if (!intel_fb) {
+               DRM_DEBUG_KMS("failed to alloc fb\n");
+               return;
+       }
+
+       fb = &intel_fb->base;
+
+       fb->dev = dev;
+
+       val = I915_READ(DSPCNTR(i9xx_plane));
+
+       if (INTEL_GEN(dev_priv) >= 4) {
+               if (val & DISPPLANE_TILED) {
+                       plane_config->tiling = I915_TILING_X;
+                       fb->modifier = I915_FORMAT_MOD_X_TILED;
+               }
+
+               if (val & DISPPLANE_ROTATE_180)
+                       plane_config->rotation = DRM_MODE_ROTATE_180;
+       }
+
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+           val & DISPPLANE_MIRROR)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+       pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+       fourcc = i9xx_format_to_fourcc(pixel_format);
+       fb->format = drm_format_info(fourcc);
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               offset = I915_READ(DSPOFFSET(i9xx_plane));
+               base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+       } else if (INTEL_GEN(dev_priv) >= 4) {
+               if (plane_config->tiling)
+                       offset = I915_READ(DSPTILEOFF(i9xx_plane));
+               else
+                       offset = I915_READ(DSPLINOFF(i9xx_plane));
+               base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+       } else {
+               base = I915_READ(DSPADDR(i9xx_plane));
+       }
+       plane_config->base = base;
+
+       val = I915_READ(PIPESRC(pipe));
+       fb->width = ((val >> 16) & 0xfff) + 1;
+       fb->height = ((val >> 0) & 0xfff) + 1;
+
+       val = I915_READ(DSPSTRIDE(i9xx_plane));
+       fb->pitches[0] = val & 0xffffffc0;
+
+       aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+       plane_config->size = fb->pitches[0] * aligned_height;
+
+       DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+                     crtc->base.name, plane->base.name, fb->width, fb->height,
+                     fb->format->cpp[0] * 8, base, fb->pitches[0],
+                     plane_config->size);
+
+       plane_config->fb = intel_fb;
+}
+
+static void chv_crtc_clock_get(struct intel_crtc *crtc,
+                              struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = pipe_config->cpu_transcoder;
+       enum dpio_channel port = vlv_pipe_to_channel(pipe);
+       struct dpll clock;
+       u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
+       int refclk = 100000;
+
+       /* In case of DSI, DPLL will not be used */
+       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+               return;
+
+       vlv_dpio_get(dev_priv);
+       cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
+       pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
+       pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
+       pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+       pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+       vlv_dpio_put(dev_priv);
+
+       clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+       clock.m2 = (pll_dw0 & 0xff) << 22;
+       if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+               clock.m2 |= pll_dw2 & 0x3fffff;
+       clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+       clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+       clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+       pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+}
+
+static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
+                                       struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
+
+       pipe_config->lspcon_downsampling = false;
+
+       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+               u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+               if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+                       bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
+                       bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+                       if (ycbcr420_enabled) {
+                               /* We support 4:2:0 in full blend mode only */
+                               if (!blend)
+                                       output = INTEL_OUTPUT_FORMAT_INVALID;
+                               else if (!(IS_GEMINILAKE(dev_priv) ||
+                                          INTEL_GEN(dev_priv) >= 10))
+                                       output = INTEL_OUTPUT_FORMAT_INVALID;
+                               else
+                                       output = INTEL_OUTPUT_FORMAT_YCBCR420;
+                       } else {
+                               /*
+                                * Currently there is no interface defined to
+                                * check user preference between RGB/YCBCR444
+                                * or YCBCR420. So the only possible case for
+                                * YCBCR444 usage is driving YCBCR420 output
+                                * with LSPCON, where the pipe is configured for
+                                * YCBCR444 output and the LSPCON takes care of
+                                * downsampling it.
+                                */
+                               pipe_config->lspcon_downsampling = true;
+                               output = INTEL_OUTPUT_FORMAT_YCBCR444;
+                       }
+               }
+       }
+
+       pipe_config->output_format = output;
+}
+
+static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+       u32 tmp;
+
+       tmp = I915_READ(DSPCNTR(i9xx_plane));
+
+       if (tmp & DISPPLANE_GAMMA_ENABLE)
+               crtc_state->gamma_enable = true;
+
+       if (!HAS_GMCH(dev_priv) &&
+           tmp & DISPPLANE_PIPE_CSC_ENABLE)
+               crtc_state->csc_enable = true;
+}
+
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+                                struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+       u32 tmp;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+       pipe_config->shared_dpll = NULL;
+
+       ret = false;
+
+       tmp = I915_READ(PIPECONF(crtc->pipe));
+       if (!(tmp & PIPECONF_ENABLE))
+               goto out;
+
+       if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+           IS_CHERRYVIEW(dev_priv)) {
+               switch (tmp & PIPECONF_BPC_MASK) {
+               case PIPECONF_6BPC:
+                       pipe_config->pipe_bpp = 18;
+                       break;
+               case PIPECONF_8BPC:
+                       pipe_config->pipe_bpp = 24;
+                       break;
+               case PIPECONF_10BPC:
+                       pipe_config->pipe_bpp = 30;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+           (tmp & PIPECONF_COLOR_RANGE_SELECT))
+               pipe_config->limited_color_range = true;
+
+       pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
+               PIPECONF_GAMMA_MODE_SHIFT;
+
+       if (IS_CHERRYVIEW(dev_priv))
+               pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+
+       i9xx_get_pipe_color_config(pipe_config);
+       intel_color_get_config(pipe_config);
+
+       if (INTEL_GEN(dev_priv) < 4)
+               pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
+       intel_get_pipe_timings(crtc, pipe_config);
+       intel_get_pipe_src_size(crtc, pipe_config);
+
+       i9xx_get_pfit_config(crtc, pipe_config);
+
+       if (INTEL_GEN(dev_priv) >= 4) {
+               /* No way to read it out on pipes B and C */
+               if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
+                       tmp = dev_priv->chv_dpll_md[crtc->pipe];
+               else
+                       tmp = I915_READ(DPLL_MD(crtc->pipe));
+               pipe_config->pixel_multiplier =
+                       ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
+                        >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+               pipe_config->dpll_hw_state.dpll_md = tmp;
+       } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+               tmp = I915_READ(DPLL(crtc->pipe));
+               pipe_config->pixel_multiplier =
+                       ((tmp & SDVO_MULTIPLIER_MASK)
+                        >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
+       } else {
+               /* Note that on i915G/GM the pixel multiplier is in the sdvo
+                * port and will be fixed up in the encoder->get_config
+                * function. */
+               pipe_config->pixel_multiplier = 1;
+       }
+       pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+               pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+               pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+       } else {
+               /* Mask out read-only status bits. */
+               pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+                                                    DPLL_PORTC_READY_MASK |
+                                                    DPLL_PORTB_READY_MASK);
+       }
+
+       if (IS_CHERRYVIEW(dev_priv))
+               chv_crtc_clock_get(crtc, pipe_config);
+       else if (IS_VALLEYVIEW(dev_priv))
+               vlv_crtc_clock_get(crtc, pipe_config);
+       else
+               i9xx_crtc_clock_get(crtc, pipe_config);
+
+       /*
+        * Normally the dotclock is filled in by the encoder .get_config()
+        * but in case the pipe is enabled w/o any ports we need a sane
+        * default.
+        */
+       pipe_config->base.adjusted_mode.crtc_clock =
+               pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_encoder *encoder;
+       int i;
+       u32 val, final;
+       bool has_lvds = false;
+       bool has_cpu_edp = false;
+       bool has_panel = false;
+       bool has_ck505 = false;
+       bool can_ssc = false;
+       bool using_ssc_source = false;
+
+       /* We need to take the global config into account */
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       has_panel = true;
+                       has_lvds = true;
+                       break;
+               case INTEL_OUTPUT_EDP:
+                       has_panel = true;
+                       if (encoder->port == PORT_A)
+                               has_cpu_edp = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (HAS_PCH_IBX(dev_priv)) {
+               has_ck505 = dev_priv->vbt.display_clock_mode;
+               can_ssc = has_ck505;
+       } else {
+               has_ck505 = false;
+               can_ssc = true;
+       }
+
+       /* Check if any DPLLs are using the SSC source */
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               u32 temp = I915_READ(PCH_DPLL(i));
+
+               if (!(temp & DPLL_VCO_ENABLE))
+                       continue;
+
+               if ((temp & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       using_ssc_source = true;
+                       break;
+               }
+       }
+
+       DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+                     has_panel, has_lvds, has_ck505, using_ssc_source);
+
+       /* Ironlake: try to set up the display reference clock before
+        * enabling the DPLL. This is only under the driver's control
+        * after PCH B stepping; earlier chipset steppings ignore this
+        * setting.
+        */
+       val = I915_READ(PCH_DREF_CONTROL);
+
+       /* As we must carefully and slowly disable/enable each source in turn,
+        * compute the final state we want first and check if we need to
+        * make any changes at all.
+        */
+       final = val;
+       final &= ~DREF_NONSPREAD_SOURCE_MASK;
+       if (has_ck505)
+               final |= DREF_NONSPREAD_CK505_ENABLE;
+       else
+               final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+       final &= ~DREF_SSC_SOURCE_MASK;
+       final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+       final &= ~DREF_SSC1_ENABLE;
+
+       if (has_panel) {
+               final |= DREF_SSC_SOURCE_ENABLE;
+
+               if (intel_panel_use_ssc(dev_priv) && can_ssc)
+                       final |= DREF_SSC1_ENABLE;
+
+               if (has_cpu_edp) {
+                       if (intel_panel_use_ssc(dev_priv) && can_ssc)
+                               final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+                       else
+                               final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+               } else
+                       final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+       } else if (using_ssc_source) {
+               final |= DREF_SSC_SOURCE_ENABLE;
+               final |= DREF_SSC1_ENABLE;
+       }
+
+       if (final == val)
+               return;
+
+       /* Always enable nonspread source */
+       val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+       if (has_ck505)
+               val |= DREF_NONSPREAD_CK505_ENABLE;
+       else
+               val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+       if (has_panel) {
+               val &= ~DREF_SSC_SOURCE_MASK;
+               val |= DREF_SSC_SOURCE_ENABLE;
+
+               /* SSC must be turned on before enabling the CPU output */
+               if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+                       DRM_DEBUG_KMS("Using SSC on panel\n");
+                       val |= DREF_SSC1_ENABLE;
+               } else
+                       val &= ~DREF_SSC1_ENABLE;
+
+               /* Get SSC going before enabling the outputs */
+               I915_WRITE(PCH_DREF_CONTROL, val);
+               POSTING_READ(PCH_DREF_CONTROL);
+               udelay(200);
+
+               val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+               /* Enable CPU source on CPU attached eDP */
+               if (has_cpu_edp) {
+                       if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+                               DRM_DEBUG_KMS("Using SSC on eDP\n");
+                               val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+                       } else
+                               val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+               } else
+                       val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+               I915_WRITE(PCH_DREF_CONTROL, val);
+               POSTING_READ(PCH_DREF_CONTROL);
+               udelay(200);
+       } else {
+               DRM_DEBUG_KMS("Disabling CPU source output\n");
+
+               val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+               /* Turn off CPU output */
+               val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+               I915_WRITE(PCH_DREF_CONTROL, val);
+               POSTING_READ(PCH_DREF_CONTROL);
+               udelay(200);
+
+               if (!using_ssc_source) {
+                       DRM_DEBUG_KMS("Disabling SSC source\n");
+
+                       /* Turn off the SSC source */
+                       val &= ~DREF_SSC_SOURCE_MASK;
+                       val |= DREF_SSC_SOURCE_DISABLE;
+
+                       /* Turn off SSC1 */
+                       val &= ~DREF_SSC1_ENABLE;
+
+                       I915_WRITE(PCH_DREF_CONTROL, val);
+                       POSTING_READ(PCH_DREF_CONTROL);
+                       udelay(200);
+               }
+       }
+
+       BUG_ON(val != final);
+}
+
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+       u32 tmp;
+
+       tmp = I915_READ(SOUTH_CHICKEN2);
+       tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+       I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+       if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+                       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+               DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+       tmp = I915_READ(SOUTH_CHICKEN2);
+       tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+       I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+       if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+                        FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+               DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+       u32 tmp;
+
+       tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+       tmp &= ~(0xFF << 24);
+       tmp |= (0x12 << 24);
+       intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+       tmp &= ~(7 << 13);
+       tmp |= (5 << 13);
+       intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+       tmp &= ~(7 << 13);
+       tmp |= (5 << 13);
+       intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+       tmp |= (1 << 27);
+       intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+       tmp |= (1 << 27);
+       intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+       tmp &= ~(0xF << 28);
+       tmp |= (4 << 28);
+       intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+       tmp &= ~(0xF << 28);
+       tmp |= (4 << 28);
+       intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+                                bool with_spread, bool with_fdi)
+{
+       u32 reg, tmp;
+
+       if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+               with_spread = true;
+       if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
+           with_fdi, "LP PCH doesn't have FDI\n"))
+               with_fdi = false;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_DISABLE;
+       tmp |= SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       udelay(24);
+
+       if (with_spread) {
+               tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+               tmp &= ~SBI_SSCCTL_PATHALT;
+               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+               if (with_fdi) {
+                       lpt_reset_fdi_mphy(dev_priv);
+                       lpt_program_fdi_mphy(dev_priv);
+               }
+       }
+
+       reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+       tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Sequence to disable CLKOUT_DP */
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+{
+       u32 reg, tmp;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+       tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       if (!(tmp & SBI_SSCCTL_DISABLE)) {
+               if (!(tmp & SBI_SSCCTL_PATHALT)) {
+                       tmp |= SBI_SSCCTL_PATHALT;
+                       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+                       udelay(32);
+               }
+               tmp |= SBI_SSCCTL_DISABLE;
+               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       }
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+#define BEND_IDX(steps) ((50 + (steps)) / 5)
+
+static const u16 sscdivintphase[] = {
+       [BEND_IDX( 50)] = 0x3B23,
+       [BEND_IDX( 45)] = 0x3B23,
+       [BEND_IDX( 40)] = 0x3C23,
+       [BEND_IDX( 35)] = 0x3C23,
+       [BEND_IDX( 30)] = 0x3D23,
+       [BEND_IDX( 25)] = 0x3D23,
+       [BEND_IDX( 20)] = 0x3E23,
+       [BEND_IDX( 15)] = 0x3E23,
+       [BEND_IDX( 10)] = 0x3F23,
+       [BEND_IDX(  5)] = 0x3F23,
+       [BEND_IDX(  0)] = 0x0025,
+       [BEND_IDX( -5)] = 0x0025,
+       [BEND_IDX(-10)] = 0x0125,
+       [BEND_IDX(-15)] = 0x0125,
+       [BEND_IDX(-20)] = 0x0225,
+       [BEND_IDX(-25)] = 0x0225,
+       [BEND_IDX(-30)] = 0x0325,
+       [BEND_IDX(-35)] = 0x0325,
+       [BEND_IDX(-40)] = 0x0425,
+       [BEND_IDX(-45)] = 0x0425,
+       [BEND_IDX(-50)] = 0x0525,
+};
+
+/*
+ * Bend CLKOUT_DP
+ * steps -50 to 50 inclusive, in steps of 5
+ * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
+ * change in clock period = -(steps / 10) * 5.787 ps
+ */
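+/*
+ * Worked example (steps value assumed): steps = -20 gives
+ * BEND_IDX(-20) = (50 - 20) / 5 = 6, i.e. sscdivintphase[6] = 0x0225, and
+ * the clock period changes by -(-20 / 10) * 5.787 ps = +11.574 ps, i.e. a
+ * longer period, so the clock slows down as expected for steps < 0.
+ */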
+static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+{
+       u32 tmp;
+       int idx = BEND_IDX(steps);
+
+       if (WARN_ON(steps % 5 != 0))
+               return;
+
+       if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
+               return;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       if (steps % 10 != 0)
+               tmp = 0xAAAAAAAB;
+       else
+               tmp = 0x00000000;
+       intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+       tmp &= 0xffff0000;
+       tmp |= sscdivintphase[idx];
+       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+#undef BEND_IDX
+
+static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+{
+       u32 fuse_strap = I915_READ(FUSE_STRAP);
+       u32 ctl = I915_READ(SPLL_CTL);
+
+       if ((ctl & SPLL_PLL_ENABLE) == 0)
+               return false;
+
+       if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
+           (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+               return true;
+
+       if (IS_BROADWELL(dev_priv) &&
+           (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
+               return true;
+
+       return false;
+}
+
+static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
+                              enum intel_dpll_id id)
+{
+       u32 fuse_strap = I915_READ(FUSE_STRAP);
+       u32 ctl = I915_READ(WRPLL_CTL(id));
+
+       if ((ctl & WRPLL_PLL_ENABLE) == 0)
+               return false;
+
+       if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
+               return true;
+
+       if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+           (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
+           (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+               return true;
+
+       return false;
+}
+
+static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+       struct intel_encoder *encoder;
+       bool pch_ssc_in_use = false;
+       bool has_fdi = false;
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       has_fdi = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /*
+        * The BIOS may have decided to use the PCH SSC
+        * reference so we must not disable it until the
+        * relevant PLLs have stopped relying on it. We'll
+        * just leave the PCH SSC reference enabled in case
+        * any active PLL is using it. It will get disabled
+        * after runtime suspend if we don't have FDI.
+        *
+        * TODO: Move the whole reference clock handling
+        * to the modeset sequence proper so that we can
+        * actually enable/disable/reconfigure these things
+        * safely. To do that we need to introduce a real
+        * clock hierarchy. That would also allow us to do
+        * clock bending finally.
+        */
+       if (spll_uses_pch_ssc(dev_priv)) {
+               DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+               pch_ssc_in_use = true;
+       }
+
+       if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
+               DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+               pch_ssc_in_use = true;
+       }
+
+       if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
+               DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+               pch_ssc_in_use = true;
+       }
+
+       if (pch_ssc_in_use)
+               return;
+
+       if (has_fdi) {
+               lpt_bend_clkout_dp(dev_priv, 0);
+               lpt_enable_clkout_dp(dev_priv, true, true);
+       } else {
+               lpt_disable_clkout_dp(dev_priv);
+       }
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+       if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
+               ironlake_init_pch_refclk(dev_priv);
+       else if (HAS_PCH_LPT(dev_priv))
+               lpt_init_pch_refclk(dev_priv);
+}
+
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 val;
+
+       val = 0;
+
+       switch (crtc_state->pipe_bpp) {
+       case 18:
+               val |= PIPECONF_6BPC;
+               break;
+       case 24:
+               val |= PIPECONF_8BPC;
+               break;
+       case 30:
+               val |= PIPECONF_10BPC;
+               break;
+       case 36:
+               val |= PIPECONF_12BPC;
+               break;
+       default:
+               /* Case prevented by intel_choose_pipe_bpp_dither. */
+               BUG();
+       }
+
+       if (crtc_state->dither)
+               val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+               val |= PIPECONF_INTERLACED_ILK;
+       else
+               val |= PIPECONF_PROGRESSIVE;
+
+       if (crtc_state->limited_color_range)
+               val |= PIPECONF_COLOR_RANGE_SELECT;
+
+       val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
+       I915_WRITE(PIPECONF(pipe), val);
+       POSTING_READ(PIPECONF(pipe));
+}
+
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       u32 val = 0;
+
+       if (IS_HASWELL(dev_priv) && crtc_state->dither)
+               val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+               val |= PIPECONF_INTERLACED_ILK;
+       else
+               val |= PIPECONF_PROGRESSIVE;
+
+       I915_WRITE(PIPECONF(cpu_transcoder), val);
+       POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 val = 0;
+
+       switch (crtc_state->pipe_bpp) {
+       case 18:
+               val |= PIPEMISC_DITHER_6_BPC;
+               break;
+       case 24:
+               val |= PIPEMISC_DITHER_8_BPC;
+               break;
+       case 30:
+               val |= PIPEMISC_DITHER_10_BPC;
+               break;
+       case 36:
+               val |= PIPEMISC_DITHER_12_BPC;
+               break;
+       default:
+               MISSING_CASE(crtc_state->pipe_bpp);
+               break;
+       }
+
+       if (crtc_state->dither)
+               val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+           crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+               val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+               val |= PIPEMISC_YUV420_ENABLE |
+                       PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+       if (INTEL_GEN(dev_priv) >= 11 &&
+           (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
+                                          BIT(PLANE_CURSOR))) == 0)
+               val |= PIPEMISC_HDR_MODE_PRECISION;
+
+       I915_WRITE(PIPEMISC(crtc->pipe), val);
+}
+
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 tmp;
+
+       tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+       case PIPEMISC_DITHER_6_BPC:
+               return 18;
+       case PIPEMISC_DITHER_8_BPC:
+               return 24;
+       case PIPEMISC_DITHER_10_BPC:
+               return 30;
+       case PIPEMISC_DITHER_12_BPC:
+               return 36;
+       default:
+               MISSING_CASE(tmp);
+               return 0;
+       }
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+       /*
+        * Account for spread spectrum to avoid
+        * oversubscribing the link. Max center spread
+        * is 2.5%; use 5% for safety's sake.
+        */
+       u32 bps = target_clock * bpp * 21 / 20;
+       return DIV_ROUND_UP(bps, link_bw * 8);
+}
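+
+/*
+ * Example with assumed numbers: a 193000 kHz pixel clock at 24 bpp over a
+ * 270000 kHz FDI link gives bps = 193000 * 24 * 21 / 20 = 4863600, so
+ * DIV_ROUND_UP(4863600, 270000 * 8) = 3 lanes are required.
+ */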
+
+static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+       return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+static void ironlake_compute_dpll(struct intel_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct dpll *reduced_clock)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 dpll, fp, fp2;
+       int factor;
+
+       /* Enable autotuning of the PLL clock (if permissible) */
+       factor = 21;
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               if ((intel_panel_use_ssc(dev_priv) &&
+                    dev_priv->vbt.lvds_ssc_freq == 100000) ||
+                   (HAS_PCH_IBX(dev_priv) &&
+                    intel_is_dual_link_lvds(dev_priv)))
+                       factor = 25;
+       } else if (crtc_state->sdvo_tv_clock) {
+               factor = 20;
+       }
+
+       fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
+       if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+               fp |= FP_CB_TUNE;
+
+       if (reduced_clock) {
+               fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+               if (reduced_clock->m < factor * reduced_clock->n)
+                       fp2 |= FP_CB_TUNE;
+       } else {
+               fp2 = fp;
+       }
+
+       dpll = 0;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+               dpll |= DPLLB_MODE_LVDS;
+       else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+
+       dpll |= (crtc_state->pixel_multiplier - 1)
+               << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+               dpll |= DPLL_SDVO_HIGH_SPEED;
+
+       if (intel_crtc_has_dp_encoder(crtc_state))
+               dpll |= DPLL_SDVO_HIGH_SPEED;
+
+       /*
+        * The high speed IO clock is only really required for
+        * SDVO/HDMI/DP, but we also enable it for CRT to make it
+        * possible to share the DPLL between CRT and HDMI. Enabling
+        * the clock needlessly does no real harm, except use up a
+        * bit of power potentially.
+        *
+        * We'll limit this to IVB with 3 pipes, since it has only two
+        * DPLLs and so DPLL sharing is the only way to get three pipes
+        * driving PCH ports at the same time. On SNB we could do this,
+        * and potentially avoid enabling the second DPLL, but it's not
+        * clear if it's a win or loss power-wise. No point in doing
+        * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+        */
+       if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+               dpll |= DPLL_SDVO_HIGH_SPEED;
+
+       /* compute bitmask from p1 value */
+       dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+       /* also FPA1 */
+       dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
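+       /* e.g. an assumed p1 value of 3 sets bit 2 (0x4) in both P1 fields */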
+
+       switch (crtc_state->dpll.p2) {
+       case 5:
+               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+               break;
+       case 7:
+               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+               break;
+       case 10:
+               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+               break;
+       case 14:
+               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+               break;
+       }
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+           intel_panel_use_ssc(dev_priv))
+               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+       else
+               dpll |= PLL_REF_INPUT_DREFCLK;
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       crtc_state->dpll_hw_state.dpll = dpll;
+       crtc_state->dpll_hw_state.fp0 = fp;
+       crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
+                                      struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct intel_limit *limit;
+       int refclk = 120000;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+       if (!crtc_state->has_pch_encoder)
+               return 0;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+               if (intel_panel_use_ssc(dev_priv)) {
+                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
+                                     dev_priv->vbt.lvds_ssc_freq);
+                       refclk = dev_priv->vbt.lvds_ssc_freq;
+               }
+
+               if (intel_is_dual_link_lvds(dev_priv)) {
+                       if (refclk == 100000)
+                               limit = &intel_limits_ironlake_dual_lvds_100m;
+                       else
+                               limit = &intel_limits_ironlake_dual_lvds;
+               } else {
+                       if (refclk == 100000)
+                               limit = &intel_limits_ironlake_single_lvds_100m;
+                       else
+                               limit = &intel_limits_ironlake_single_lvds;
+               }
+       } else {
+               limit = &intel_limits_ironlake_dac;
+       }
+
+       if (!crtc_state->clock_set &&
+           !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+                               refclk, NULL, &crtc_state->dpll)) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       ironlake_compute_dpll(crtc, crtc_state, NULL);
+
+       if (!intel_get_shared_dpll(crtc_state, NULL)) {
+               DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
+                             pipe_name(crtc->pipe));
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+                                        struct intel_link_m_n *m_n)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum pipe pipe = crtc->pipe;
+
+       m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+       m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+       m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+               & ~TU_SIZE_MASK;
+       m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+       m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+                   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+                                        enum transcoder transcoder,
+                                        struct intel_link_m_n *m_n,
+                                        struct intel_link_m_n *m2_n2)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       if (INTEL_GEN(dev_priv) >= 5) {
+               m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+               m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+               m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+                       & ~TU_SIZE_MASK;
+               m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+               m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+                           & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+
+               if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
+                       m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
+                       m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
+                       m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+                                       & ~TU_SIZE_MASK;
+                       m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
+                       m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+                                       & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+               }
+       } else {
+               m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+               m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+               m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+                       & ~TU_SIZE_MASK;
+               m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+               m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+                           & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+       }
+}
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+                     struct intel_crtc_state *pipe_config)
+{
+       if (pipe_config->has_pch_encoder)
+               intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+       else
+               intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+                                            &pipe_config->dp_m_n,
+                                            &pipe_config->dp_m2_n2);
+}
+
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+                                       struct intel_crtc_state *pipe_config)
+{
+       intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+                                    &pipe_config->fdi_m_n, NULL);
+}
+
+static void skylake_get_pfit_config(struct intel_crtc *crtc,
+                                   struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
+       u32 ps_ctrl = 0;
+       int id = -1;
+       int i;
+
+       /* find scaler attached to this pipe */
+       for (i = 0; i < crtc->num_scalers; i++) {
+               ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+               if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
+                       id = i;
+                       pipe_config->pch_pfit.enabled = true;
+                       pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
+                       pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+                       scaler_state->scalers[i].in_use = true;
+                       break;
+               }
+       }
+
+       scaler_state->scaler_id = id;
+       if (id >= 0) {
+               scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
+       } else {
+               scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
+       }
+}
+
+static void
+skylake_get_initial_plane_config(struct intel_crtc *crtc,
+                                struct intel_initial_plane_config *plane_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe;
+       u32 val, base, offset, stride_mult, tiling, alpha;
+       int fourcc, pixel_format;
+       unsigned int aligned_height;
+       struct drm_framebuffer *fb;
+       struct intel_framebuffer *intel_fb;
+
+       if (!plane->get_hw_state(plane, &pipe))
+               return;
+
+       WARN_ON(pipe != crtc->pipe);
+
+       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+       if (!intel_fb) {
+               DRM_DEBUG_KMS("failed to alloc fb\n");
+               return;
+       }
+
+       fb = &intel_fb->base;
+
+       fb->dev = dev;
+
+       val = I915_READ(PLANE_CTL(pipe, plane_id));
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
+       else
+               pixel_format = val & PLANE_CTL_FORMAT_MASK;
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+               alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+               alpha &= PLANE_COLOR_ALPHA_MASK;
+       } else {
+               alpha = val & PLANE_CTL_ALPHA_MASK;
+       }
+
+       fourcc = skl_format_to_fourcc(pixel_format,
+                                     val & PLANE_CTL_ORDER_RGBX, alpha);
+       fb->format = drm_format_info(fourcc);
+
+       tiling = val & PLANE_CTL_TILED_MASK;
+       switch (tiling) {
+       case PLANE_CTL_TILED_LINEAR:
+               fb->modifier = DRM_FORMAT_MOD_LINEAR;
+               break;
+       case PLANE_CTL_TILED_X:
+               plane_config->tiling = I915_TILING_X;
+               fb->modifier = I915_FORMAT_MOD_X_TILED;
+               break;
+       case PLANE_CTL_TILED_Y:
+               plane_config->tiling = I915_TILING_Y;
+               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+                       fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+               else
+                       fb->modifier = I915_FORMAT_MOD_Y_TILED;
+               break;
+       case PLANE_CTL_TILED_YF:
+               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+                       fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+               else
+                       fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+               break;
+       default:
+               MISSING_CASE(tiling);
+               goto error;
+       }
+
+       /*
+        * DRM_MODE_ROTATE_ is counterclockwise to stay compatible with Xrandr,
+        * while i915 HW rotation is clockwise; that's why we swap the values here.
+        */
+       switch (val & PLANE_CTL_ROTATE_MASK) {
+       case PLANE_CTL_ROTATE_0:
+               plane_config->rotation = DRM_MODE_ROTATE_0;
+               break;
+       case PLANE_CTL_ROTATE_90:
+               plane_config->rotation = DRM_MODE_ROTATE_270;
+               break;
+       case PLANE_CTL_ROTATE_180:
+               plane_config->rotation = DRM_MODE_ROTATE_180;
+               break;
+       case PLANE_CTL_ROTATE_270:
+               plane_config->rotation = DRM_MODE_ROTATE_90;
+               break;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           val & PLANE_CTL_FLIP_HORIZONTAL)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+       base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+       plane_config->base = base;
+
+       offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
+
+       val = I915_READ(PLANE_SIZE(pipe, plane_id));
+       fb->height = ((val >> 16) & 0xfff) + 1;
+       fb->width = ((val >> 0) & 0x1fff) + 1;
+
+       val = I915_READ(PLANE_STRIDE(pipe, plane_id));
+       stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
+       fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+       aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+       plane_config->size = fb->pitches[0] * aligned_height;
+
+       DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+                     crtc->base.name, plane->base.name, fb->width, fb->height,
+                     fb->format->cpp[0] * 8, base, fb->pitches[0],
+                     plane_config->size);
+
+       plane_config->fb = intel_fb;
+       return;
+
+error:
+       kfree(intel_fb);
+}
+
+static void ironlake_get_pfit_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 tmp;
+
+       tmp = I915_READ(PF_CTL(crtc->pipe));
+
+       if (tmp & PF_ENABLE) {
+               pipe_config->pch_pfit.enabled = true;
+               pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
+               pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+
+               /* We currently do not free assignments of panel fitters on
+                * ivb/hsw (since we don't use the higher upscaling modes which
+                * differentiate them), so just WARN about this case for now. */
+               if (IS_GEN(dev_priv, 7)) {
+                       WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
+                               PF_PIPE_SEL_IVB(crtc->pipe));
+               }
+       }
+}
+
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+       u32 tmp;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+       pipe_config->shared_dpll = NULL;
+
+       ret = false;
+       tmp = I915_READ(PIPECONF(crtc->pipe));
+       if (!(tmp & PIPECONF_ENABLE))
+               goto out;
+
+       switch (tmp & PIPECONF_BPC_MASK) {
+       case PIPECONF_6BPC:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case PIPECONF_8BPC:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case PIPECONF_10BPC:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case PIPECONF_12BPC:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
+
+       if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+               pipe_config->limited_color_range = true;
+
+       pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
+               PIPECONF_GAMMA_MODE_SHIFT;
+
+       pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+       i9xx_get_pipe_color_config(pipe_config);
+       intel_color_get_config(pipe_config);
+
+       if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+               struct intel_shared_dpll *pll;
+               enum intel_dpll_id pll_id;
+
+               pipe_config->has_pch_encoder = true;
+
+               tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+               pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+                                         FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+               ironlake_get_fdi_m_n_config(crtc, pipe_config);
+
+               if (HAS_PCH_IBX(dev_priv)) {
+                       /*
+                        * The pipe->pch transcoder and pch transcoder->pll
+                        * mapping is fixed.
+                        */
+                       pll_id = (enum intel_dpll_id) crtc->pipe;
+               } else {
+                       tmp = I915_READ(PCH_DPLL_SEL);
+                       if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
+                               pll_id = DPLL_ID_PCH_PLL_B;
+                       else
+                               pll_id = DPLL_ID_PCH_PLL_A;
+               }
+
+               pipe_config->shared_dpll =
+                       intel_get_shared_dpll_by_id(dev_priv, pll_id);
+               pll = pipe_config->shared_dpll;
+
+               WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+                                               &pipe_config->dpll_hw_state));
+
+               tmp = pipe_config->dpll_hw_state.dpll;
+               pipe_config->pixel_multiplier =
+                       ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+                        >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+               ironlake_pch_clock_get(crtc, pipe_config);
+       } else {
+               pipe_config->pixel_multiplier = 1;
+       }
+
+       intel_get_pipe_timings(crtc, pipe_config);
+       intel_get_pipe_src_size(crtc, pipe_config);
+
+       ironlake_get_pfit_config(crtc, pipe_config);
+
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_atomic_state *state =
+               to_intel_atomic_state(crtc_state->base.state);
+
+       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+           INTEL_GEN(dev_priv) >= 11) {
+               struct intel_encoder *encoder =
+                       intel_get_crtc_new_encoder(state, crtc_state);
+
+               if (!intel_get_shared_dpll(crtc_state, encoder)) {
+                       DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
+                                     pipe_name(crtc->pipe));
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
+                                  enum port port,
+                                  struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+       u32 temp;
+
+       temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+       id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+
+       if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
+               return;
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+       u32 temp;
+
+       /* TODO: TBT pll not implemented. */
+       if (intel_port_is_combophy(dev_priv, port)) {
+               temp = I915_READ(DPCLKA_CFGCR0_ICL) &
+                      DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+               id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+       } else if (intel_port_is_tc(dev_priv, port)) {
+               id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
+       } else {
+               WARN(1, "Invalid port %x\n", port);
+               return;
+       }
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+
+       switch (port) {
+       case PORT_A:
+               id = DPLL_ID_SKL_DPLL0;
+               break;
+       case PORT_B:
+               id = DPLL_ID_SKL_DPLL1;
+               break;
+       case PORT_C:
+               id = DPLL_ID_SKL_DPLL2;
+               break;
+       default:
+               DRM_ERROR("Incorrect port type\n");
+               return;
+       }
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+       u32 temp;
+
+       temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+       id = temp >> (port * 3 + 1);
+
+       if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
+               return;
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+       u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+       switch (ddi_pll_sel) {
+       case PORT_CLK_SEL_WRPLL1:
+               id = DPLL_ID_WRPLL1;
+               break;
+       case PORT_CLK_SEL_WRPLL2:
+               id = DPLL_ID_WRPLL2;
+               break;
+       case PORT_CLK_SEL_SPLL:
+               id = DPLL_ID_SPLL;
+               break;
+       case PORT_CLK_SEL_LCPLL_810:
+               id = DPLL_ID_LCPLL_810;
+               break;
+       case PORT_CLK_SEL_LCPLL_1350:
+               id = DPLL_ID_LCPLL_1350;
+               break;
+       case PORT_CLK_SEL_LCPLL_2700:
+               id = DPLL_ID_LCPLL_2700;
+               break;
+       default:
+               MISSING_CASE(ddi_pll_sel);
+               /* fall through */
+       case PORT_CLK_SEL_NONE:
+               return;
+       }
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
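+/*
+ * Figure out which transcoder feeds this pipe (taking the eDP and, on gen11+,
+ * the DSI transcoders into account), grab a power reference for it recorded
+ * in *power_domain_mask/wakerefs for the caller to release later, and report
+ * whether the transcoder is enabled.
+ */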
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *pipe_config,
+                                    u64 *power_domain_mask,
+                                    intel_wakeref_t *wakerefs)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum intel_display_power_domain power_domain;
+       unsigned long panel_transcoder_mask = 0;
+       unsigned long enabled_panel_transcoders = 0;
+       enum transcoder panel_transcoder;
+       intel_wakeref_t wf;
+       u32 tmp;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               panel_transcoder_mask |=
+                       BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
+       if (HAS_TRANSCODER_EDP(dev_priv))
+               panel_transcoder_mask |= BIT(TRANSCODER_EDP);
+
+       /*
+        * The pipe->transcoder mapping is fixed with the exception of the eDP
+        * and DSI transcoders handled below.
+        */
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+
+       /*
+        * XXX: Do intel_display_power_get_if_enabled before reading this (for
+        * consistency and less surprising code; it is in an always-on power well).
+        */
+       for_each_set_bit(panel_transcoder,
+                        &panel_transcoder_mask,
+                        ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
+               bool force_thru = false;
+               enum pipe trans_pipe;
+
+               tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+               if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+                       continue;
+
+               /*
+                * Log all enabled ones, only use the first one.
+                *
+                * FIXME: This won't work for two separate DSI displays.
+                */
+               enabled_panel_transcoders |= BIT(panel_transcoder);
+               if (enabled_panel_transcoders != BIT(panel_transcoder))
+                       continue;
+
+               switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+               default:
+                       WARN(1, "unknown pipe linked to transcoder %s\n",
+                            transcoder_name(panel_transcoder));
+                       /* fall through */
+               case TRANS_DDI_EDP_INPUT_A_ONOFF:
+                       force_thru = true;
+                       /* fall through */
+               case TRANS_DDI_EDP_INPUT_A_ON:
+                       trans_pipe = PIPE_A;
+                       break;
+               case TRANS_DDI_EDP_INPUT_B_ONOFF:
+                       trans_pipe = PIPE_B;
+                       break;
+               case TRANS_DDI_EDP_INPUT_C_ONOFF:
+                       trans_pipe = PIPE_C;
+                       break;
+               }
+
+               if (trans_pipe == crtc->pipe) {
+                       pipe_config->cpu_transcoder = panel_transcoder;
+                       pipe_config->pch_pfit.force_thru = force_thru;
+               }
+       }
+
+       /*
+        * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+        */
+       WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+               enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
+       power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+       WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wf)
+               return false;
+
+       wakerefs[power_domain] = wf;
+       *power_domain_mask |= BIT_ULL(power_domain);
+
+       tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+       return tmp & PIPECONF_ENABLE;
+}
+
+static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
+                                        struct intel_crtc_state *pipe_config,
+                                        u64 *power_domain_mask,
+                                        intel_wakeref_t *wakerefs)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum intel_display_power_domain power_domain;
+       enum transcoder cpu_transcoder;
+       intel_wakeref_t wf;
+       enum port port;
+       u32 tmp;
+
+       for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
+               if (port == PORT_A)
+                       cpu_transcoder = TRANSCODER_DSI_A;
+               else
+                       cpu_transcoder = TRANSCODER_DSI_C;
+
+               power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+               WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+               wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+               if (!wf)
+                       continue;
+
+               wakerefs[power_domain] = wf;
+               *power_domain_mask |= BIT_ULL(power_domain);
+
+               /*
+                * The PLL needs to be enabled with a valid divider
+                * configuration, otherwise accessing DSI registers will hang
+                * the machine. See BSpec North Display Engine
+                * registers/MIPI[BXT]. We can break out here early, since we
+                * need the same DSI PLL to be enabled for both DSI ports.
+                */
+               if (!bxt_dsi_pll_is_enabled(dev_priv))
+                       break;
+
+               /* XXX: this works for video mode only */
+               tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+               if (!(tmp & DPI_ENABLE))
+                       continue;
+
+               tmp = I915_READ(MIPI_CTRL(port));
+               if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
+                       continue;
+
+               pipe_config->cpu_transcoder = cpu_transcoder;
+               break;
+       }
+
+       return transcoder_is_dsi(pipe_config->cpu_transcoder);
+}
+
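+/*
+ * Read back which DDI port drives the transcoder, look up the DPLL feeding
+ * that port on the given platform, and check whether the LPT PCH transcoder
+ * behind DDI E is in use (pre-gen9 only).
+ */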
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+                                      struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll;
+       enum port port;
+       u32 tmp;
+
+       tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+       port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icelake_get_ddi_pll(dev_priv, port, pipe_config);
+       else if (IS_CANNONLAKE(dev_priv))
+               cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
+       else if (IS_GEN9_BC(dev_priv))
+               skylake_get_ddi_pll(dev_priv, port, pipe_config);
+       else if (IS_GEN9_LP(dev_priv))
+               bxt_get_ddi_pll(dev_priv, port, pipe_config);
+       else
+               haswell_get_ddi_pll(dev_priv, port, pipe_config);
+
+       pll = pipe_config->shared_dpll;
+       if (pll) {
+               WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+                                               &pipe_config->dpll_hw_state));
+       }
+
+       /*
+        * Haswell has only FDI/PCH transcoder A, which is connected to
+        * DDI E. So just check whether this pipe is wired to DDI E and whether
+        * the PCH transcoder is on.
+        */
+       if (INTEL_GEN(dev_priv) < 9 &&
+           (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+               pipe_config->has_pch_encoder = true;
+
+               tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+               pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+                                         FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+               ironlake_get_fdi_m_n_config(crtc, pipe_config);
+       }
+}
+
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+                                   struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
+       enum intel_display_power_domain power_domain;
+       u64 power_domain_mask;
+       bool active;
+
+       intel_crtc_init_scalers(crtc, pipe_config);
+
+       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wf)
+               return false;
+
+       wakerefs[power_domain] = wf;
+       power_domain_mask = BIT_ULL(power_domain);
+
+       pipe_config->shared_dpll = NULL;
+
+       active = hsw_get_transcoder_state(crtc, pipe_config,
+                                         &power_domain_mask, wakerefs);
+
+       if (IS_GEN9_LP(dev_priv) &&
+           bxt_get_dsi_transcoder_state(crtc, pipe_config,
+                                        &power_domain_mask, wakerefs)) {
+               WARN_ON(active);
+               active = true;
+       }
+
+       if (!active)
+               goto out;
+
+       if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+           INTEL_GEN(dev_priv) >= 11) {
+               haswell_get_ddi_port_state(crtc, pipe_config);
+               intel_get_pipe_timings(crtc, pipe_config);
+       }
+
+       intel_get_pipe_src_size(crtc, pipe_config);
+       intel_get_crtc_ycbcr_config(crtc, pipe_config);
+
+       pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+
+       pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+       if (INTEL_GEN(dev_priv) >= 9) {
+               u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+
+               if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+                       pipe_config->gamma_enable = true;
+
+               if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+                       pipe_config->csc_enable = true;
+       } else {
+               i9xx_get_pipe_color_config(pipe_config);
+       }
+
+       intel_color_get_config(pipe_config);
+
+       power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+       WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+
+       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (wf) {
+               wakerefs[power_domain] = wf;
+               power_domain_mask |= BIT_ULL(power_domain);
+
+               if (INTEL_GEN(dev_priv) >= 9)
+                       skylake_get_pfit_config(crtc, pipe_config);
+               else
+                       ironlake_get_pfit_config(crtc, pipe_config);
+       }
+
+       if (hsw_crtc_supports_ips(crtc)) {
+               if (IS_HASWELL(dev_priv))
+                       pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+               else {
+                       /*
+                        * We cannot readout IPS state on broadwell, set to
+                        * true so we can set it to a defined state on first
+                        * commit.
+                        */
+                       pipe_config->ips_enabled = true;
+               }
+       }
+
+       if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+           !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+               pipe_config->pixel_multiplier =
+                       I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+       } else {
+               pipe_config->pixel_multiplier = 1;
+       }
+
+out:
+       for_each_power_domain(power_domain, power_domain_mask)
+               intel_display_power_put(dev_priv,
+                                       power_domain, wakerefs[power_domain]);
+
+       return active;
+}
+
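+/*
+ * Compute the cursor surface address: the physical address on platforms that
+ * need a physically contiguous cursor, the GGTT offset otherwise, plus the
+ * surface offset and, on GMCH platforms with 180 degree rotation, an
+ * adjustment so the base points at the last pixel of the buffer.
+ */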
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       u32 base;
+
+       if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+               base = obj->phys_handle->busaddr;
+       else
+               base = intel_plane_ggtt_offset(plane_state);
+
+       base += plane_state->color_plane[0].offset;
+
+       /* ILK+ do this automagically */
+       if (HAS_GMCH(dev_priv) &&
+           plane_state->base.rotation & DRM_MODE_ROTATE_180)
+               base += (plane_state->base.crtc_h *
+                        plane_state->base.crtc_w - 1) * fb->format->cpp[0];
+
+       return base;
+}
+
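+/*
+ * Pack the cursor position into the sign/magnitude format of the CURPOS
+ * register: a negative coordinate gets the per-axis sign bit set and its
+ * absolute value in the magnitude field (e.g. x = -16 becomes
+ * CURSOR_POS_SIGN | 16 in the X field).
+ */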
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+       int x = plane_state->base.crtc_x;
+       int y = plane_state->base.crtc_y;
+       u32 pos = 0;
+
+       if (x < 0) {
+               pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+               x = -x;
+       }
+       pos |= x << CURSOR_X_SHIFT;
+
+       if (y < 0) {
+               pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+               y = -y;
+       }
+       pos |= y << CURSOR_Y_SHIFT;
+
+       return pos;
+}
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+       const struct drm_mode_config *config =
+               &plane_state->base.plane->dev->mode_config;
+       int width = plane_state->base.crtc_w;
+       int height = plane_state->base.crtc_h;
+
+       return width > 0 && width <= config->cursor_width &&
+               height > 0 && height <= config->cursor_height;
+}
+
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+       int src_x, src_y;
+       u32 offset;
+       int ret;
+
+       ret = intel_plane_compute_gtt(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       src_x = plane_state->base.src_x >> 16;
+       src_y = plane_state->base.src_y >> 16;
+
+       intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+       offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+                                                   plane_state, 0);
+
+       if (src_x != 0 || src_y != 0) {
+               DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+               return -EINVAL;
+       }
+
+       plane_state->color_plane[0].offset = offset;
+
+       return 0;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+                             struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int ret;
+
+       if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+               DRM_DEBUG_KMS("cursor cannot be tiled\n");
+               return -EINVAL;
+       }
+
+       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+                                                 &crtc_state->base,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 true, true);
+       if (ret)
+               return ret;
+
+       ret = intel_cursor_check_surface(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       ret = intel_plane_check_src_coordinates(plane_state);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+                      u32 pixel_format, u64 modifier,
+                      unsigned int rotation)
+{
+       return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       u32 cntl = 0;
+
+       if (crtc_state->gamma_enable)
+               cntl |= CURSOR_GAMMA_ENABLE;
+
+       return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+                          const struct intel_plane_state *plane_state)
+{
+       return CURSOR_ENABLE |
+               CURSOR_FORMAT_ARGB |
+               CURSOR_STRIDE(plane_state->color_plane[0].stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+       int width = plane_state->base.crtc_w;
+
+       /*
+        * 845g/865g are only limited by the width of their cursors;
+        * the height is arbitrary up to the precision of the register.
+        */
+       return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+                            struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int ret;
+
+       ret = intel_check_cursor(crtc_state, plane_state);
+       if (ret)
+               return ret;
+
+       /* if we want to turn off the cursor, ignore width and height */
+       if (!fb)
+               return 0;
+
+       /* Check for which cursor types we support */
+       if (!i845_cursor_size_ok(plane_state)) {
+               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+                         plane_state->base.crtc_w,
+                         plane_state->base.crtc_h);
+               return -EINVAL;
+       }
+
+       WARN_ON(plane_state->base.visible &&
+               plane_state->color_plane[0].stride != fb->pitches[0]);
+
+       switch (fb->pitches[0]) {
+       case 256:
+       case 512:
+       case 1024:
+       case 2048:
+               break;
+       default:
+               DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
+                             fb->pitches[0]);
+               return -EINVAL;
+       }
+
+       plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+       return 0;
+}
+
+static void i845_update_cursor(struct intel_plane *plane,
+                              const struct intel_crtc_state *crtc_state,
+                              const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       u32 cntl = 0, base = 0, pos = 0, size = 0;
+       unsigned long irqflags;
+
+       if (plane_state && plane_state->base.visible) {
+               unsigned int width = plane_state->base.crtc_w;
+               unsigned int height = plane_state->base.crtc_h;
+
+               cntl = plane_state->ctl |
+                       i845_cursor_ctl_crtc(crtc_state);
+
+               size = (height << 12) | width;
+
+               base = intel_cursor_base(plane_state);
+               pos = intel_cursor_position(plane_state);
+       }
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       /* On these chipsets we can only modify the base/size/stride
+        * whilst the cursor is disabled.
+        */
+       if (plane->cursor.base != base ||
+           plane->cursor.size != size ||
+           plane->cursor.cntl != cntl) {
+               I915_WRITE_FW(CURCNTR(PIPE_A), 0);
+               I915_WRITE_FW(CURBASE(PIPE_A), base);
+               I915_WRITE_FW(CURSIZE, size);
+               I915_WRITE_FW(CURPOS(PIPE_A), pos);
+               I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+
+               plane->cursor.base = base;
+               plane->cursor.size = size;
+               plane->cursor.cntl = cntl;
+       } else {
+               I915_WRITE_FW(CURPOS(PIPE_A), pos);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+                               const struct intel_crtc_state *crtc_state)
+{
+       i845_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+                                    enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+       *pipe = PIPE_A;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+                      u32 pixel_format, u64 modifier,
+                      unsigned int rotation)
+{
+       return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 cntl = 0;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               return cntl;
+
+       if (crtc_state->gamma_enable)
+               cntl |= MCURSOR_GAMMA_ENABLE;
+
+       if (crtc_state->csc_enable)
+               cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+       if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+               cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+
+       return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+                          const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       u32 cntl = 0;
+
+       if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+               cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+       switch (plane_state->base.crtc_w) {
+       case 64:
+               cntl |= MCURSOR_MODE_64_ARGB_AX;
+               break;
+       case 128:
+               cntl |= MCURSOR_MODE_128_ARGB_AX;
+               break;
+       case 256:
+               cntl |= MCURSOR_MODE_256_ARGB_AX;
+               break;
+       default:
+               MISSING_CASE(plane_state->base.crtc_w);
+               return 0;
+       }
+
+       if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
+               cntl |= MCURSOR_ROTATE_180;
+
+       return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       int width = plane_state->base.crtc_w;
+       int height = plane_state->base.crtc_h;
+
+       if (!intel_cursor_size_ok(plane_state))
+               return false;
+
+       /* Cursor width is limited to a few power-of-two sizes */
+       switch (width) {
+       case 256:
+       case 128:
+       case 64:
+               break;
+       default:
+               return false;
+       }
+
+       /*
+        * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+        * height from 8 lines up to the cursor width, when the
+        * cursor is not rotated. Everything else requires square
+        * cursors.
+        */
+       if (HAS_CUR_FBC(dev_priv) &&
+           plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+               if (height < 8 || height > width)
+                       return false;
+       } else {
+               if (height != width)
+                       return false;
+       }
+
+       return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+                            struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum pipe pipe = plane->pipe;
+       int ret;
+
+       ret = intel_check_cursor(crtc_state, plane_state);
+       if (ret)
+               return ret;
+
+       /* if we want to turn off the cursor, ignore width and height */
+       if (!fb)
+               return 0;
+
+       /* Check for which cursor types we support */
+       if (!i9xx_cursor_size_ok(plane_state)) {
+               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+                         plane_state->base.crtc_w,
+                         plane_state->base.crtc_h);
+               return -EINVAL;
+       }
+
+       WARN_ON(plane_state->base.visible &&
+               plane_state->color_plane[0].stride != fb->pitches[0]);
+
+       if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+               DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
+                             fb->pitches[0], plane_state->base.crtc_w);
+               return -EINVAL;
+       }
+
+       /*
+        * There's something wrong with the cursor on CHV pipe C.
+        * If it straddles the left edge of the screen then
+        * moving it away from the edge or disabling it often
+        * results in a pipe underrun, and often that can lead to
+        * dead pipe (constant underrun reported, and it scans
+        * out just a solid color). To recover from that, the
+        * display power well must be turned off and on again.
+        * Refuse to put the cursor into that compromised position.
+        */
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+           plane_state->base.visible && plane_state->base.crtc_x < 0) {
+               DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+               return -EINVAL;
+       }
+
+       plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+       return 0;
+}
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+                              const struct intel_crtc_state *crtc_state,
+                              const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+       unsigned long irqflags;
+
+       if (plane_state && plane_state->base.visible) {
+               cntl = plane_state->ctl |
+                       i9xx_cursor_ctl_crtc(crtc_state);
+
+               if (plane_state->base.crtc_h != plane_state->base.crtc_w)
+                       fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
+
+               base = intel_cursor_base(plane_state);
+               pos = intel_cursor_position(plane_state);
+       }
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       /*
+        * On some platforms writing CURCNTR first will also
+        * cause CURPOS to be armed by the CURBASE write.
+        * Without the CURCNTR write the CURPOS write would
+        * arm itself. Thus we always update CURCNTR before
+        * CURPOS.
+        *
+        * On other platforms CURPOS always requires the
+        * CURBASE write to arm the update. Additionally,
+        * a write to any of the cursor registers will cancel
+        * an already armed cursor update. Thus leaving out
+        * the CURBASE write after CURPOS could lead to a
+        * cursor that doesn't appear to move, or even change
+        * shape. Thus we always write CURBASE.
+        *
+        * The other registers are armed by the CURBASE write
+        * except when the plane is getting enabled at which time
+        * the CURCNTR write arms the update.
+        */
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               skl_write_cursor_wm(plane, crtc_state);
+
+       if (plane->cursor.base != base ||
+           plane->cursor.size != fbc_ctl ||
+           plane->cursor.cntl != cntl) {
+               if (HAS_CUR_FBC(dev_priv))
+                       I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+               I915_WRITE_FW(CURCNTR(pipe), cntl);
+               I915_WRITE_FW(CURPOS(pipe), pos);
+               I915_WRITE_FW(CURBASE(pipe), base);
+
+               plane->cursor.base = base;
+               plane->cursor.size = fbc_ctl;
+               plane->cursor.cntl = cntl;
+       } else {
+               I915_WRITE_FW(CURPOS(pipe), pos);
+               I915_WRITE_FW(CURBASE(pipe), base);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_cursor(struct intel_plane *plane,
+                               const struct intel_crtc_state *crtc_state)
+{
+       i9xx_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+                                    enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+       bool ret;
+       u32 val;
+
+       /*
+        * Not 100% correct for planes that can move between pipes,
+        * but that's only the case for gen2-3 which don't have any
+        * display power wells.
+        */
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(CURCNTR(plane->pipe));
+
+       ret = val & MCURSOR_MODE;
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               *pipe = plane->pipe;
+       else
+               *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+                       MCURSOR_PIPE_SELECT_SHIFT;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static const struct drm_display_mode load_detect_mode = {
+       DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+                704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+                        struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct intel_framebuffer *intel_fb;
+       int ret;
+
+       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+       if (!intel_fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+       if (ret)
+               goto err;
+
+       return &intel_fb->base;
+
+err:
+       kfree(intel_fb);
+       return ERR_PTR(ret);
+}
+
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+                                       struct drm_crtc *crtc)
+{
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       int ret, i;
+
+       ret = drm_atomic_add_affected_planes(state, crtc);
+       if (ret)
+               return ret;
+
+       for_each_new_plane_in_state(state, plane, plane_state, i) {
+               if (plane_state->crtc != crtc)
+                       continue;
+
+               ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+               if (ret)
+                       return ret;
+
+               drm_atomic_set_fb_for_plane(plane_state, NULL);
+       }
+
+       return 0;
+}
+
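+/*
+ * Load detection: temporarily bind the connector to an unused (or its
+ * already assigned) CRTC, commit the requested mode (the fixed 640x480
+ * load_detect_mode by default) with all planes disabled, and stash a
+ * duplicated state in old->restore_state so
+ * intel_release_load_detect_pipe() can undo the whole thing afterwards.
+ */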
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+                              const struct drm_display_mode *mode,
+                              struct intel_load_detect_pipe *old,
+                              struct drm_modeset_acquire_ctx *ctx)
+{
+       struct intel_crtc *intel_crtc;
+       struct intel_encoder *intel_encoder =
+               intel_attached_encoder(connector);
+       struct drm_crtc *possible_crtc;
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_crtc *crtc = NULL;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_atomic_state *state = NULL, *restore_state = NULL;
+       struct drm_connector_state *connector_state;
+       struct intel_crtc_state *crtc_state;
+       int ret, i = -1;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+                     connector->base.id, connector->name,
+                     encoder->base.id, encoder->name);
+
+       old->restore_state = NULL;
+
+       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+       /*
+        * Algorithm gets a little messy:
+        *
+        *   - if the connector already has an assigned crtc, use it (but make
+        *     sure it's on first)
+        *
+        *   - try to find the first unused crtc that can drive this connector,
+        *     and use that if we find one
+        */
+
+       /* See if we already have a CRTC for this connector */
+       if (connector->state->crtc) {
+               crtc = connector->state->crtc;
+
+               ret = drm_modeset_lock(&crtc->mutex, ctx);
+               if (ret)
+                       goto fail;
+
+               /* Make sure the crtc and connector are running */
+               goto found;
+       }
+
+       /* Find an unused one (if possible) */
+       for_each_crtc(dev, possible_crtc) {
+               i++;
+               if (!(encoder->possible_crtcs & (1 << i)))
+                       continue;
+
+               ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+               if (ret)
+                       goto fail;
+
+               if (possible_crtc->state->enable) {
+                       drm_modeset_unlock(&possible_crtc->mutex);
+                       continue;
+               }
+
+               crtc = possible_crtc;
+               break;
+       }
+
+       /*
+        * If we didn't find an unused CRTC, don't use any.
+        */
+       if (!crtc) {
+               DRM_DEBUG_KMS("no pipe available for load-detect\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+
+found:
+       intel_crtc = to_intel_crtc(crtc);
+
+       state = drm_atomic_state_alloc(dev);
+       restore_state = drm_atomic_state_alloc(dev);
+       if (!state || !restore_state) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       state->acquire_ctx = ctx;
+       restore_state->acquire_ctx = ctx;
+
+       connector_state = drm_atomic_get_connector_state(state, connector);
+       if (IS_ERR(connector_state)) {
+               ret = PTR_ERR(connector_state);
+               goto fail;
+       }
+
+       ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+       if (ret)
+               goto fail;
+
+       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+       if (IS_ERR(crtc_state)) {
+               ret = PTR_ERR(crtc_state);
+               goto fail;
+       }
+
+       crtc_state->base.active = crtc_state->base.enable = true;
+
+       if (!mode)
+               mode = &load_detect_mode;
+
+       ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+       if (ret)
+               goto fail;
+
+       ret = intel_modeset_disable_planes(state, crtc);
+       if (ret)
+               goto fail;
+
+       ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+       if (!ret)
+               ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+       if (!ret)
+               ret = drm_atomic_add_affected_planes(restore_state, crtc);
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+               goto fail;
+       }
+
+       ret = drm_atomic_commit(state);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+               goto fail;
+       }
+
+       old->restore_state = restore_state;
+       drm_atomic_state_put(state);
+
+       /* let the connector get through one full cycle before testing */
+       intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+       return true;
+
+fail:
+       if (state) {
+               drm_atomic_state_put(state);
+               state = NULL;
+       }
+       if (restore_state) {
+               drm_atomic_state_put(restore_state);
+               restore_state = NULL;
+       }
+
+       if (ret == -EDEADLK)
+               return ret;
+
+       return false;
+}
+
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+                                   struct intel_load_detect_pipe *old,
+                                   struct drm_modeset_acquire_ctx *ctx)
+{
+       struct intel_encoder *intel_encoder =
+               intel_attached_encoder(connector);
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_atomic_state *state = old->restore_state;
+       int ret;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+                     connector->base.id, connector->name,
+                     encoder->base.id, encoder->name);
+
+       if (!state)
+               return;
+
+       ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+       if (ret)
+               DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+       drm_atomic_state_put(state);
+}
+
+static int i9xx_pll_refclk(struct drm_device *dev,
+                          const struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+       if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+               return dev_priv->vbt.lvds_ssc_freq;
+       else if (HAS_PCH_SPLIT(dev_priv))
+               return 120000;
+       else if (!IS_GEN(dev_priv, 2))
+               return 96000;
+       else
+               return 48000;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+                               struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int pipe = pipe_config->cpu_transcoder;
+       u32 dpll = pipe_config->dpll_hw_state.dpll;
+       u32 fp;
+       struct dpll clock;
+       int port_clock;
+       int refclk = i9xx_pll_refclk(dev, pipe_config);
+
+       if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+               fp = pipe_config->dpll_hw_state.fp0;
+       else
+               fp = pipe_config->dpll_hw_state.fp1;
+
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       if (IS_PINEVIEW(dev_priv)) {
+               clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+               clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       } else {
+               clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+               clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       }
+
+       if (!IS_GEN(dev_priv, 2)) {
+               if (IS_PINEVIEW(dev_priv))
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+                               DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+               else
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+                              DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+               switch (dpll & DPLL_MODE_MASK) {
+               case DPLLB_MODE_DAC_SERIAL:
+                       clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+                               5 : 10;
+                       break;
+               case DPLLB_MODE_LVDS:
+                       clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+                               7 : 14;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+                                 "mode\n", (int)(dpll & DPLL_MODE_MASK));
+                       return;
+               }
+
+               if (IS_PINEVIEW(dev_priv))
+                       port_clock = pnv_calc_dpll_params(refclk, &clock);
+               else
+                       port_clock = i9xx_calc_dpll_params(refclk, &clock);
+       } else {
+               u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
+               bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
+
+               if (is_lvds) {
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                                      DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+                       if (lvds & LVDS_CLKB_POWER_UP)
+                               clock.p2 = 7;
+                       else
+                               clock.p2 = 14;
+               } else {
+                       if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                               clock.p1 = 2;
+                       else {
+                               clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                                           DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+                       }
+                       if (dpll & PLL_P2_DIVIDE_BY_4)
+                               clock.p2 = 4;
+                       else
+                               clock.p2 = 2;
+               }
+
+               port_clock = i9xx_calc_dpll_params(refclk, &clock);
+       }
+
+       /*
+        * This value includes pixel_multiplier. We will use
+        * port_clock to compute adjusted_mode.crtc_clock in the
+        * encoder's get_config() function.
+        */
+       pipe_config->port_clock = port_clock;
+}
+
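+/*
+ * Convert link M/N back into a dotclock: dotclock = link_m * link_freq / link_n.
+ * As a purely illustrative example, link_freq = 270000 with link_m/link_n of
+ * 22222/100000 gives 270000 * 22222 / 100000 = 59999, i.e. roughly a 60 MHz
+ * pixel clock.
+ */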
+int intel_dotclock_calculate(int link_freq,
+                            const struct intel_link_m_n *m_n)
+{
+       /*
+        * The calculation for the data clock is:
+        * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
+        * But we want to avoid losing precision if possible, so:
+        * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
+        *
+        * and the dotclock derived from the link M/N is simpler:
+        * dotclock = (link_m * link_clock) / link_n
+        */
+
+       if (!m_n->link_n)
+               return 0;
+
+       return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
+}
+
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+                                  struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       /* read out port_clock from the DPLL */
+       i9xx_crtc_clock_get(crtc, pipe_config);
+
+       /*
+        * In case there is an active pipe without active ports,
+        * we may still want a reasonable estimate of the dotclock.
+        * Calculate one based on the FDI configuration.
+        */
+       pipe_config->base.adjusted_mode.crtc_clock =
+               intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+                                        &pipe_config->fdi_m_n);
+}
+
+/* Returns the currently programmed mode of the given encoder. */
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc_state *crtc_state;
+       struct drm_display_mode *mode;
+       struct intel_crtc *crtc;
+       enum pipe pipe;
+
+       if (!encoder->get_hw_state(encoder, &pipe))
+               return NULL;
+
+       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
+       crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+       if (!crtc_state) {
+               kfree(mode);
+               return NULL;
+       }
+
+       crtc_state->base.crtc = &crtc->base;
+
+       if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
+               kfree(crtc_state);
+               kfree(mode);
+               return NULL;
+       }
+
+       encoder->get_config(encoder, crtc_state);
+
+       intel_mode_from_pipe_config(mode, crtc_state);
+
+       kfree(crtc_state);
+
+       return mode;
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(intel_crtc);
+}
+
+/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @cur: current plane state
+ * @new: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated, false otherwise.
+ */
+static bool intel_wm_need_update(struct intel_plane_state *cur,
+                                struct intel_plane_state *new)
+{
+       /* Update watermarks on tiling or size changes. */
+       if (new->base.visible != cur->base.visible)
+               return true;
+
+       if (!cur->base.fb || !new->base.fb)
+               return false;
+
+       if (cur->base.fb->modifier != new->base.fb->modifier ||
+           cur->base.rotation != new->base.rotation ||
+           drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
+           drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
+           drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
+           drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
+               return true;
+
+       return false;
+}
+
+static bool needs_scaling(const struct intel_plane_state *state)
+{
+       int src_w = drm_rect_width(&state->base.src) >> 16;
+       int src_h = drm_rect_height(&state->base.src) >> 16;
+       int dst_w = drm_rect_width(&state->base.dst);
+       int dst_h = drm_rect_height(&state->base.dst);
+
+       return (src_w != dst_w || src_h != dst_h);
+}
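The plane source rectangle is stored in 16.16 fixed point, hence the >> 16 above; a small sketch with invented sizes:

        /* Hypothetical: a 1920-wide source mapped onto a 1280-wide destination. */
        int src_w = 1920 << 16;                 /* 16.16 fixed point             */
        int dst_w = 1280;                       /* whole pixels                  */
        bool scaled = (src_w >> 16) != dst_w;   /* 1920 != 1280 -> needs scaling */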
+
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+                                   struct drm_crtc_state *crtc_state,
+                                   const struct intel_plane_state *old_plane_state,
+                                   struct drm_plane_state *plane_state)
+{
+       struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
+       struct drm_crtc *crtc = crtc_state->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *plane = to_intel_plane(plane_state->plane);
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       bool mode_changed = needs_modeset(crtc_state);
+       bool was_crtc_enabled = old_crtc_state->base.active;
+       bool is_crtc_enabled = crtc_state->active;
+       bool turn_off, turn_on, visible, was_visible;
+       struct drm_framebuffer *fb = plane_state->fb;
+       int ret;
+
+       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+               ret = skl_update_scaler_plane(
+                       to_intel_crtc_state(crtc_state),
+                       to_intel_plane_state(plane_state));
+               if (ret)
+                       return ret;
+       }
+
+       was_visible = old_plane_state->base.visible;
+       visible = plane_state->visible;
+
+       if (!was_crtc_enabled && WARN_ON(was_visible))
+               was_visible = false;
+
+       /*
+        * Visibility is calculated as if the crtc was on, but
+        * after scaler setup everything depends on it being off
+        * when the crtc isn't active.
+        *
+        * FIXME this is wrong for watermarks. Watermarks should also
+        * be computed as if the pipe would be active. Perhaps move
+        * per-plane wm computation to the .check_plane() hook, and
+        * only combine the results from all planes in the current place?
+        */
+       if (!is_crtc_enabled) {
+               plane_state->visible = visible = false;
+               to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
+               to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
+       }
+
+       if (!was_visible && !visible)
+               return 0;
+
+       if (fb != old_plane_state->base.fb)
+               pipe_config->fb_changed = true;
+
+       turn_off = was_visible && (!visible || mode_changed);
+       turn_on = visible && (!was_visible || mode_changed);
+
+       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+                        intel_crtc->base.base.id, intel_crtc->base.name,
+                        plane->base.base.id, plane->base.name,
+                        fb ? fb->base.id : -1);
+
+       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+                        plane->base.base.id, plane->base.name,
+                        was_visible, visible,
+                        turn_off, turn_on, mode_changed);
+
+       if (turn_on) {
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+                       pipe_config->update_wm_pre = true;
+
+               /* must disable cxsr around plane enable/disable */
+               if (plane->id != PLANE_CURSOR)
+                       pipe_config->disable_cxsr = true;
+       } else if (turn_off) {
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+                       pipe_config->update_wm_post = true;
+
+               /* must disable cxsr around plane enable/disable */
+               if (plane->id != PLANE_CURSOR)
+                       pipe_config->disable_cxsr = true;
+       } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
+                                       to_intel_plane_state(plane_state))) {
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+                       /* FIXME bollocks */
+                       pipe_config->update_wm_pre = true;
+                       pipe_config->update_wm_post = true;
+               }
+       }
+
+       if (visible || was_visible)
+               pipe_config->fb_bits |= plane->frontbuffer_bit;
+
+       /*
+        * ILK/SNB DVSACNTR/Sprite Enable
+        * IVB SPR_CTL/Sprite Enable
+        * "When in Self Refresh Big FIFO mode, a write to enable the
+        *  plane will be internally buffered and delayed while Big FIFO
+        *  mode is exiting."
+        *
+        * Which means that enabling the sprite can take an extra frame
+        * when we start in big FIFO mode (LP1+). Thus we need to drop
+        * down to LP0 and wait for vblank in order to make sure the
+        * sprite gets enabled on the next vblank after the register write.
+        * Doing otherwise would risk enabling the sprite one frame after
+        * we've already signalled flip completion. We can resume LP1+
+        * once the sprite has been enabled.
+        *
+        *
+        * WaCxSRDisabledForSpriteScaling:ivb
+        * IVB SPR_SCALE/Scaling Enable
+        * "Low Power watermarks must be disabled for at least one
+        *  frame before enabling sprite scaling, and kept disabled
+        *  until sprite scaling is disabled."
+        *
+        * ILK/SNB DVSASCALE/Scaling Enable
+        * "When in Self Refresh Big FIFO mode, scaling enable will be
+        *  masked off while Big FIFO mode is exiting."
+        *
+        * Despite the w/a only being listed for IVB we assume that
+        * the ILK/SNB note has similar ramifications, hence we apply
+        * the w/a on all three platforms.
+        *
+        * Experimental results suggest this is needed also for the primary
+        * plane, not only the sprite plane.
+        */
+       if (plane->id != PLANE_CURSOR &&
+           (IS_GEN_RANGE(dev_priv, 5, 6) ||
+            IS_IVYBRIDGE(dev_priv)) &&
+           (turn_on || (!needs_scaling(old_plane_state) &&
+                        needs_scaling(to_intel_plane_state(plane_state)))))
+               pipe_config->disable_lp_wm = true;
+
+       return 0;
+}
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+                              const struct intel_encoder *b)
+{
+       /* masks could be asymmetric, so check both ways */
+       return a == b || (a->cloneable & (1 << b->type) &&
+                         b->cloneable & (1 << a->type));
+}
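A sketch of the symmetric mask check above, with cloneable masks invented purely for illustration:

        /*
         * Hypothetical masks: a->type = ANALOG, a->cloneable = BIT(DVO);
         * b->type = DVO, b->cloneable = BIT(ANALOG).
         * Both directions of the check pass, so the pair is cloneable.
         * If only one side listed the other, the pair would be rejected.
         */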
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+                                        struct intel_crtc *crtc,
+                                        struct intel_encoder *encoder)
+{
+       struct intel_encoder *source_encoder;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       int i;
+
+       for_each_new_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+
+               source_encoder =
+                       to_intel_encoder(connector_state->best_encoder);
+               if (!encoders_cloneable(encoder, source_encoder))
+                       return false;
+       }
+
+       return true;
+}
+
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+       struct intel_plane *plane, *linked;
+       struct intel_plane_state *plane_state, *linked_plane_state;
+       int i;
+
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               linked = plane_state->linked_plane;
+
+               if (!linked)
+                       continue;
+
+               linked_plane_state = intel_atomic_get_plane_state(state, linked);
+               if (IS_ERR(linked_plane_state))
+                       return PTR_ERR(linked_plane_state);
+
+               WARN_ON(linked_plane_state->linked_plane != plane);
+               WARN_ON(linked_plane_state->slave == plane_state->slave);
+       }
+
+       return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+       struct intel_plane *plane, *linked;
+       struct intel_plane_state *plane_state;
+       int i;
+
+       if (INTEL_GEN(dev_priv) < 11)
+               return 0;
+
+       /*
+        * Destroy all old plane links and make the slave plane invisible
+        * in the crtc_state->active_planes mask.
+        */
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+                       continue;
+
+               plane_state->linked_plane = NULL;
+               if (plane_state->slave && !plane_state->base.visible) {
+                       crtc_state->active_planes &= ~BIT(plane->id);
+                       crtc_state->update_planes |= BIT(plane->id);
+               }
+
+               plane_state->slave = false;
+       }
+
+       if (!crtc_state->nv12_planes)
+               return 0;
+
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               struct intel_plane_state *linked_state = NULL;
+
+               if (plane->pipe != crtc->pipe ||
+                   !(crtc_state->nv12_planes & BIT(plane->id)))
+                       continue;
+
+               for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+                       if (!icl_is_nv12_y_plane(linked->id))
+                               continue;
+
+                       if (crtc_state->active_planes & BIT(linked->id))
+                               continue;
+
+                       linked_state = intel_atomic_get_plane_state(state, linked);
+                       if (IS_ERR(linked_state))
+                               return PTR_ERR(linked_state);
+
+                       break;
+               }
+
+               if (!linked_state) {
+                       DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
+                                     hweight8(crtc_state->nv12_planes));
+
+                       return -EINVAL;
+               }
+
+               plane_state->linked_plane = linked;
+
+               linked_state->slave = true;
+               linked_state->linked_plane = plane;
+               crtc_state->active_planes |= BIT(linked->id);
+               crtc_state->update_planes |= BIT(linked->id);
+               DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+       }
+
+       return 0;
+}
+
+static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct intel_atomic_state *state =
+               to_intel_atomic_state(new_crtc_state->base.state);
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+
+       return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
+}
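The double negation above collapses the plane masks to booleans, so only transitions between "no C8 planes" and "some C8 planes" count; invented masks as an example:

        /*
         * Hypothetical: old c8_planes = 0x0, new c8_planes = 0x3
         *   -> !0x0 != !0x3 -> true, gamma enable bits may need updating.
         * old = 0x1, new = 0x2 -> !0x1 != !0x2 -> false, still "some" C8 planes.
         */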
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc_state);
+       int ret;
+       bool mode_changed = needs_modeset(crtc_state);
+
+       if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+           mode_changed && !crtc_state->active)
+               pipe_config->update_wm_post = true;
+
+       if (mode_changed && crtc_state->enable &&
+           dev_priv->display.crtc_compute_clock &&
+           !WARN_ON(pipe_config->shared_dpll)) {
+               ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+                                                          pipe_config);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * May need to update pipe gamma enable bits
+        * when C8 planes are getting enabled/disabled.
+        */
+       if (c8_planes_changed(pipe_config))
+               crtc_state->color_mgmt_changed = true;
+
+       if (mode_changed || pipe_config->update_pipe ||
+           crtc_state->color_mgmt_changed) {
+               ret = intel_color_check(pipe_config);
+               if (ret)
+                       return ret;
+       }
+
+       ret = 0;
+       if (dev_priv->display.compute_pipe_wm) {
+               ret = dev_priv->display.compute_pipe_wm(pipe_config);
+               if (ret) {
+                       DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
+                       return ret;
+               }
+       }
+
+       if (dev_priv->display.compute_intermediate_wm) {
+               if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+                       return 0;
+
+               /*
+                * Calculate 'intermediate' watermarks that satisfy both the
+                * old state and the new state.  We can program these
+                * immediately.
+                */
+               ret = dev_priv->display.compute_intermediate_wm(pipe_config);
+               if (ret) {
+                       DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+                       return ret;
+               }
+       }
+
+       if (INTEL_GEN(dev_priv) >= 9) {
+               if (mode_changed || pipe_config->update_pipe)
+                       ret = skl_update_scaler_crtc(pipe_config);
+
+               if (!ret)
+                       ret = icl_check_nv12_planes(pipe_config);
+               if (!ret)
+                       ret = skl_check_pipe_max_pixel_rate(intel_crtc,
+                                                           pipe_config);
+               if (!ret)
+                       ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
+                                                        pipe_config);
+       }
+
+       if (HAS_IPS(dev_priv))
+               pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+
+       return ret;
+}
+
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+       .atomic_check = intel_crtc_atomic_check,
+};
+
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+       struct intel_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               if (connector->base.state->crtc)
+                       drm_connector_put(&connector->base);
+
+               if (connector->base.encoder) {
+                       connector->base.state->best_encoder =
+                               connector->base.encoder;
+                       connector->base.state->crtc =
+                               connector->base.encoder->crtc;
+
+                       drm_connector_get(&connector->base);
+               } else {
+                       connector->base.state->best_encoder = NULL;
+                       connector->base.state->crtc = NULL;
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+}
+
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+                     struct intel_crtc_state *pipe_config)
+{
+       struct drm_connector *connector = conn_state->connector;
+       const struct drm_display_info *info = &connector->display_info;
+       int bpp;
+
+       switch (conn_state->max_bpc) {
+       case 6 ... 7:
+               bpp = 6 * 3;
+               break;
+       case 8 ... 9:
+               bpp = 8 * 3;
+               break;
+       case 10 ... 11:
+               bpp = 10 * 3;
+               break;
+       case 12:
+               bpp = 12 * 3;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (bpp < pipe_config->pipe_bpp) {
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+                             connector->base.id, connector->name,
+                             bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+                             pipe_config->pipe_bpp);
+
+               pipe_config->pipe_bpp = bpp;
+       }
+
+       return 0;
+}
+
+static int
+compute_baseline_pipe_bpp(struct intel_crtc *crtc,
+                         struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct drm_atomic_state *state = pipe_config->base.state;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       int bpp, i;
+
+       if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+           IS_CHERRYVIEW(dev_priv)))
+               bpp = 10*3;
+       else if (INTEL_GEN(dev_priv) >= 5)
+               bpp = 12*3;
+       else
+               bpp = 8*3;
+
+       pipe_config->pipe_bpp = bpp;
+
+       /* Clamp display bpp to connector max bpp */
+       for_each_new_connector_in_state(state, connector, connector_state, i) {
+               int ret;
+
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+
+               ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
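How the two helpers above combine, with a hypothetical connector (numbers invented):

        /*
         * Hypothetical: on CHV the baseline is 10 * 3 = 30 bpp. A connector
         * whose max_bpc resolves to 8 maps to 8 * 3 = 24 bpp in
         * compute_sink_pipe_bpp(), which is lower, so pipe_bpp is clamped
         * from 30 down to 24 for the whole pipe.
         */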
+
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+       DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+                     "type: 0x%x flags: 0x%x\n",
+                     mode->crtc_clock,
+                     mode->crtc_hdisplay, mode->crtc_hsync_start,
+                     mode->crtc_hsync_end, mode->crtc_htotal,
+                     mode->crtc_vdisplay, mode->crtc_vsync_start,
+                     mode->crtc_vsync_end, mode->crtc_vtotal,
+                     mode->type, mode->flags);
+}
+
+static inline void
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+                     const char *id, unsigned int lane_count,
+                     const struct intel_link_m_n *m_n)
+{
+       DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+                     id, lane_count,
+                     m_n->gmch_m, m_n->gmch_n,
+                     m_n->link_m, m_n->link_n, m_n->tu);
+}
+
+static void
+intel_dump_infoframe(struct drm_i915_private *dev_priv,
+                    const union hdmi_infoframe *frame)
+{
+       if ((drm_debug & DRM_UT_KMS) == 0)
+               return;
+
+       hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+}
+
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+       OUTPUT_TYPE(UNUSED),
+       OUTPUT_TYPE(ANALOG),
+       OUTPUT_TYPE(DVO),
+       OUTPUT_TYPE(SDVO),
+       OUTPUT_TYPE(LVDS),
+       OUTPUT_TYPE(TVOUT),
+       OUTPUT_TYPE(HDMI),
+       OUTPUT_TYPE(DP),
+       OUTPUT_TYPE(EDP),
+       OUTPUT_TYPE(DSI),
+       OUTPUT_TYPE(DDI),
+       OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+                                 unsigned int output_types)
+{
+       char *str = buf;
+       int i;
+
+       str[0] = '\0';
+
+       for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+               int r;
+
+               if ((output_types & BIT(i)) == 0)
+                       continue;
+
+               r = snprintf(str, len, "%s%s",
+                            str != buf ? "," : "", output_type_str[i]);
+               if (r >= len)
+                       break;
+               str += r;
+               len -= r;
+
+               output_types &= ~BIT(i);
+       }
+
+       WARN_ON_ONCE(output_types != 0);
+}
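A possible use of the helper above; the mask is hypothetical:

        char buf[64];

        /* Hypothetical mask with the HDMI and DP bits set. */
        snprintf_output_types(buf, sizeof(buf),
                              BIT(INTEL_OUTPUT_HDMI) | BIT(INTEL_OUTPUT_DP));
        /* buf now contains "HDMI,DP" (names follow the enum order). */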
+
+static const char * const output_format_str[] = {
+       [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+       [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+       [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+       [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+       if (format >= ARRAY_SIZE(output_format_str))
+               format = INTEL_OUTPUT_FORMAT_INVALID;
+       return output_format_str[format];
+}
+
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       struct drm_format_name_buf format_name;
+
+       if (!fb) {
+               DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+                             plane->base.base.id, plane->base.name,
+                             yesno(plane_state->base.visible));
+               return;
+       }
+
+       DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+                     plane->base.base.id, plane->base.name,
+                     fb->base.id, fb->width, fb->height,
+                     drm_get_format_name(fb->format->format, &format_name),
+                     yesno(plane_state->base.visible));
+       DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
+                     plane_state->base.rotation, plane_state->scaler_id);
+       if (plane_state->base.visible)
+               DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+                             DRM_RECT_FP_ARG(&plane_state->base.src),
+                             DRM_RECT_ARG(&plane_state->base.dst));
+}
+
+static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
+                                  struct intel_atomic_state *state,
+                                  const char *context)
+{
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct intel_plane_state *plane_state;
+       struct intel_plane *plane;
+       char buf[64];
+       int i;
+
+       DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
+                     crtc->base.base.id, crtc->base.name,
+                     yesno(pipe_config->base.enable), context);
+
+       if (!pipe_config->base.enable)
+               goto dump_planes;
+
+       snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+       DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
+                     yesno(pipe_config->base.active),
+                     buf, pipe_config->output_types,
+                     output_formats(pipe_config->output_format));
+
+       DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+                     transcoder_name(pipe_config->cpu_transcoder),
+                     pipe_config->pipe_bpp, pipe_config->dither);
+
+       if (pipe_config->has_pch_encoder)
+               intel_dump_m_n_config(pipe_config, "fdi",
+                                     pipe_config->fdi_lanes,
+                                     &pipe_config->fdi_m_n);
+
+       if (intel_crtc_has_dp_encoder(pipe_config)) {
+               intel_dump_m_n_config(pipe_config, "dp m_n",
+                               pipe_config->lane_count, &pipe_config->dp_m_n);
+               if (pipe_config->has_drrs)
+                       intel_dump_m_n_config(pipe_config, "dp m2_n2",
+                                             pipe_config->lane_count,
+                                             &pipe_config->dp_m2_n2);
+       }
+
+       DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+                     pipe_config->has_audio, pipe_config->has_infoframe,
+                     pipe_config->infoframes.enable);
+
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+               DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
+       if (pipe_config->infoframes.enable &
+           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+
+       DRM_DEBUG_KMS("requested mode:\n");
+       drm_mode_debug_printmodeline(&pipe_config->base.mode);
+       DRM_DEBUG_KMS("adjusted mode:\n");
+       drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
+       intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
+       DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
+                     pipe_config->port_clock,
+                     pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+                     pipe_config->pixel_rate);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+                             crtc->num_scalers,
+                             pipe_config->scaler_state.scaler_users,
+                             pipe_config->scaler_state.scaler_id);
+
+       if (HAS_GMCH(dev_priv))
+               DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+                             pipe_config->gmch_pfit.control,
+                             pipe_config->gmch_pfit.pgm_ratios,
+                             pipe_config->gmch_pfit.lvds_border_bits);
+       else
+               DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
+                             pipe_config->pch_pfit.pos,
+                             pipe_config->pch_pfit.size,
+                             enableddisabled(pipe_config->pch_pfit.enabled),
+                             yesno(pipe_config->pch_pfit.force_thru));
+
+       DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
+                     pipe_config->ips_enabled, pipe_config->double_wide);
+
+       intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
+
+dump_planes:
+       if (!state)
+               return;
+
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               if (plane->pipe == crtc->pipe)
+                       intel_dump_plane_state(plane_state);
+       }
+}
+
+static bool check_digital_port_conflicts(struct intel_atomic_state *state)
+{
+       struct drm_device *dev = state->base.dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       unsigned int used_ports = 0;
+       unsigned int used_mst_ports = 0;
+       bool ret = true;
+
+       /*
+        * Walk the connector list instead of the encoder
+        * list to detect the problem on ddi platforms
+        * where there's just one encoder per digital port.
+        */
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               struct drm_connector_state *connector_state;
+               struct intel_encoder *encoder;
+
+               connector_state =
+                       drm_atomic_get_new_connector_state(&state->base,
+                                                          connector);
+               if (!connector_state)
+                       connector_state = connector->state;
+
+               if (!connector_state->best_encoder)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+
+               WARN_ON(!connector_state->crtc);
+
+               switch (encoder->type) {
+                       unsigned int port_mask;
+               case INTEL_OUTPUT_DDI:
+                       if (WARN_ON(!HAS_DDI(to_i915(dev))))
+                               break;
+                       /* else: fall through */
+               case INTEL_OUTPUT_DP:
+               case INTEL_OUTPUT_HDMI:
+               case INTEL_OUTPUT_EDP:
+                       port_mask = 1 << encoder->port;
+
+                       /* the same port mustn't appear more than once */
+                       if (used_ports & port_mask)
+                               ret = false;
+
+                       used_ports |= port_mask;
+                       break;
+               case INTEL_OUTPUT_DP_MST:
+                       used_mst_ports |=
+                               1 << encoder->port;
+                       break;
+               default:
+                       break;
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       /* can't mix MST and SST/HDMI on the same port */
+       if (used_ports & used_mst_ports)
+               return false;
+
+       return ret;
+}
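The port bookkeeping above boils down to two bitmasks; a hypothetical conflict:

        /*
         * Hypothetical: an SST DP connector on port B sets
         * used_ports |= BIT(PORT_B); an MST connector on the same port sets
         * used_mst_ports |= BIT(PORT_B). used_ports & used_mst_ports is then
         * non-zero, so the configuration is rejected.
         */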
+
+static int
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->base.crtc->dev);
+       struct intel_crtc_state *saved_state;
+
+       saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+       if (!saved_state)
+               return -ENOMEM;
+
+       /* FIXME: before the switch to atomic started, a new pipe_config was
+        * kzalloc'd. Code that depends on any field being zero should be
+        * fixed, so that the crtc_state can be safely duplicated. For now,
+        * only fields that are known not to cause problems are preserved. */
+
+       saved_state->scaler_state = crtc_state->scaler_state;
+       saved_state->shared_dpll = crtc_state->shared_dpll;
+       saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+       saved_state->crc_enabled = crtc_state->crc_enabled;
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               saved_state->wm = crtc_state->wm;
+
+       /* Keep base drm_crtc_state intact, only clear our extended struct */
+       BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
+       memcpy(&crtc_state->base + 1, &saved_state->base + 1,
+              sizeof(*crtc_state) - sizeof(crtc_state->base));
+
+       kfree(saved_state);
+       return 0;
+}
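The memcpy above relies on base being the first member, so &crtc_state->base + 1 is the first byte after the embedded drm_crtc_state and everything past it is overwritten from the mostly-zeroed saved_state. The same idiom in a stand-alone form (types and names invented for illustration):

        struct inner { int x; };        /* stand-in for drm_crtc_state           */
        struct outer {
                struct inner base;      /* must stay first, cf. the BUILD_BUG_ON */
                int a, b, c;            /* driver-private extension to be reset  */
        };

        static void reset_extension(struct outer *o, const struct outer *src)
        {
                /* Copy everything located after the embedded base member. */
                memcpy(&o->base + 1, &src->base + 1,
                       sizeof(*o) - sizeof(o->base));
        }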
+
+static int
+intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_atomic_state *state = pipe_config->base.state;
+       struct intel_encoder *encoder;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
+       int base_bpp, ret;
+       int i;
+       bool retry = true;
+
+       ret = clear_intel_crtc_state(pipe_config);
+       if (ret)
+               return ret;
+
+       pipe_config->cpu_transcoder =
+               (enum transcoder) to_intel_crtc(crtc)->pipe;
+
+       /*
+        * Sanitize sync polarity flags based on requested ones. If neither
+        * positive or negative polarity is requested, treat this as meaning
+        * negative polarity.
+        */
+       if (!(pipe_config->base.adjusted_mode.flags &
+             (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+               pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+       if (!(pipe_config->base.adjusted_mode.flags &
+             (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+               pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+       ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+                                       pipe_config);
+       if (ret)
+               return ret;
+
+       base_bpp = pipe_config->pipe_bpp;
+
+       /*
+        * Determine the real pipe dimensions. Note that stereo modes can
+        * increase the actual pipe size due to the frame doubling and
+        * insertion of additional space for blanks between the frames. This
+        * is stored in the crtc timings. We use the requested mode to do this
+        * computation to clearly distinguish it from the adjusted mode, which
+        * can be changed by the connectors in the below retry loop.
+        */
+       drm_mode_get_hv_timing(&pipe_config->base.mode,
+                              &pipe_config->pipe_src_w,
+                              &pipe_config->pipe_src_h);
+
+       for_each_new_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != crtc)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+
+               if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+                       DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+                       return -EINVAL;
+               }
+
+               /*
+                * Determine output_types before calling the .compute_config()
+                * hooks so that the hooks can use this information safely.
+                */
+               if (encoder->compute_output_type)
+                       pipe_config->output_types |=
+                               BIT(encoder->compute_output_type(encoder, pipe_config,
+                                                                connector_state));
+               else
+                       pipe_config->output_types |= BIT(encoder->type);
+       }
+
+encoder_retry:
+       /* Ensure the port clock defaults are reset when retrying. */
+       pipe_config->port_clock = 0;
+       pipe_config->pixel_multiplier = 1;
+
+       /* Fill in default crtc timings, allow encoders to overwrite them. */
+       drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+                             CRTC_STEREO_DOUBLE);
+
+       /* Pass our mode to the connectors and the CRTC to give them a chance to
+        * adjust it according to limitations or connector properties, and also
+        * a chance to reject the mode entirely.
+        */
+       for_each_new_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != crtc)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               ret = encoder->compute_config(encoder, pipe_config,
+                                             connector_state);
+               if (ret < 0) {
+                       if (ret != -EDEADLK)
+                               DRM_DEBUG_KMS("Encoder config failure: %d\n",
+                                             ret);
+                       return ret;
+               }
+       }
+
+       /* Set default port clock if not overwritten by the encoder. Needs to be
+        * done afterwards in case the encoder adjusts the mode. */
+       if (!pipe_config->port_clock)
+               pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
+                       * pipe_config->pixel_multiplier;
+
+       ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+       if (ret == -EDEADLK)
+               return ret;
+       if (ret < 0) {
+               DRM_DEBUG_KMS("CRTC fixup failed\n");
+               return ret;
+       }
+
+       if (ret == RETRY) {
+               if (WARN(!retry, "loop in pipe configuration computation\n"))
+                       return -EINVAL;
+
+               DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+               retry = false;
+               goto encoder_retry;
+       }
+
+       /* Dithering seems not to pass through bits correctly when it should, so
+        * only enable it on 6bpc panels and when it's not a compliance
+        * test requesting a 6bpc video pattern.
+        */
+       pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
+               !pipe_config->dither_force_disable;
+       DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+                     base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+       return 0;
+}
+
+bool intel_fuzzy_clock_check(int clock1, int clock2)
+{
+       int diff;
+
+       if (clock1 == clock2)
+               return true;
+
+       if (!clock1 || !clock2)
+               return false;
+
+       diff = abs(clock1 - clock2);
+
+       if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
+               return true;
+
+       return false;
+}
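The fuzzy check above accepts two clocks whose difference stays below roughly 5% of their sum; two hypothetical data points:

        /*
         * clock1 = 100000, clock2 = 104000: diff = 4000,
         *   ((4000 + 204000) * 100) / 204000 = 101  -> 101 < 105, match.
         * clock1 = 100000, clock2 = 112000: diff = 12000,
         *   ((12000 + 212000) * 100) / 212000 = 105 -> not < 105, mismatch.
         */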
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+                 unsigned int m2, unsigned int n2,
+                 bool exact)
+{
+       if (m == m2 && n == n2)
+               return true;
+
+       if (exact || !m || !n || !m2 || !n2)
+               return false;
+
+       BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
+       if (n > n2) {
+               while (n > n2) {
+                       m2 <<= 1;
+                       n2 <<= 1;
+               }
+       } else if (n < n2) {
+               while (n < n2) {
+                       m <<= 1;
+                       n <<= 1;
+               }
+       }
+
+       if (n != n2)
+               return false;
+
+       return intel_fuzzy_clock_check(m, m2);
+}
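A hypothetical non-exact comparison with the helper above: the pair with the smaller n is doubled until the n values line up, then the m values get the same fuzzy treatment as clocks:

        /*
         * Hypothetical: (m = 10, n = 16) vs (m2 = 41, n2 = 64).
         * n < n2, so (m, n) is doubled twice to (40, 64); now n == n2 and the
         * result is intel_fuzzy_clock_check(40, 41), which passes because the
         * ratios 10/16 and 41/64 only differ by about 2.5%.
         */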
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+                      struct intel_link_m_n *m2_n2,
+                      bool adjust)
+{
+       if (m_n->tu == m2_n2->tu &&
+           intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+                             m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+           intel_compare_m_n(m_n->link_m, m_n->link_n,
+                             m2_n2->link_m, m2_n2->link_n, !adjust)) {
+               if (adjust)
+                       *m2_n2 = *m_n;
+
+               return true;
+       }
+
+       return false;
+}
+
+static bool
+intel_compare_infoframe(const union hdmi_infoframe *a,
+                       const union hdmi_infoframe *b)
+{
+       return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+static void
+pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
+                         bool adjust, const char *name,
+                         const union hdmi_infoframe *a,
+                         const union hdmi_infoframe *b)
+{
+       if (adjust) {
+               if ((drm_debug & DRM_UT_KMS) == 0)
+                       return;
+
+               drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+               drm_dbg(DRM_UT_KMS, "expected:");
+               hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+               drm_dbg(DRM_UT_KMS, "found:");
+               hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+       } else {
+               drm_err("mismatch in %s infoframe", name);
+               drm_err("expected:");
+               hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+               drm_err("found:");
+               hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+       }
+}
+
+static void __printf(3, 4)
+pipe_config_err(bool adjust, const char *name, const char *format, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, format);
+       vaf.fmt = format;
+       vaf.va = &args;
+
+       if (adjust)
+               drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
+       else
+               drm_err("mismatch in %s %pV", name, &vaf);
+
+       va_end(args);
+}
+
+static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+{
+       if (i915_modparams.fastboot != -1)
+               return i915_modparams.fastboot;
+
+       /* Enable fastboot by default on Skylake and newer */
+       if (INTEL_GEN(dev_priv) >= 9)
+               return true;
+
+       /* Enable fastboot by default on VLV and CHV */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return true;
+
+       /* Disabled by default on all others */
+       return false;
+}
+
+static bool
+intel_pipe_config_compare(struct drm_i915_private *dev_priv,
+                         struct intel_crtc_state *current_config,
+                         struct intel_crtc_state *pipe_config,
+                         bool adjust)
+{
+       bool ret = true;
+       bool fixup_inherited = adjust &&
+               (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+               !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
+
+       if (fixup_inherited && !fastboot_enabled(dev_priv)) {
+               DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+               ret = false;
+       }
+
+#define PIPE_CONF_CHECK_X(name) do { \
+       if (current_config->name != pipe_config->name) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected 0x%08x, found 0x%08x)\n", \
+                         current_config->name, \
+                         pipe_config->name); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_I(name) do { \
+       if (current_config->name != pipe_config->name) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected %i, found %i)\n", \
+                         current_config->name, \
+                         pipe_config->name); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_BOOL(name) do { \
+       if (current_config->name != pipe_config->name) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected %s, found %s)\n", \
+                         yesno(current_config->name), \
+                         yesno(pipe_config->name)); \
+               ret = false; \
+       } \
+} while (0)
+
+/*
+ * Checks state where we only read out the enabling, but not the entire
+ * state itself (like full infoframes or ELD for audio). These states
+ * require a full modeset on bootup to fix up.
+ */
+#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
+       if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
+               PIPE_CONF_CHECK_BOOL(name); \
+       } else { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+                         yesno(current_config->name), \
+                         yesno(pipe_config->name)); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_P(name) do { \
+       if (current_config->name != pipe_config->name) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected %p, found %p)\n", \
+                         current_config->name, \
+                         pipe_config->name); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_M_N(name) do { \
+       if (!intel_compare_link_m_n(&current_config->name, \
+                                   &pipe_config->name,\
+                                   adjust)) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected tu %i gmch %i/%i link %i/%i, " \
+                         "found tu %i, gmch %i/%i link %i/%i)\n", \
+                         current_config->name.tu, \
+                         current_config->name.gmch_m, \
+                         current_config->name.gmch_n, \
+                         current_config->name.link_m, \
+                         current_config->name.link_n, \
+                         pipe_config->name.tu, \
+                         pipe_config->name.gmch_m, \
+                         pipe_config->name.gmch_n, \
+                         pipe_config->name.link_m, \
+                         pipe_config->name.link_n); \
+               ret = false; \
+       } \
+} while (0)
+
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low RR.
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
+       if (!intel_compare_link_m_n(&current_config->name, \
+                                   &pipe_config->name, adjust) && \
+           !intel_compare_link_m_n(&current_config->alt_name, \
+                                   &pipe_config->name, adjust)) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected tu %i gmch %i/%i link %i/%i, " \
+                         "or tu %i gmch %i/%i link %i/%i, " \
+                         "found tu %i, gmch %i/%i link %i/%i)\n", \
+                         current_config->name.tu, \
+                         current_config->name.gmch_m, \
+                         current_config->name.gmch_n, \
+                         current_config->name.link_m, \
+                         current_config->name.link_n, \
+                         current_config->alt_name.tu, \
+                         current_config->alt_name.gmch_m, \
+                         current_config->alt_name.gmch_n, \
+                         current_config->alt_name.link_m, \
+                         current_config->alt_name.link_n, \
+                         pipe_config->name.tu, \
+                         pipe_config->name.gmch_m, \
+                         pipe_config->name.gmch_n, \
+                         pipe_config->name.link_m, \
+                         pipe_config->name.link_n); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
+       if ((current_config->name ^ pipe_config->name) & (mask)) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(%x) (expected %i, found %i)\n", \
+                         (mask), \
+                         current_config->name & (mask), \
+                         pipe_config->name & (mask)); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
+       if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+               pipe_config_err(adjust, __stringify(name), \
+                         "(expected %i, found %i)\n", \
+                         current_config->name, \
+                         pipe_config->name); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
+       if (!intel_compare_infoframe(&current_config->infoframes.name, \
+                                    &pipe_config->infoframes.name)) { \
+               pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
+                                         &current_config->infoframes.name, \
+                                         &pipe_config->infoframes.name); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_QUIRK(quirk) \
+       ((current_config->quirks | pipe_config->quirks) & (quirk))
+
+       PIPE_CONF_CHECK_I(cpu_transcoder);
+
+       PIPE_CONF_CHECK_BOOL(has_pch_encoder);
+       PIPE_CONF_CHECK_I(fdi_lanes);
+       PIPE_CONF_CHECK_M_N(fdi_m_n);
+
+       PIPE_CONF_CHECK_I(lane_count);
+       PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+
+       if (INTEL_GEN(dev_priv) < 8) {
+               PIPE_CONF_CHECK_M_N(dp_m_n);
+
+               if (current_config->has_drrs)
+                       PIPE_CONF_CHECK_M_N(dp_m2_n2);
+       } else
+               PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+
+       PIPE_CONF_CHECK_X(output_types);
+
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
+
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
+       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
+
+       PIPE_CONF_CHECK_I(pixel_multiplier);
+       PIPE_CONF_CHECK_I(output_format);
+       PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
+       if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               PIPE_CONF_CHECK_BOOL(limited_color_range);
+
+       PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
+       PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
+       PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
+
+       PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
+
+       PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+                             DRM_MODE_FLAG_INTERLACE);
+
+       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
+               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+                                     DRM_MODE_FLAG_PHSYNC);
+               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+                                     DRM_MODE_FLAG_NHSYNC);
+               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+                                     DRM_MODE_FLAG_PVSYNC);
+               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+                                     DRM_MODE_FLAG_NVSYNC);
+       }
+
+       PIPE_CONF_CHECK_X(gmch_pfit.control);
+       /* pfit ratios are autocomputed by the hw on gen4+ */
+       if (INTEL_GEN(dev_priv) < 4)
+               PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
+       PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+
+       /*
+        * Changing the EDP transcoder input mux
+        * (A_ONOFF vs. A_ON) requires a full modeset.
+        */
+       PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
+
+       if (!adjust) {
+               PIPE_CONF_CHECK_I(pipe_src_w);
+               PIPE_CONF_CHECK_I(pipe_src_h);
+
+               PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+               if (current_config->pch_pfit.enabled) {
+                       PIPE_CONF_CHECK_X(pch_pfit.pos);
+                       PIPE_CONF_CHECK_X(pch_pfit.size);
+               }
+
+               PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+               PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+
+               PIPE_CONF_CHECK_X(gamma_mode);
+               if (IS_CHERRYVIEW(dev_priv))
+                       PIPE_CONF_CHECK_X(cgm_mode);
+               else
+                       PIPE_CONF_CHECK_X(csc_mode);
+               PIPE_CONF_CHECK_BOOL(gamma_enable);
+               PIPE_CONF_CHECK_BOOL(csc_enable);
+       }
+
+       PIPE_CONF_CHECK_BOOL(double_wide);
+
+       PIPE_CONF_CHECK_P(shared_dpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
+       PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.spll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
+       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
+
+       PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+       PIPE_CONF_CHECK_X(dsi_pll.div);
+
+       if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
+               PIPE_CONF_CHECK_I(pipe_bpp);
+
+       PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
+       PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+
+       PIPE_CONF_CHECK_I(min_voltage_level);
+
+       PIPE_CONF_CHECK_X(infoframes.enable);
+       PIPE_CONF_CHECK_X(infoframes.gcp);
+       PIPE_CONF_CHECK_INFOFRAME(avi);
+       PIPE_CONF_CHECK_INFOFRAME(spd);
+       PIPE_CONF_CHECK_INFOFRAME(hdmi);
+       PIPE_CONF_CHECK_INFOFRAME(drm);
+
+#undef PIPE_CONF_CHECK_X
+#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_BOOL
+#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
+#undef PIPE_CONF_CHECK_P
+#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_QUIRK
+
+       return ret;
+}
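For readers unfamiliar with the PIPE_CONF_CHECK_* family defined above, each invocation expands to a plain field comparison plus an error report; PIPE_CONF_CHECK_I(pipe_bpp), for instance, is roughly:

        if (current_config->pipe_bpp != pipe_config->pipe_bpp) {
                pipe_config_err(adjust, "pipe_bpp",
                                "(expected %i, found %i)\n",
                                current_config->pipe_bpp,
                                pipe_config->pipe_bpp);
                ret = false;
        }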
+
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+                                          const struct intel_crtc_state *pipe_config)
+{
+       if (pipe_config->has_pch_encoder) {
+               int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+                                                           &pipe_config->fdi_m_n);
+               int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+
+               /*
+                * FDI already provided one idea for the dotclock.
+                * Yell if the encoder disagrees.
+                */
+               WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+                    "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+                    fdi_dotclock, dotclock);
+       }
+}
+
+static void verify_wm_state(struct drm_crtc *crtc,
+                           struct drm_crtc_state *new_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct skl_hw_state {
+               struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+               struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
+               struct skl_ddb_allocation ddb;
+               struct skl_pipe_wm wm;
+       } *hw;
+       struct skl_ddb_allocation *sw_ddb;
+       struct skl_pipe_wm *sw_wm;
+       struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const enum pipe pipe = intel_crtc->pipe;
+       int plane, level, max_level = ilk_wm_max_level(dev_priv);
+
+       if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
+               return;
+
+       hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+       if (!hw)
+               return;
+
+       skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
+       sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+
+       skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
+
+       skl_ddb_get_hw_state(dev_priv, &hw->ddb);
+       sw_ddb = &dev_priv->wm.skl_hw.ddb;
+
+       if (INTEL_GEN(dev_priv) >= 11 &&
+           hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+               DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+                         sw_ddb->enabled_slices,
+                         hw->ddb.enabled_slices);
+
+       /* planes */
+       for_each_universal_plane(dev_priv, pipe, plane) {
+               struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+               hw_plane_wm = &hw->wm.planes[plane];
+               sw_plane_wm = &sw_wm->planes[plane];
+
+               /* Watermarks */
+               for (level = 0; level <= max_level; level++) {
+                       if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+                                               &sw_plane_wm->wm[level]))
+                               continue;
+
+                       DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+                                 pipe_name(pipe), plane + 1, level,
+                                 sw_plane_wm->wm[level].plane_en,
+                                 sw_plane_wm->wm[level].plane_res_b,
+                                 sw_plane_wm->wm[level].plane_res_l,
+                                 hw_plane_wm->wm[level].plane_en,
+                                 hw_plane_wm->wm[level].plane_res_b,
+                                 hw_plane_wm->wm[level].plane_res_l);
+               }
+
+               if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+                                        &sw_plane_wm->trans_wm)) {
+                       DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+                                 pipe_name(pipe), plane + 1,
+                                 sw_plane_wm->trans_wm.plane_en,
+                                 sw_plane_wm->trans_wm.plane_res_b,
+                                 sw_plane_wm->trans_wm.plane_res_l,
+                                 hw_plane_wm->trans_wm.plane_en,
+                                 hw_plane_wm->trans_wm.plane_res_b,
+                                 hw_plane_wm->trans_wm.plane_res_l);
+               }
+
+               /* DDB */
+               hw_ddb_entry = &hw->ddb_y[plane];
+               sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
+
+               if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+                       DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+                                 pipe_name(pipe), plane + 1,
+                                 sw_ddb_entry->start, sw_ddb_entry->end,
+                                 hw_ddb_entry->start, hw_ddb_entry->end);
+               }
+       }
+
+       /*
+        * cursor
+        * If the cursor plane isn't active, we may not have updated its ddb
+        * allocation. In that case, since the ddb allocation will be updated
+        * once the plane becomes visible, we can skip this check.
+        */
+       if (1) {
+               struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+               hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
+               sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
+
+               /* Watermarks */
+               for (level = 0; level <= max_level; level++) {
+                       if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+                                               &sw_plane_wm->wm[level]))
+                               continue;
+
+                       DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+                                 pipe_name(pipe), level,
+                                 sw_plane_wm->wm[level].plane_en,
+                                 sw_plane_wm->wm[level].plane_res_b,
+                                 sw_plane_wm->wm[level].plane_res_l,
+                                 hw_plane_wm->wm[level].plane_en,
+                                 hw_plane_wm->wm[level].plane_res_b,
+                                 hw_plane_wm->wm[level].plane_res_l);
+               }
+
+               if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+                                        &sw_plane_wm->trans_wm)) {
+                       DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+                                 pipe_name(pipe),
+                                 sw_plane_wm->trans_wm.plane_en,
+                                 sw_plane_wm->trans_wm.plane_res_b,
+                                 sw_plane_wm->trans_wm.plane_res_l,
+                                 hw_plane_wm->trans_wm.plane_en,
+                                 hw_plane_wm->trans_wm.plane_res_b,
+                                 hw_plane_wm->trans_wm.plane_res_l);
+               }
+
+               /* DDB */
+               hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
+               sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
+
+               if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+                       DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+                                 pipe_name(pipe),
+                                 sw_ddb_entry->start, sw_ddb_entry->end,
+                                 hw_ddb_entry->start, hw_ddb_entry->end);
+               }
+       }
+
+       kfree(hw);
+}
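Editor's illustration, not part of this commit: verify_wm_state() follows the driver's usual cross-check pattern, read the hardware state back into a scratch structure and compare it field by field against the software state, complaining loudly on any mismatch. A small self-contained sketch of that pattern for a DDB entry, assuming an entry is just a [start, end) range as the printouts above suggest:

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned short start, end; };

bool ddb_entry_equal(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start == b->start && a->end == b->end;
}

/* Compare a hardware readout against the software bookkeeping and report
 * any divergence, mirroring the DRM_ERROR() pattern above. */
void check_plane_ddb(int plane, const struct ddb_entry *hw,
		     const struct ddb_entry *sw)
{
	if (!ddb_entry_equal(hw, sw))
		fprintf(stderr,
			"mismatch plane %d (expected (%u,%u), found (%u,%u))\n",
			plane, sw->start, sw->end, hw->start, hw->end);
}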
+
+static void
+verify_connector_state(struct drm_device *dev,
+                      struct drm_atomic_state *state,
+                      struct drm_crtc *crtc)
+{
+       struct drm_connector *connector;
+       struct drm_connector_state *new_conn_state;
+       int i;
+
+       for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+               struct drm_encoder *encoder = connector->encoder;
+               struct drm_crtc_state *crtc_state = NULL;
+
+               if (new_conn_state->crtc != crtc)
+                       continue;
+
+               if (crtc)
+                       crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+
+               intel_connector_verify_state(crtc_state, new_conn_state);
+
+               I915_STATE_WARN(new_conn_state->best_encoder != encoder,
+                    "connector's atomic encoder doesn't match legacy encoder\n");
+       }
+}
+
+static void
+verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
+{
+       struct intel_encoder *encoder;
+       struct drm_connector *connector;
+       struct drm_connector_state *old_conn_state, *new_conn_state;
+       int i;
+
+       for_each_intel_encoder(dev, encoder) {
+               bool enabled = false, found = false;
+               enum pipe pipe;
+
+               DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+                             encoder->base.base.id,
+                             encoder->base.name);
+
+               for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+                                                  new_conn_state, i) {
+                       if (old_conn_state->best_encoder == &encoder->base)
+                               found = true;
+
+                       if (new_conn_state->best_encoder != &encoder->base)
+                               continue;
+                       found = enabled = true;
+
+                       I915_STATE_WARN(new_conn_state->crtc !=
+                                       encoder->base.crtc,
+                            "connector's crtc doesn't match encoder crtc\n");
+               }
+
+               if (!found)
+                       continue;
+
+               I915_STATE_WARN(!!encoder->base.crtc != enabled,
+                    "encoder's enabled state mismatch "
+                    "(expected %i, found %i)\n",
+                    !!encoder->base.crtc, enabled);
+
+               if (!encoder->base.crtc) {
+                       bool active;
+
+                       active = encoder->get_hw_state(encoder, &pipe);
+                       I915_STATE_WARN(active,
+                            "encoder detached but still enabled on pipe %c.\n",
+                            pipe_name(pipe));
+               }
+       }
+}
+
+static void
+verify_crtc_state(struct drm_crtc *crtc,
+                 struct drm_crtc_state *old_crtc_state,
+                 struct drm_crtc_state *new_crtc_state)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_encoder *encoder;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config, *sw_config;
+       struct drm_atomic_state *old_state;
+       bool active;
+
+       old_state = old_crtc_state->state;
+       __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
+       pipe_config = to_intel_crtc_state(old_crtc_state);
+       memset(pipe_config, 0, sizeof(*pipe_config));
+       pipe_config->base.crtc = crtc;
+       pipe_config->base.state = old_state;
+
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+       active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
+
+       /* we keep both pipes enabled on 830 */
+       if (IS_I830(dev_priv))
+               active = new_crtc_state->active;
+
+       I915_STATE_WARN(new_crtc_state->active != active,
+            "crtc active state doesn't match with hw state "
+            "(expected %i, found %i)\n", new_crtc_state->active, active);
+
+       I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+            "transitional active state does not match atomic hw state "
+            "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               enum pipe pipe;
+
+               active = encoder->get_hw_state(encoder, &pipe);
+               I915_STATE_WARN(active != new_crtc_state->active,
+                       "[ENCODER:%i] active %i with crtc active %i\n",
+                       encoder->base.base.id, active, new_crtc_state->active);
+
+               I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+                               "Encoder connected to wrong pipe %c\n",
+                               pipe_name(pipe));
+
+               if (active)
+                       encoder->get_config(encoder, pipe_config);
+       }
+
+       intel_crtc_compute_pixel_rate(pipe_config);
+
+       if (!new_crtc_state->active)
+               return;
+
+       intel_pipe_config_sanity_check(dev_priv, pipe_config);
+
+       sw_config = to_intel_crtc_state(new_crtc_state);
+       if (!intel_pipe_config_compare(dev_priv, sw_config,
+                                      pipe_config, false)) {
+               I915_STATE_WARN(1, "pipe state doesn't match!\n");
+               intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
+               intel_dump_pipe_config(sw_config, NULL, "[sw state]");
+       }
+}
+
+static void
+intel_verify_planes(struct intel_atomic_state *state)
+{
+       struct intel_plane *plane;
+       const struct intel_plane_state *plane_state;
+       int i;
+
+       for_each_new_intel_plane_in_state(state, plane,
+                                         plane_state, i)
+               assert_plane(plane, plane_state->slave ||
+                            plane_state->base.visible);
+}
+
+static void
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll,
+                        struct drm_crtc *crtc,
+                        struct drm_crtc_state *new_state)
+{
+       struct intel_dpll_hw_state dpll_hw_state;
+       unsigned int crtc_mask;
+       bool active;
+
+       memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+       DRM_DEBUG_KMS("%s\n", pll->info->name);
+
+       active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+       if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
+               I915_STATE_WARN(!pll->on && pll->active_mask,
+                    "pll in active use but not on in sw tracking\n");
+               I915_STATE_WARN(pll->on && !pll->active_mask,
+                    "pll is on but not used by any active crtc\n");
+               I915_STATE_WARN(pll->on != active,
+                    "pll on state mismatch (expected %i, found %i)\n",
+                    pll->on, active);
+       }
+
+       if (!crtc) {
+               I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
+                               "more active pll users than references: %x vs %x\n",
+                               pll->active_mask, pll->state.crtc_mask);
+
+               return;
+       }
+
+       crtc_mask = drm_crtc_mask(crtc);
+
+       if (new_state->active)
+               I915_STATE_WARN(!(pll->active_mask & crtc_mask),
+                               "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+                               pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+       else
+               I915_STATE_WARN(pll->active_mask & crtc_mask,
+                               "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+                               pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+
+       I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
+                       "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
+                       crtc_mask, pll->state.crtc_mask);
+
+       I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
+                                         &dpll_hw_state,
+                                         sizeof(dpll_hw_state)),
+                       "pll hw state mismatch\n");
+}
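Editor's illustration, not part of this commit: the PLL checks above lean on two per-PLL bitmasks, crtc_mask (which crtcs reference the PLL) and active_mask (which of those crtcs are currently running), with the invariant that every active user must also hold a reference. A standalone sketch of that bookkeeping, assuming crtc N maps to bit 1 << N as drm_crtc_mask() does:

#include <assert.h>

int main(void)
{
	unsigned int crtc_mask = 0;	/* crtcs referencing the PLL */
	unsigned int active_mask = 0;	/* referencing crtcs that are enabled */

	crtc_mask |= 1u << 0;		/* crtc 0 takes a reference... */
	active_mask |= 1u << 0;		/* ...and turns on */

	/* Invariant checked by verify_single_dpll_state(): no active user
	 * without a reference. */
	assert(!(active_mask & ~crtc_mask));

	active_mask &= ~(1u << 0);	/* crtc 0 turns off, keeps the reference */
	assert(!(active_mask & ~crtc_mask));
	return 0;
}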
+
+static void
+verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
+                        struct drm_crtc_state *old_crtc_state,
+                        struct drm_crtc_state *new_crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
+       struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+
+       if (new_state->shared_dpll)
+               verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+
+       if (old_state->shared_dpll &&
+           old_state->shared_dpll != new_state->shared_dpll) {
+               unsigned int crtc_mask = drm_crtc_mask(crtc);
+               struct intel_shared_dpll *pll = old_state->shared_dpll;
+
+               I915_STATE_WARN(pll->active_mask & crtc_mask,
+                               "pll active mismatch (didn't expect pipe %c in active mask)\n",
+                               pipe_name(drm_crtc_index(crtc)));
+               I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
+                               "pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
+                               pipe_name(drm_crtc_index(crtc)));
+       }
+}
+
+static void
+intel_modeset_verify_crtc(struct drm_crtc *crtc,
+                         struct drm_atomic_state *state,
+                         struct drm_crtc_state *old_state,
+                         struct drm_crtc_state *new_state)
+{
+       if (!needs_modeset(new_state) &&
+           !to_intel_crtc_state(new_state)->update_pipe)
+               return;
+
+       verify_wm_state(crtc, new_state);
+       verify_connector_state(crtc->dev, state, crtc);
+       verify_crtc_state(crtc, old_state, new_state);
+       verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+}
+
+static void
+verify_disabled_dpll_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int i;
+
+       for (i = 0; i < dev_priv->num_shared_dpll; i++)
+               verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+}
+
+static void
+intel_modeset_verify_disabled(struct drm_device *dev,
+                             struct drm_atomic_state *state)
+{
+       verify_encoder_state(dev, state);
+       verify_connector_state(dev, state, NULL);
+       verify_disabled_dpll_state(dev);
+}
+
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       /*
+        * The scanline counter increments at the leading edge of hsync.
+        *
+        * On most platforms it starts counting from vtotal-1 on the
+        * first active line. That means the scanline counter value is
+        * always one less than what we would expect, i.e. just after
+        * start of vblank, which also occurs at start of hsync (on the
+        * last active line), the scanline counter will read vblank_start-1.
+        *
+        * On gen2 the scanline counter starts counting from 1 instead
+        * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+        * to keep the value positive), instead of adding one.
+        *
+        * On HSW+ the behaviour of the scanline counter depends on the output
+        * type. For DP ports it behaves like most other platforms, but on HDMI
+        * there's an extra 1 line difference. So we need to add two instead of
+        * one to the value.
+        *
+        * On VLV/CHV DSI the scanline counter would appear to increment
+        * approx. 1/3 of a scanline before start of vblank. Unfortunately
+        * that means we can't tell whether we're in vblank or not while
+        * we're on that particular line. We must still set scanline_offset
+        * to 1 so that the vblank timestamps come out correct when we query
+        * the scanline counter from within the vblank interrupt handler.
+        * However if queried just before the start of vblank we'll get an
+        * answer that's slightly in the future.
+        */
+       if (IS_GEN(dev_priv, 2)) {
+               const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+               int vtotal;
+
+               vtotal = adjusted_mode->crtc_vtotal;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       vtotal /= 2;
+
+               crtc->scanline_offset = vtotal - 1;
+       } else if (HAS_DDI(dev_priv) &&
+                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               crtc->scanline_offset = 2;
+       } else {
+               crtc->scanline_offset = 1;
+       }
+}
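Editor's illustration, not part of this commit: the scanline_offset computed above is later added to the raw hardware scanline counter and wrapped at vtotal, so the corrected value reads vblank_start right at the start of vblank on every platform. A sketch of that correction under that assumption (the driver's actual readout helper lives elsewhere in this file):

/* Illustrative only: fold the per-platform offset into a raw counter value. */
int corrected_scanline(int hw_scanline, int scanline_offset, int vtotal)
{
	return (hw_scanline + scanline_offset) % vtotal;
}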
+
+static void intel_modeset_clear_plls(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+       struct intel_crtc *crtc;
+       int i;
+
+       if (!dev_priv->display.crtc_compute_clock)
+               return;
+
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               struct intel_shared_dpll *old_dpll =
+                       old_crtc_state->shared_dpll;
+
+               if (!needs_modeset(&new_crtc_state->base))
+                       continue;
+
+               new_crtc_state->shared_dpll = NULL;
+
+               if (!old_dpll)
+                       continue;
+
+               intel_release_shared_dpll(old_dpll, crtc, &state->base);
+       }
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
+{
+       struct intel_crtc_state *crtc_state;
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *first_crtc_state = NULL;
+       struct intel_crtc_state *other_crtc_state = NULL;
+       enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
+       int i;
+
+       /* look at all crtcs that are going to be enabled during the modeset */
+       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+               if (!crtc_state->base.active ||
+                   !needs_modeset(&crtc_state->base))
+                       continue;
+
+               if (first_crtc_state) {
+                       other_crtc_state = crtc_state;
+                       break;
+               } else {
+                       first_crtc_state = crtc_state;
+                       first_pipe = crtc->pipe;
+               }
+       }
+
+       /* No workaround needed? */
+       if (!first_crtc_state)
+               return 0;
+
+       /* w/a possibly needed, check how many crtcs are already enabled. */
+       for_each_intel_crtc(state->base.dev, crtc) {
+               crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+
+               crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+
+               if (!crtc_state->base.active ||
+                   needs_modeset(&crtc_state->base))
+                       continue;
+
+               /* 2 or more enabled crtcs means no need for w/a */
+               if (enabled_pipe != INVALID_PIPE)
+                       return 0;
+
+               enabled_pipe = crtc->pipe;
+       }
+
+       if (enabled_pipe != INVALID_PIPE)
+               first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+       else if (other_crtc_state)
+               other_crtc_state->hsw_workaround_pipe = first_pipe;
+
+       return 0;
+}
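Editor's illustration, not part of this commit: the outcome of the workaround above depends only on how many pipes are already running and how many are being enabled. A standalone restatement of that decision (names are the editor's, not driver code):

enum hsw_wa_pipe { WA_NONE, WA_FIRST_NEW_PIPE, WA_SECOND_NEW_PIPE };

enum hsw_wa_pipe hsw_wa_pipe_for(int already_enabled, int newly_enabled)
{
	if (newly_enabled == 0 || already_enabled >= 2)
		return WA_NONE;			/* no workaround needed */
	if (already_enabled == 1)
		return WA_FIRST_NEW_PIPE;	/* waits on the already enabled pipe */
	if (newly_enabled >= 2)
		return WA_SECOND_NEW_PIPE;	/* waits on the first newly enabled pipe */
	return WA_NONE;				/* a single pipe coming up alone */
}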
+
+static int intel_lock_all_pipes(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+
+       /* Add all pipes to the state */
+       for_each_crtc(state->dev, crtc) {
+               struct drm_crtc_state *crtc_state;
+
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+       }
+
+       return 0;
+}
+
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+
+       /*
+        * Add all pipes to the state, and force
+        * a modeset on all the active ones.
+        */
+       for_each_crtc(state->dev, crtc) {
+               struct drm_crtc_state *crtc_state;
+               int ret;
+
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+
+               if (!crtc_state->active || needs_modeset(crtc_state))
+                       continue;
+
+               crtc_state->mode_changed = true;
+
+               ret = drm_atomic_add_affected_connectors(state, crtc);
+               if (ret)
+                       return ret;
+
+               ret = drm_atomic_add_affected_planes(state, crtc);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int intel_modeset_checks(struct intel_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+       struct intel_crtc *crtc;
+       int ret = 0, i;
+
+       if (!check_digital_port_conflicts(state)) {
+               DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+               return -EINVAL;
+       }
+
+       /* keep the current setting */
+       if (!state->cdclk.force_min_cdclk_changed)
+               state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+
+       state->modeset = true;
+       state->active_crtcs = dev_priv->active_crtcs;
+       state->cdclk.logical = dev_priv->cdclk.logical;
+       state->cdclk.actual = dev_priv->cdclk.actual;
+       state->cdclk.pipe = INVALID_PIPE;
+
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               if (new_crtc_state->base.active)
+                       state->active_crtcs |= 1 << i;
+               else
+                       state->active_crtcs &= ~(1 << i);
+
+               if (old_crtc_state->base.active != new_crtc_state->base.active)
+                       state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
+       }
+
+       /*
+        * See if the config requires any additional preparation, e.g.
+        * to adjust global state with pipes off.  We need to do this
+        * here so that we can get the updated config for the new mode set
+        * on this crtc.  For other crtcs we need to use the
+        * adjusted_mode bits in the crtc directly.
+        */
+       if (dev_priv->display.modeset_calc_cdclk) {
+               enum pipe pipe;
+
+               ret = dev_priv->display.modeset_calc_cdclk(state);
+               if (ret < 0)
+                       return ret;
+
+               /*
+                * Writes to dev_priv->cdclk.logical must be protected by
+                * holding all the crtc locks, even if we don't end up
+                * touching the hardware.
+                */
+               if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+                                       &state->cdclk.logical)) {
+                       ret = intel_lock_all_pipes(&state->base);
+                       if (ret < 0)
+                               return ret;
+               }
+
+               if (is_power_of_2(state->active_crtcs)) {
+                       struct drm_crtc *crtc;
+                       struct drm_crtc_state *crtc_state;
+
+                       pipe = ilog2(state->active_crtcs);
+                       crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
+                       crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
+                       if (crtc_state && needs_modeset(crtc_state))
+                               pipe = INVALID_PIPE;
+               } else {
+                       pipe = INVALID_PIPE;
+               }
+
+               /* All pipes must be switched off while we change the cdclk. */
+               if (pipe != INVALID_PIPE &&
+                   intel_cdclk_needs_cd2x_update(dev_priv,
+                                                 &dev_priv->cdclk.actual,
+                                                 &state->cdclk.actual)) {
+                       ret = intel_lock_all_pipes(&state->base);
+                       if (ret < 0)
+                               return ret;
+
+                       state->cdclk.pipe = pipe;
+               } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+                                                    &state->cdclk.actual)) {
+                       ret = intel_modeset_all_pipes(&state->base);
+                       if (ret < 0)
+                               return ret;
+
+                       state->cdclk.pipe = INVALID_PIPE;
+               }
+
+               DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+                             state->cdclk.logical.cdclk,
+                             state->cdclk.actual.cdclk);
+               DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+                             state->cdclk.logical.voltage_level,
+                             state->cdclk.actual.voltage_level);
+       }
+
+       intel_modeset_clear_plls(state);
+
+       if (IS_HASWELL(dev_priv))
+               return haswell_mode_set_planes_workaround(state);
+
+       return 0;
+}
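Editor's illustration, not part of this commit: the cdclk path above detects the "exactly one active crtc" case by testing whether active_crtcs is a power of two, then recovers the pipe index with ilog2(). A standalone equivalent, using the GCC/Clang builtin in place of the kernel helpers:

#include <assert.h>
#include <stdbool.h>

bool exactly_one_bit_set(unsigned int mask)
{
	return mask && !(mask & (mask - 1));	/* is_power_of_2() */
}

int single_bit_index(unsigned int mask)
{
	return __builtin_ctz(mask);		/* ilog2() of a one-bit mask */
}

int main(void)
{
	assert(exactly_one_bit_set(0x4));	/* only pipe C active */
	assert(single_bit_index(0x4) == 2);
	assert(!exactly_one_bit_set(0x5));	/* pipes A and C active */
	return 0;
}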
+
+/*
+ * Handle calculation of various watermark data at the end of the atomic check
+ * phase.  The code here should be run after the per-crtc and per-plane 'check'
+ * handlers to ensure that all derived state has been updated.
+ */
+static int calc_watermark_data(struct intel_atomic_state *state)
+{
+       struct drm_device *dev = state->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       /* Is there platform-specific watermark information to calculate? */
+       if (dev_priv->display.compute_global_watermarks)
+               return dev_priv->display.compute_global_watermarks(state);
+
+       return 0;
+}
+
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @_state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+                             struct drm_atomic_state *_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_atomic_state *state = to_intel_atomic_state(_state);
+       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+       struct intel_crtc *crtc;
+       int ret, i;
+       bool any_ms = state->cdclk.force_min_cdclk_changed;
+
+       /* Catch I915_MODE_FLAG_INHERITED */
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               if (new_crtc_state->base.mode.private_flags !=
+                   old_crtc_state->base.mode.private_flags)
+                       new_crtc_state->base.mode_changed = true;
+       }
+
+       ret = drm_atomic_helper_check_modeset(dev, &state->base);
+       if (ret)
+               goto fail;
+
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               if (!needs_modeset(&new_crtc_state->base))
+                       continue;
+
+               if (!new_crtc_state->base.enable) {
+                       any_ms = true;
+                       continue;
+               }
+
+               ret = intel_modeset_pipe_config(new_crtc_state);
+               if (ret)
+                       goto fail;
+
+               if (intel_pipe_config_compare(dev_priv, old_crtc_state,
+                                             new_crtc_state, true)) {
+                       new_crtc_state->base.mode_changed = false;
+                       new_crtc_state->update_pipe = true;
+               }
+
+               if (needs_modeset(&new_crtc_state->base))
+                       any_ms = true;
+       }
+
+       ret = drm_dp_mst_atomic_check(&state->base);
+       if (ret)
+               goto fail;
+
+       if (any_ms) {
+               ret = intel_modeset_checks(state);
+               if (ret)
+                       goto fail;
+       } else {
+               state->cdclk.logical = dev_priv->cdclk.logical;
+       }
+
+       ret = icl_add_linked_planes(state);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_helper_check_planes(dev, &state->base);
+       if (ret)
+               goto fail;
+
+       intel_fbc_choose_crtc(dev_priv, state);
+       ret = calc_watermark_data(state);
+       if (ret)
+               goto fail;
+
+       ret = intel_bw_atomic_check(state);
+       if (ret)
+               goto fail;
+
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               if (!needs_modeset(&new_crtc_state->base) &&
+                   !new_crtc_state->update_pipe)
+                       continue;
+
+               intel_dump_pipe_config(new_crtc_state, state,
+                                      needs_modeset(&new_crtc_state->base) ?
+                                      "[modeset]" : "[fastset]");
+       }
+
+       return 0;
+
+ fail:
+       if (ret == -EDEADLK)
+               return ret;
+
+       /*
+        * FIXME would probably be nice to know which crtc specifically
+        * caused the failure, in cases where we can pinpoint it.
+        */
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i)
+               intel_dump_pipe_config(new_crtc_state, state, "[failed]");
+
+       return ret;
+}
+
+static int intel_atomic_prepare_commit(struct drm_device *dev,
+                                      struct drm_atomic_state *state)
+{
+       return drm_atomic_helper_prepare_planes(dev, state);
+}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
+       if (!vblank->max_vblank_count)
+               return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+       return dev->driver->get_vblank_counter(dev, crtc->pipe);
+}
+
+static void intel_update_crtc(struct drm_crtc *crtc,
+                             struct drm_atomic_state *state,
+                             struct drm_crtc_state *old_crtc_state,
+                             struct drm_crtc_state *new_crtc_state)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
+       bool modeset = needs_modeset(new_crtc_state);
+       struct intel_plane_state *new_plane_state =
+               intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
+                                                to_intel_plane(crtc->primary));
+
+       if (modeset) {
+               update_scanline_offset(pipe_config);
+               dev_priv->display.crtc_enable(pipe_config, state);
+
+               /* vblanks work again, re-enable pipe CRC. */
+               intel_crtc_enable_pipe_crc(intel_crtc);
+       } else {
+               intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
+                                      pipe_config);
+
+               if (pipe_config->update_pipe)
+                       intel_encoders_update_pipe(crtc, pipe_config, state);
+       }
+
+       if (pipe_config->update_pipe && !pipe_config->enable_fbc)
+               intel_fbc_disable(intel_crtc);
+       else if (new_plane_state)
+               intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
+
+       intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+       else
+               i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+
+       intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       int i;
+
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               if (!new_crtc_state->active)
+                       continue;
+
+               intel_update_crtc(crtc, state, old_crtc_state,
+                                 new_crtc_state);
+       }
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct intel_crtc_state *cstate;
+       unsigned int updated = 0;
+       bool progress;
+       enum pipe pipe;
+       int i;
+       u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+       u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
+       struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+               /* ignore allocations for crtcs that have been turned off. */
+               if (new_crtc_state->active)
+                       entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+
+       /* If the 2nd DBuf slice is required, enable it here */
+       if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
+               icl_dbuf_slices_update(dev_priv, required_slices);
+
+       /*
+        * Whenever the number of active pipes changes, we need to make sure we
+        * update the pipes in the right order so that their ddb allocations
+        * never overlap with each other in between CRTC updates. Otherwise we'll
+        * cause pipe underruns and other bad stuff.
+        */
+       do {
+               progress = false;
+
+               for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+                       bool vbl_wait = false;
+                       unsigned int cmask = drm_crtc_mask(crtc);
+
+                       intel_crtc = to_intel_crtc(crtc);
+                       cstate = to_intel_crtc_state(new_crtc_state);
+                       pipe = intel_crtc->pipe;
+
+                       if (updated & cmask || !cstate->base.active)
+                               continue;
+
+                       if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
+                                                       entries,
+                                                       INTEL_INFO(dev_priv)->num_pipes, i))
+                               continue;
+
+                       updated |= cmask;
+                       entries[i] = cstate->wm.skl.ddb;
+
+                       /*
+                        * If this is an already active pipe, its DDB changed,
+                        * and this isn't the last pipe that needs updating,
+                        * then we need to wait for a vblank to pass for the
+                        * new ddb allocation to take effect.
+                        */
+                       if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
+                                                &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
+                           !new_crtc_state->active_changed &&
+                           intel_state->wm_results.dirty_pipes != updated)
+                               vbl_wait = true;
+
+                       intel_update_crtc(crtc, state, old_crtc_state,
+                                         new_crtc_state);
+
+                       if (vbl_wait)
+                               intel_wait_for_vblank(dev_priv, pipe);
+
+                       progress = true;
+               }
+       } while (progress);
+
+       /* If the 2nd DBuf slice is no longer required, disable it */
+       if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+               icl_dbuf_slices_update(dev_priv, required_slices);
+}
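Editor's illustration, not part of this commit: the update loop above only commits a pipe once its new DDB range no longer overlaps any other pipe's still-programmed range, which is what keeps the allocations from colliding mid-sequence. A minimal sketch of that half-open interval test, assuming entries are [start, end) ranges like skl_ddb_entry:

#include <stdbool.h>

struct ddb_range { unsigned short start, end; };

bool ranges_overlap(const struct ddb_range *a, const struct ddb_range *b)
{
	/* Half-open intervals; an empty (0, 0) entry never overlaps. */
	return a->start < b->end && b->start < a->end;
}

bool overlaps_any_other(const struct ddb_range *mine,
			const struct ddb_range *others, int n, int self)
{
	for (int i = 0; i < n; i++)
		if (i != self && ranges_overlap(mine, &others[i]))
			return true;
	return false;
}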
+
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+       intel_atomic_helper_free_state(dev_priv);
+}
+
+static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
+{
+       struct wait_queue_entry wait_fence, wait_reset;
+       struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+
+       init_wait_entry(&wait_fence, 0);
+       init_wait_entry(&wait_reset, 0);
+       for (;;) {
+               prepare_to_wait(&intel_state->commit_ready.wait,
+                               &wait_fence, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+                               &wait_reset, TASK_UNINTERRUPTIBLE);
+
+               if (i915_sw_fence_done(&intel_state->commit_ready)
+                   || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+                       break;
+
+               schedule();
+       }
+       finish_wait(&intel_state->commit_ready.wait, &wait_fence);
+       finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+}
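Editor's illustration, not part of this commit: the loop above parks the task on two wait queues at once and only proceeds when either the commit fence has completed or a GPU reset has flagged a modeset, whichever comes first. A rough userspace analogue of that "wait for either condition" shape using a condition variable (nothing here is kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool fence_done, reset_pending;	/* set by other threads, which then signal cond */

void wait_for_fence_or_reset(void)
{
	pthread_mutex_lock(&lock);
	while (!fence_done && !reset_pending)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}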
+
+static void intel_atomic_cleanup_work(struct work_struct *work)
+{
+       struct drm_atomic_state *state =
+               container_of(work, struct drm_atomic_state, commit_work);
+       struct drm_i915_private *i915 = to_i915(state->dev);
+
+       drm_atomic_helper_cleanup_planes(&i915->drm, state);
+       drm_atomic_helper_commit_cleanup_done(state);
+       drm_atomic_state_put(state);
+
+       intel_atomic_helper_free_state(i915);
+}
+
+static void intel_atomic_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       u64 put_domains[I915_MAX_PIPES] = {};
+       intel_wakeref_t wakeref = 0;
+       int i;
+
+       intel_atomic_commit_fence_wait(intel_state);
+
+       drm_atomic_helper_wait_for_dependencies(state);
+
+       if (intel_state->modeset)
+               wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (needs_modeset(new_crtc_state) ||
+                   to_intel_crtc_state(new_crtc_state)->update_pipe) {
+
+                       put_domains[intel_crtc->pipe] =
+                               modeset_get_crtc_power_domains(crtc,
+                                       new_intel_crtc_state);
+               }
+
+               if (!needs_modeset(new_crtc_state))
+                       continue;
+
+               intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
+
+               if (old_crtc_state->active) {
+                       intel_crtc_disable_planes(intel_state, intel_crtc);
+
+                       /*
+                        * We need to disable pipe CRC before disabling the pipe,
+                        * or we race against vblank off.
+                        */
+                       intel_crtc_disable_pipe_crc(intel_crtc);
+
+                       dev_priv->display.crtc_disable(old_intel_crtc_state, state);
+                       intel_crtc->active = false;
+                       intel_fbc_disable(intel_crtc);
+                       intel_disable_shared_dpll(old_intel_crtc_state);
+
+                       /*
+                        * Underruns don't always raise
+                        * interrupts, so check manually.
+                        */
+                       intel_check_cpu_fifo_underruns(dev_priv);
+                       intel_check_pch_fifo_underruns(dev_priv);
+
+                       /* FIXME unify this for all platforms */
+                       if (!new_crtc_state->active &&
+                           !HAS_GMCH(dev_priv) &&
+                           dev_priv->display.initial_watermarks)
+                               dev_priv->display.initial_watermarks(intel_state,
+                                                                    new_intel_crtc_state);
+               }
+       }
+
+       /* FIXME: Eventually get rid of our intel_crtc->config pointer */
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
+
+       if (intel_state->modeset) {
+               drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+
+               intel_set_cdclk_pre_plane_update(dev_priv,
+                                                &intel_state->cdclk.actual,
+                                                &dev_priv->cdclk.actual,
+                                                intel_state->cdclk.pipe);
+
+               /*
+                * SKL workaround: bspec recommends we disable the SAGV when we
+                * have more than one pipe enabled.
+                */
+               if (!intel_can_enable_sagv(state))
+                       intel_disable_sagv(dev_priv);
+
+               intel_modeset_verify_disabled(dev, state);
+       }
+
+       /* Complete the events for pipes that have now been disabled */
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+               bool modeset = needs_modeset(new_crtc_state);
+
+               /* Complete events for now disabled pipes here. */
+               if (modeset && !new_crtc_state->active && new_crtc_state->event) {
+                       spin_lock_irq(&dev->event_lock);
+                       drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+                       spin_unlock_irq(&dev->event_lock);
+
+                       new_crtc_state->event = NULL;
+               }
+       }
+
+       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       dev_priv->display.update_crtcs(state);
+
+       if (intel_state->modeset)
+               intel_set_cdclk_post_plane_update(dev_priv,
+                                                 &intel_state->cdclk.actual,
+                                                 &dev_priv->cdclk.actual,
+                                                 intel_state->cdclk.pipe);
+
+       /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+        * already, but still need the state for the delayed optimization. To
+        * fix this:
+        * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+        * - schedule that vblank worker _before_ calling hw_done
+        * - at the start of commit_tail, cancel it _synchronously_
+        * - switch over to the vblank wait helper in the core after that since
+        *   we don't need our special handling any more.
+        */
+       drm_atomic_helper_wait_for_flip_done(dev, state);
+
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+
+               if (new_crtc_state->active &&
+                   !needs_modeset(new_crtc_state) &&
+                   (new_intel_crtc_state->base.color_mgmt_changed ||
+                    new_intel_crtc_state->update_pipe))
+                       intel_color_load_luts(new_intel_crtc_state);
+       }
+
+       /*
+        * Now that the vblank has passed, we can go ahead and program the
+        * optimal watermarks on platforms that need two-step watermark
+        * programming.
+        *
+        * TODO: Move this (and other cleanup) to an async worker eventually.
+        */
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+
+               if (dev_priv->display.optimize_watermarks)
+                       dev_priv->display.optimize_watermarks(intel_state,
+                                                             new_intel_crtc_state);
+       }
+
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
+
+               if (put_domains[i])
+                       modeset_put_power_domains(dev_priv, put_domains[i]);
+
+               intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
+       }
+
+       if (intel_state->modeset)
+               intel_verify_planes(intel_state);
+
+       if (intel_state->modeset && intel_can_enable_sagv(state))
+               intel_enable_sagv(dev_priv);
+
+       drm_atomic_helper_commit_hw_done(state);
+
+       if (intel_state->modeset) {
+               /* As one of the primary mmio accessors, KMS has a high
+                * likelihood of triggering bugs in unclaimed access. After we
+                * finish modesetting, see if an error has been flagged, and if
+                * so enable debugging for the next modeset - and hope we catch
+                * the culprit.
+                */
+               intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+               intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+       }
+       intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+
+       /*
+        * Defer the cleanup of the old state to a separate worker so as not to
+        * impede the current task (userspace for blocking modesets), which is
+        * executed inline. For out-of-line asynchronous modesets/flips,
+        * deferring to a new worker seems overkill, but we would place a
+        * schedule point (cond_resched()) here anyway to keep latencies
+        * down.
+        */
+       INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
+       queue_work(system_highpri_wq, &state->commit_work);
+}
+
+static void intel_atomic_commit_work(struct work_struct *work)
+{
+       struct drm_atomic_state *state =
+               container_of(work, struct drm_atomic_state, commit_work);
+
+       intel_atomic_commit_tail(state);
+}
+
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+                         enum i915_sw_fence_notify notify)
+{
+       struct intel_atomic_state *state =
+               container_of(fence, struct intel_atomic_state, commit_ready);
+
+       switch (notify) {
+       case FENCE_COMPLETE:
+               /* we do blocking waits in the worker, nothing to do here */
+               break;
+       case FENCE_FREE:
+               {
+                       struct intel_atomic_helper *helper =
+                               &to_i915(state->base.dev)->atomic_helper;
+
+                       if (llist_add(&state->freed, &helper->free_list))
+                               schedule_work(&helper->free_work);
+                       break;
+               }
+       }
+
+       return NOTIFY_DONE;
+}
+
+static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+{
+       struct drm_plane_state *old_plane_state, *new_plane_state;
+       struct drm_plane *plane;
+       int i;
+
+       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
+               i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+                                 intel_fb_obj(new_plane_state->fb),
+                                 to_intel_plane(plane)->frontbuffer_bit);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @nonblock: nonblocking commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+                              struct drm_atomic_state *state,
+                              bool nonblock)
+{
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int ret = 0;
+
+       intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+       drm_atomic_state_get(state);
+       i915_sw_fence_init(&intel_state->commit_ready,
+                          intel_atomic_commit_ready);
+
+       /*
+        * The intel_legacy_cursor_update() fast path takes care
+        * of avoiding the vblank waits for simple cursor
+        * movement and flips. For cursor on/off and size changes,
+        * we want to perform the vblank waits so that watermark
+        * updates happen during the correct frames. Gen9+ have
+        * double buffered watermarks and so shouldn't need this.
+        *
+        * Unset state->legacy_cursor_update before the call to
+        * drm_atomic_helper_setup_commit() because otherwise
+        * drm_atomic_helper_wait_for_flip_done() is a noop and
+        * we get FIFO underruns because we didn't wait
+        * for vblank.
+        *
+        * FIXME doing watermarks and fb cleanup from a vblank worker
+        * (assuming we had any) would solve these problems.
+        */
+       if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+               struct intel_crtc_state *new_crtc_state;
+               struct intel_crtc *crtc;
+               int i;
+
+               for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+                       if (new_crtc_state->wm.need_postvbl_update ||
+                           new_crtc_state->update_wm_post)
+                               state->legacy_cursor_update = false;
+       }
+
+       ret = intel_atomic_prepare_commit(dev, state);
+       if (ret) {
+               DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+               i915_sw_fence_commit(&intel_state->commit_ready);
+               intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+               return ret;
+       }
+
+       ret = drm_atomic_helper_setup_commit(state, nonblock);
+       if (!ret)
+               ret = drm_atomic_helper_swap_state(state, true);
+
+       if (ret) {
+               i915_sw_fence_commit(&intel_state->commit_ready);
+
+               drm_atomic_helper_cleanup_planes(dev, state);
+               intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+               return ret;
+       }
+       dev_priv->wm.distrust_bios_wm = false;
+       intel_shared_dpll_swap_state(state);
+       intel_atomic_track_fbs(state);
+
+       if (intel_state->modeset) {
+               memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
+                      sizeof(intel_state->min_cdclk));
+               memcpy(dev_priv->min_voltage_level,
+                      intel_state->min_voltage_level,
+                      sizeof(intel_state->min_voltage_level));
+               dev_priv->active_crtcs = intel_state->active_crtcs;
+               dev_priv->cdclk.force_min_cdclk =
+                       intel_state->cdclk.force_min_cdclk;
+
+               intel_cdclk_swap_state(intel_state);
+       }
+
+       drm_atomic_state_get(state);
+       INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+
+       i915_sw_fence_commit(&intel_state->commit_ready);
+       if (nonblock && intel_state->modeset) {
+               queue_work(dev_priv->modeset_wq, &state->commit_work);
+       } else if (nonblock) {
+               queue_work(system_unbound_wq, &state->commit_work);
+       } else {
+               if (intel_state->modeset)
+                       flush_workqueue(dev_priv->modeset_wq);
+               intel_atomic_commit_tail(state);
+       }
+
+       return 0;
+}
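Editor's illustration, not part of this commit: the cursor fast path above is only preserved when no crtc needs a post-vblank watermark update; otherwise the vblank waits are re-enabled so the watermark updates land in the correct frames. A standalone restatement of that per-commit decision (the struct and names are the editor's, for illustration only):

#include <stdbool.h>

struct crtc_wm_flags { bool need_postvbl_update, update_wm_post; };

/* Keep the legacy cursor fast path (skip vblank waits) only if no crtc in
 * the commit needs its watermarks touched after the next vblank. */
bool keep_legacy_cursor_fast_path(const struct crtc_wm_flags *crtcs, int n)
{
	for (int i = 0; i < n; i++)
		if (crtcs[i].need_postvbl_update || crtcs[i].update_wm_post)
			return false;
	return true;
}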
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+       .gamma_set = drm_atomic_helper_legacy_gamma_set,
+       .set_config = drm_atomic_helper_set_config,
+       .destroy = intel_crtc_destroy,
+       .page_flip = drm_atomic_helper_page_flip,
+       .atomic_duplicate_state = intel_crtc_duplicate_state,
+       .atomic_destroy_state = intel_crtc_destroy_state,
+       .set_crc_source = intel_crtc_set_crc_source,
+       .verify_crc_source = intel_crtc_verify_crc_source,
+       .get_crc_sources = intel_crtc_get_crc_sources,
+};
+
+struct wait_rps_boost {
+       struct wait_queue_entry wait;
+
+       struct drm_crtc *crtc;
+       struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+                       unsigned mode, int sync, void *key)
+{
+       struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+       struct i915_request *rq = wait->request;
+
+       /*
+        * If we missed the vblank, but the request is already running it
+        * is reasonable to assume that it will complete before the next
+        * vblank without our intervention, so leave RPS alone.
+        */
+       if (!i915_request_started(rq))
+               gen6_rps_boost(rq);
+       i915_request_put(rq);
+
+       drm_crtc_vblank_put(wait->crtc);
+
+       list_del(&wait->wait.entry);
+       kfree(wait);
+       return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+                                      struct dma_fence *fence)
+{
+       struct wait_rps_boost *wait;
+
+       if (!dma_fence_is_i915(fence))
+               return;
+
+       if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+               return;
+
+       if (drm_crtc_vblank_get(crtc))
+               return;
+
+       wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+       if (!wait) {
+               drm_crtc_vblank_put(crtc);
+               return;
+       }
+
+       wait->request = to_request(dma_fence_get(fence));
+       wait->crtc = crtc;
+
+       wait->wait.func = do_rps_boost;
+       wait->wait.flags = 0;
+
+       add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       struct i915_vma *vma;
+
+       if (plane->id == PLANE_CURSOR &&
+           INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
+               struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+               const int align = intel_cursor_alignment(dev_priv);
+               int err;
+
+               err = i915_gem_object_attach_phys(obj, align);
+               if (err)
+                       return err;
+       }
+
+       vma = intel_pin_and_fence_fb_obj(fb,
+                                        &plane_state->view,
+                                        intel_plane_uses_fence(plane_state),
+                                        &plane_state->flags);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
+
+       plane_state->vma = vma;
+
+       return 0;
+}
+
+static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&old_plane_state->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma, old_plane_state->flags);
+}
+
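+/*
+ * Raise the scheduling priority of any requests still rendering into the
+ * framebuffer object, so that the rendering stands a better chance of
+ * completing before the results are needed on screen.
+ */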
+static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
+{
+       struct i915_sched_attr attr = {
+               .priority = I915_PRIORITY_DISPLAY,
+       };
+
+       i915_gem_object_wait_priority(obj, 0, &attr);
+}
+
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @plane: drm plane to prepare for
+ * @new_state: the plane state being prepared
+ *
+ * Prepares a framebuffer for usage on a display plane.  Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits.  Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Must be called with struct_mutex held.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int
+intel_prepare_plane_fb(struct drm_plane *plane,
+                      struct drm_plane_state *new_state)
+{
+       struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(new_state->state);
+       struct drm_i915_private *dev_priv = to_i915(plane->dev);
+       struct drm_framebuffer *fb = new_state->fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+       int ret;
+
+       if (old_obj) {
+               struct drm_crtc_state *crtc_state =
+                       drm_atomic_get_new_crtc_state(new_state->state,
+                                                     plane->state->crtc);
+
+               /* Big Hammer, we also need to ensure that any pending
+                * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+                * current scanout is retired before unpinning the old
+                * framebuffer. Note that we rely on userspace rendering
+                * into the buffer attached to the pipe they are waiting
+                * on. If not, userspace generates a GPU hang with IPEHR
+                * pointing to the MI_WAIT_FOR_EVENT.
+                *
+                * This should only fail upon a hung GPU, in which case we
+                * can safely continue.
+                */
+               if (needs_modeset(crtc_state)) {
+                       ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+                                                             old_obj->resv, NULL,
+                                                             false, 0,
+                                                             GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       if (new_state->fence) { /* explicit fencing */
+               ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+                                                   new_state->fence,
+                                                   I915_FENCE_TIMEOUT,
+                                                   GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (!obj)
+               return 0;
+
+       ret = i915_gem_object_pin_pages(obj);
+       if (ret)
+               return ret;
+
+       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+       if (ret) {
+               i915_gem_object_unpin_pages(obj);
+               return ret;
+       }
+
+       ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
+
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       i915_gem_object_unpin_pages(obj);
+       if (ret)
+               return ret;
+
+       fb_obj_bump_render_priority(obj);
+       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
+       if (!new_state->fence) { /* implicit fencing */
+               struct dma_fence *fence;
+
+               ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+                                                     obj->resv, NULL,
+                                                     false, I915_FENCE_TIMEOUT,
+                                                     GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+
+               fence = reservation_object_get_excl_rcu(obj->resv);
+               if (fence) {
+                       add_rps_boost_after_vblank(new_state->crtc, fence);
+                       dma_fence_put(fence);
+               }
+       } else {
+               add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
+       }
+
+       /*
+        * We declare pageflips to be interactive and so merit a small bias
+        * towards upclocking to deliver the frame on time. By only changing
+        * the RPS thresholds to sample more regularly and aim for higher
+        * clocks we can hopefully deliver low power workloads (like kodi)
+        * that are not quite steady state without resorting to forcing
+        * maximum clocks following a vblank miss (see do_rps_boost()).
+        */
+       if (!intel_state->rps_interactive) {
+               intel_rps_mark_interactive(dev_priv, true);
+               intel_state->rps_interactive = true;
+       }
+
+       return 0;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @old_state: the state from the previous modeset
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ *
+ * Must be called with struct_mutex held.
+ */
+void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+                      struct drm_plane_state *old_state)
+{
+       struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(old_state->state);
+       struct drm_i915_private *dev_priv = to_i915(plane->dev);
+
+       if (intel_state->rps_interactive) {
+               intel_rps_mark_interactive(dev_priv, false);
+               intel_state->rps_interactive = false;
+       }
+
+       /* Should only be called after a successful intel_prepare_plane_fb()! */
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       intel_plane_unpin_fb(to_intel_plane_state(old_state));
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int
+skl_max_scale(const struct intel_crtc_state *crtc_state,
+             u32 pixel_format)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int max_scale, mult;
+       int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
+
+       if (!crtc_state->base.enable)
+               return DRM_PLANE_HELPER_NO_SCALING;
+
+       crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
+       max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
+
+       if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
+               max_dotclk *= 2;
+
+       if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
+               return DRM_PLANE_HELPER_NO_SCALING;
+
+       /*
+        * The skl max scale is the lower of:
+        *    just under the maximum scale factor (2 for planar YUV
+        *    formats, 3 otherwise; the -1 keeps it strictly below)
+        *            or
+        *    cdclk / crtc_clock
+        */
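+       /*
+        * Both limits are computed in 16.16 fixed point: tmpclk1 is
+        * (mult << 16) - 1, i.e. just below mult, and tmpclk2 is
+        * cdclk/crtc_clock shifted up into the same format.
+        */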
+       mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
+       tmpclk1 = (1 << 16) * mult - 1;
+       tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
+       max_scale = min(tmpclk1, tmpclk2);
+
+       return max_scale;
+}
+
+static void intel_begin_crtc_commit(struct intel_atomic_state *state,
+                                   struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       bool modeset = needs_modeset(&new_crtc_state->base);
+
+       /* Perform vblank evasion around commit operation */
+       intel_pipe_update_start(new_crtc_state);
+
+       if (modeset)
+               goto out;
+
+       if (new_crtc_state->base.color_mgmt_changed ||
+           new_crtc_state->update_pipe)
+               intel_color_commit(new_crtc_state);
+
+       if (new_crtc_state->update_pipe)
+               intel_update_pipe_config(old_crtc_state, new_crtc_state);
+       else if (INTEL_GEN(dev_priv) >= 9)
+               skl_detach_scalers(new_crtc_state);
+
+       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+               bdw_set_pipemisc(new_crtc_state);
+
+out:
+       if (dev_priv->display.atomic_update_watermarks)
+               dev_priv->display.atomic_update_watermarks(state,
+                                                          new_crtc_state);
+}
+
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (!IS_GEN(dev_priv, 2))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+       if (crtc_state->has_pch_encoder) {
+               enum pipe pch_transcoder =
+                       intel_crtc_pch_transcoder(crtc);
+
+               intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+       }
+}
+
+static void intel_finish_crtc_commit(struct intel_atomic_state *state,
+                                    struct intel_crtc *crtc)
+{
+       struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+
+       intel_pipe_update_end(new_crtc_state);
+
+       if (new_crtc_state->update_pipe &&
+           !needs_modeset(&new_crtc_state->base) &&
+           old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+               intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
+{
+       drm_plane_cleanup(plane);
+       kfree(to_intel_plane(plane));
+}
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
+{
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XRGB8888:
+               return modifier == DRM_FORMAT_MOD_LINEAR ||
+                       modifier == I915_FORMAT_MOD_X_TILED;
+       default:
+               return false;
+       }
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
+{
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+               return modifier == DRM_FORMAT_MOD_LINEAR ||
+                       modifier == I915_FORMAT_MOD_X_TILED;
+       default:
+               return false;
+       }
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+                                             u32 format, u64 modifier)
+{
+       return modifier == DRM_FORMAT_MOD_LINEAR &&
+               format == DRM_FORMAT_ARGB8888;
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
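+/*
+ * Fast path for the legacy cursor ioctls: update the cursor plane directly,
+ * bypassing the full atomic commit machinery, as long as the crtc state and
+ * watermarks are unaffected. Anything more complicated falls back to the
+ * slow path via drm_atomic_helper_update_plane().
+ */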
+static int
+intel_legacy_cursor_update(struct drm_plane *plane,
+                          struct drm_crtc *crtc,
+                          struct drm_framebuffer *fb,
+                          int crtc_x, int crtc_y,
+                          unsigned int crtc_w, unsigned int crtc_h,
+                          u32 src_x, u32 src_y,
+                          u32 src_w, u32 src_h,
+                          struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       int ret;
+       struct drm_plane_state *old_plane_state, *new_plane_state;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct drm_framebuffer *old_fb;
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->state);
+       struct intel_crtc_state *new_crtc_state;
+
+       /*
+        * When crtc is inactive or there is a modeset pending,
+        * wait for it to complete in the slowpath
+        */
+       if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+           crtc_state->update_pipe)
+               goto slow;
+
+       old_plane_state = plane->state;
+       /*
+        * Don't do an async update if there is an outstanding commit modifying
+        * the plane.  This prevents our async update's changes from getting
+        * overridden by a previous synchronous update's state.
+        */
+       if (old_plane_state->commit &&
+           !try_wait_for_completion(&old_plane_state->commit->hw_done))
+               goto slow;
+
+       /*
+        * If any parameters change that may affect watermarks,
+        * take the slowpath. Only changing fb or position should be
+        * in the fastpath.
+        */
+       if (old_plane_state->crtc != crtc ||
+           old_plane_state->src_w != src_w ||
+           old_plane_state->src_h != src_h ||
+           old_plane_state->crtc_w != crtc_w ||
+           old_plane_state->crtc_h != crtc_h ||
+           !old_plane_state->fb != !fb)
+               goto slow;
+
+       new_plane_state = intel_plane_duplicate_state(plane);
+       if (!new_plane_state)
+               return -ENOMEM;
+
+       new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+       if (!new_crtc_state) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+
+       drm_atomic_set_fb_for_plane(new_plane_state, fb);
+
+       new_plane_state->src_x = src_x;
+       new_plane_state->src_y = src_y;
+       new_plane_state->src_w = src_w;
+       new_plane_state->src_h = src_h;
+       new_plane_state->crtc_x = crtc_x;
+       new_plane_state->crtc_y = crtc_y;
+       new_plane_state->crtc_w = crtc_w;
+       new_plane_state->crtc_h = crtc_h;
+
+       ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+                                                 to_intel_plane_state(old_plane_state),
+                                                 to_intel_plane_state(new_plane_state));
+       if (ret)
+               goto out_free;
+
+       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+       if (ret)
+               goto out_free;
+
+       ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
+       if (ret)
+               goto out_unlock;
+
+       intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
+
+       old_fb = old_plane_state->fb;
+       i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
+                         intel_plane->frontbuffer_bit);
+
+       /* Swap plane state */
+       plane->state = new_plane_state;
+
+       /*
+        * We cannot swap crtc_state as it may be in use by an atomic commit or
+        * page flip that's running simultaneously. If we swap crtc_state and
+        * destroy the old state, we will cause a use-after-free there.
+        *
+        * Only update active_planes, which is needed for our internal
+        * bookkeeping. Either value will do the right thing when updating
+        * planes atomically. If the cursor was part of the atomic update then
+        * we would have taken the slowpath.
+        */
+       crtc_state->active_planes = new_crtc_state->active_planes;
+
+       if (plane->state->visible)
+               intel_update_plane(intel_plane, crtc_state,
+                                  to_intel_plane_state(plane->state));
+       else
+               intel_disable_plane(intel_plane, crtc_state);
+
+       intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
+
+out_unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+out_free:
+       if (new_crtc_state)
+               intel_crtc_destroy_state(crtc, &new_crtc_state->base);
+       if (ret)
+               intel_plane_destroy_state(plane, new_plane_state);
+       else
+               intel_plane_destroy_state(plane, old_plane_state);
+       return ret;
+
+slow:
+       return drm_atomic_helper_update_plane(plane, crtc, fb,
+                                             crtc_x, crtc_y, crtc_w, crtc_h,
+                                             src_x, src_y, src_w, src_h, ctx);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+       .update_plane = intel_legacy_cursor_update,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+                              enum i9xx_plane_id i9xx_plane)
+{
+       if (!HAS_FBC(dev_priv))
+               return false;
+
+       if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+               return i9xx_plane == PLANE_A; /* tied to pipe A */
+       else if (IS_IVYBRIDGE(dev_priv))
+               return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+                       i9xx_plane == PLANE_C;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+       else
+               return i9xx_plane == PLANE_A;
+}
+
+static struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       struct intel_plane *plane;
+       const struct drm_plane_funcs *plane_funcs;
+       unsigned int supported_rotations;
+       unsigned int possible_crtcs;
+       const u64 *modifiers;
+       const u32 *formats;
+       int num_formats;
+       int ret;
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               return skl_universal_plane_create(dev_priv, pipe,
+                                                 PLANE_PRIMARY);
+
+       plane = intel_plane_alloc();
+       if (IS_ERR(plane))
+               return plane;
+
+       plane->pipe = pipe;
+       /*
+        * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+        * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+        */
+       if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+               plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+       else
+               plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+       plane->id = PLANE_PRIMARY;
+       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+       plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+       if (plane->has_fbc) {
+               struct intel_fbc *fbc = &dev_priv->fbc;
+
+               fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 4) {
+               formats = i965_primary_formats;
+               num_formats = ARRAY_SIZE(i965_primary_formats);
+               modifiers = i9xx_format_modifiers;
+
+               plane->max_stride = i9xx_plane_max_stride;
+               plane->update_plane = i9xx_update_plane;
+               plane->disable_plane = i9xx_disable_plane;
+               plane->get_hw_state = i9xx_plane_get_hw_state;
+               plane->check_plane = i9xx_plane_check;
+
+               plane_funcs = &i965_plane_funcs;
+       } else {
+               formats = i8xx_primary_formats;
+               num_formats = ARRAY_SIZE(i8xx_primary_formats);
+               modifiers = i9xx_format_modifiers;
+
+               plane->max_stride = i9xx_plane_max_stride;
+               plane->update_plane = i9xx_update_plane;
+               plane->disable_plane = i9xx_disable_plane;
+               plane->get_hw_state = i9xx_plane_get_hw_state;
+               plane->check_plane = i9xx_plane_check;
+
+               plane_funcs = &i8xx_plane_funcs;
+       }
+
+       possible_crtcs = BIT(pipe);
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                              possible_crtcs, plane_funcs,
+                                              formats, num_formats, modifiers,
+                                              DRM_PLANE_TYPE_PRIMARY,
+                                              "primary %c", pipe_name(pipe));
+       else
+               ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                              possible_crtcs, plane_funcs,
+                                              formats, num_formats, modifiers,
+                                              DRM_PLANE_TYPE_PRIMARY,
+                                              "plane %c",
+                                              plane_name(plane->i9xx_plane));
+       if (ret)
+               goto fail;
+
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+               supported_rotations =
+                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+                       DRM_MODE_REFLECT_X;
+       } else if (INTEL_GEN(dev_priv) >= 4) {
+               supported_rotations =
+                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+       } else {
+               supported_rotations = DRM_MODE_ROTATE_0;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 4)
+               drm_plane_create_rotation_property(&plane->base,
+                                                  DRM_MODE_ROTATE_0,
+                                                  supported_rotations);
+
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+       return plane;
+
+fail:
+       intel_plane_free(plane);
+
+       return ERR_PTR(ret);
+}
+
+static struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+                         enum pipe pipe)
+{
+       unsigned int possible_crtcs;
+       struct intel_plane *cursor;
+       int ret;
+
+       cursor = intel_plane_alloc();
+       if (IS_ERR(cursor))
+               return cursor;
+
+       cursor->pipe = pipe;
+       cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+       cursor->id = PLANE_CURSOR;
+       cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+       if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+               cursor->max_stride = i845_cursor_max_stride;
+               cursor->update_plane = i845_update_cursor;
+               cursor->disable_plane = i845_disable_cursor;
+               cursor->get_hw_state = i845_cursor_get_hw_state;
+               cursor->check_plane = i845_check_cursor;
+       } else {
+               cursor->max_stride = i9xx_cursor_max_stride;
+               cursor->update_plane = i9xx_update_cursor;
+               cursor->disable_plane = i9xx_disable_cursor;
+               cursor->get_hw_state = i9xx_cursor_get_hw_state;
+               cursor->check_plane = i9xx_check_cursor;
+       }
+
+       cursor->cursor.base = ~0;
+       cursor->cursor.cntl = ~0;
+
+       if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+               cursor->cursor.size = ~0;
+
+       possible_crtcs = BIT(pipe);
+
+       ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+                                      possible_crtcs, &intel_cursor_plane_funcs,
+                                      intel_cursor_formats,
+                                      ARRAY_SIZE(intel_cursor_formats),
+                                      cursor_format_modifiers,
+                                      DRM_PLANE_TYPE_CURSOR,
+                                      "cursor %c", pipe_name(pipe));
+       if (ret)
+               goto fail;
+
+       if (INTEL_GEN(dev_priv) >= 4)
+               drm_plane_create_rotation_property(&cursor->base,
+                                                  DRM_MODE_ROTATE_0,
+                                                  DRM_MODE_ROTATE_0 |
+                                                  DRM_MODE_ROTATE_180);
+
+       drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+       return cursor;
+
+fail:
+       intel_plane_free(cursor);
+
+       return ERR_PTR(ret);
+}
+
+static void intel_crtc_init_scalers(struct intel_crtc *crtc,
+                                   struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc_state->scaler_state;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       int i;
+
+       crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
+       if (!crtc->num_scalers)
+               return;
+
+       for (i = 0; i < crtc->num_scalers; i++) {
+               struct intel_scaler *scaler = &scaler_state->scalers[i];
+
+               scaler->in_use = 0;
+               scaler->mode = 0;
+       }
+
+       scaler_state->scaler_id = -1;
+}
+
+static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       struct intel_crtc *intel_crtc;
+       struct intel_crtc_state *crtc_state = NULL;
+       struct intel_plane *primary = NULL;
+       struct intel_plane *cursor = NULL;
+       int sprite, ret;
+
+       intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
+       if (!intel_crtc)
+               return -ENOMEM;
+
+       crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+       if (!crtc_state) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
+       intel_crtc->config = crtc_state;
+
+       primary = intel_primary_plane_create(dev_priv, pipe);
+       if (IS_ERR(primary)) {
+               ret = PTR_ERR(primary);
+               goto fail;
+       }
+       intel_crtc->plane_ids_mask |= BIT(primary->id);
+
+       for_each_sprite(dev_priv, pipe, sprite) {
+               struct intel_plane *plane;
+
+               plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+               if (IS_ERR(plane)) {
+                       ret = PTR_ERR(plane);
+                       goto fail;
+               }
+               intel_crtc->plane_ids_mask |= BIT(plane->id);
+       }
+
+       cursor = intel_cursor_plane_create(dev_priv, pipe);
+       if (IS_ERR(cursor)) {
+               ret = PTR_ERR(cursor);
+               goto fail;
+       }
+       intel_crtc->plane_ids_mask |= BIT(cursor->id);
+
+       ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+                                       &primary->base, &cursor->base,
+                                       &intel_crtc_funcs,
+                                       "pipe %c", pipe_name(pipe));
+       if (ret)
+               goto fail;
+
+       intel_crtc->pipe = pipe;
+
+       /* initialize shared scalers */
+       intel_crtc_init_scalers(intel_crtc, crtc_state);
+
+       BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+              dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+       dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+
+       if (INTEL_GEN(dev_priv) < 9) {
+               enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+               BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+                      dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+               dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+       }
+
+       drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+       intel_color_init(intel_crtc);
+
+       WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+
+       return 0;
+
+fail:
+       /*
+        * drm_mode_config_cleanup() will free up any
+        * crtcs/planes already initialized.
+        */
+       kfree(crtc_state);
+       kfree(intel_crtc);
+
+       return ret;
+}
+
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file)
+{
+       struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+       struct drm_crtc *drmmode_crtc;
+       struct intel_crtc *crtc;
+
+       drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
+       if (!drmmode_crtc)
+               return -ENOENT;
+
+       crtc = to_intel_crtc(drmmode_crtc);
+       pipe_from_crtc_id->pipe = crtc->pipe;
+
+       return 0;
+}
+
+static int intel_encoder_clones(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct intel_encoder *source_encoder;
+       int index_mask = 0;
+       int entry = 0;
+
+       for_each_intel_encoder(dev, source_encoder) {
+               if (encoders_cloneable(encoder, source_encoder))
+                       index_mask |= (1 << entry);
+
+               entry++;
+       }
+
+       return index_mask;
+}
+
+static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
+{
+       if (!IS_MOBILE(dev_priv))
+               return false;
+
+       if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+               return false;
+
+       if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+               return false;
+
+       return true;
+}
+
+static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) >= 9)
+               return false;
+
+       if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+               return false;
+
+       if (HAS_PCH_LPT_H(dev_priv) &&
+           I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+               return false;
+
+       /* DDI E can't be used if DDI A requires 4 lanes */
+       if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+               return false;
+
+       if (!dev_priv->vbt.int_crt_support)
+               return false;
+
+       return true;
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+       int pps_num;
+       int pps_idx;
+
+       if (HAS_DDI(dev_priv))
+               return;
+       /*
+        * This w/a is needed at least on CPT/PPT, but to be sure apply it
+        * everywhere where registers can be write protected.
+        */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               pps_num = 2;
+       else
+               pps_num = 1;
+
+       for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+               u32 val = I915_READ(PP_CONTROL(pps_idx));
+
+               val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+               I915_WRITE(PP_CONTROL(pps_idx), val);
+       }
+}
+
+static void intel_pps_init(struct drm_i915_private *dev_priv)
+{
+       if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
+               dev_priv->pps_mmio_base = PCH_PPS_BASE;
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->pps_mmio_base = VLV_PPS_BASE;
+       else
+               dev_priv->pps_mmio_base = PPS_BASE;
+
+       intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void intel_setup_outputs(struct drm_i915_private *dev_priv)
+{
+       struct intel_encoder *encoder;
+       bool dpd_is_edp = false;
+
+       intel_pps_init(dev_priv);
+
+       if (!HAS_DISPLAY(dev_priv))
+               return;
+
+       if (IS_ELKHARTLAKE(dev_priv)) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               icl_dsi_init(dev_priv);
+       } else if (INTEL_GEN(dev_priv) >= 11) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D);
+               intel_ddi_init(dev_priv, PORT_E);
+               /*
+                * On some ICL SKUs port F is not present. No strap bits for
+                * this, so rely on VBT.
+                * Work around broken VBTs on SKUs known to have no port F.
+                */
+               if (IS_ICL_WITH_PORT_F(dev_priv) &&
+                   intel_bios_is_port_present(dev_priv, PORT_F))
+                       intel_ddi_init(dev_priv, PORT_F);
+
+               icl_dsi_init(dev_priv);
+       } else if (IS_GEN9_LP(dev_priv)) {
+               /*
+                * FIXME: Broxton doesn't support port detection via the
+                * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
+                * detect the ports.
+                */
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+
+               vlv_dsi_init(dev_priv);
+       } else if (HAS_DDI(dev_priv)) {
+               int found;
+
+               if (intel_ddi_crt_present(dev_priv))
+                       intel_crt_init(dev_priv);
+
+               /*
+                * Haswell uses DDI functions to detect digital outputs.
+                * On SKL pre-D0 the strap isn't connected, so we assume
+                * it's there.
+                */
+               found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+               /* WaIgnoreDDIAStrap: skl */
+               if (found || IS_GEN9_BC(dev_priv))
+                       intel_ddi_init(dev_priv, PORT_A);
+
+               /*
+                * DDI B, C, D, and F detection is indicated by the
+                * SFUSE_STRAP register.
+                */
+               found = I915_READ(SFUSE_STRAP);
+
+               if (found & SFUSE_STRAP_DDIB_DETECTED)
+                       intel_ddi_init(dev_priv, PORT_B);
+               if (found & SFUSE_STRAP_DDIC_DETECTED)
+                       intel_ddi_init(dev_priv, PORT_C);
+               if (found & SFUSE_STRAP_DDID_DETECTED)
+                       intel_ddi_init(dev_priv, PORT_D);
+               if (found & SFUSE_STRAP_DDIF_DETECTED)
+                       intel_ddi_init(dev_priv, PORT_F);
+               /*
+                * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+                */
+               if (IS_GEN9_BC(dev_priv) &&
+                   intel_bios_is_port_present(dev_priv, PORT_E))
+                       intel_ddi_init(dev_priv, PORT_E);
+
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               int found;
+
+               /*
+                * intel_edp_init_connector() depends on this completing first,
+                * to prevent the registration of both eDP and LVDS and the
+                * incorrect sharing of the PPS.
+                */
+               intel_lvds_init(dev_priv);
+               intel_crt_init(dev_priv);
+
+               dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+
+               if (ilk_has_edp_a(dev_priv))
+                       intel_dp_init(dev_priv, DP_A, PORT_A);
+
+               if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+                       /* PCH SDVOB multiplex with HDMIB */
+                       found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
+                       if (!found)
+                               intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
+                       if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+                               intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
+               }
+
+               if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+                       intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
+
+               if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+                       intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
+
+               if (I915_READ(PCH_DP_C) & DP_DETECTED)
+                       intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
+
+               if (I915_READ(PCH_DP_D) & DP_DETECTED)
+                       intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               bool has_edp, has_port;
+
+               if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
+                       intel_crt_init(dev_priv);
+
+               /*
+                * The DP_DETECTED bit is the latched state of the DDC
+                * SDA pin at boot. However since eDP doesn't require DDC
+                * (no way to plug in a DP->HDMI dongle) the DDC pins for
+                * eDP ports may have been muxed to an alternate function.
+                * Thus we can't rely on the DP_DETECTED bit alone to detect
+                * eDP ports. Consult the VBT as well as DP_DETECTED to
+                * detect eDP ports.
+                *
+                * Sadly the straps seem to be missing sometimes even for HDMI
+                * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
+                * and VBT for the presence of the port. Additionally we can't
+                * trust the port type the VBT declares as we've seen at least
+                * HDMI ports that the VBT claim are DP or eDP.
+                */
+               has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
+               has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+               if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+                       has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
+               if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+                       intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
+
+               has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
+               has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+               if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+                       has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
+               if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+                       intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
+
+               if (IS_CHERRYVIEW(dev_priv)) {
+                       /*
+                        * eDP not supported on port D,
+                        * so no need to worry about it
+                        */
+                       has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+                       if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+                               intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
+                       if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+                               intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
+               }
+
+               vlv_dsi_init(dev_priv);
+       } else if (IS_PINEVIEW(dev_priv)) {
+               intel_lvds_init(dev_priv);
+               intel_crt_init(dev_priv);
+       } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
+               bool found = false;
+
+               if (IS_MOBILE(dev_priv))
+                       intel_lvds_init(dev_priv);
+
+               intel_crt_init(dev_priv);
+
+               if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+                       DRM_DEBUG_KMS("probing SDVOB\n");
+                       found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
+                       if (!found && IS_G4X(dev_priv)) {
+                               DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+                               intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
+                       }
+
+                       if (!found && IS_G4X(dev_priv))
+                               intel_dp_init(dev_priv, DP_B, PORT_B);
+               }
+
+               /* Before G4X, SDVOC doesn't have its own detect register */
+
+               if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+                       DRM_DEBUG_KMS("probing SDVOC\n");
+                       found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
+               }
+
+               if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+                       if (IS_G4X(dev_priv)) {
+                               DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+                               intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
+                       }
+                       if (IS_G4X(dev_priv))
+                               intel_dp_init(dev_priv, DP_C, PORT_C);
+               }
+
+               if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
+                       intel_dp_init(dev_priv, DP_D, PORT_D);
+
+               if (SUPPORTS_TV(dev_priv))
+                       intel_tv_init(dev_priv);
+       } else if (IS_GEN(dev_priv, 2)) {
+               if (IS_I85X(dev_priv))
+                       intel_lvds_init(dev_priv);
+
+               intel_crt_init(dev_priv);
+               intel_dvo_init(dev_priv);
+       }
+
+       intel_psr_init(dev_priv);
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               encoder->base.possible_crtcs = encoder->crtc_mask;
+               encoder->base.possible_clones =
+                       intel_encoder_clones(encoder);
+       }
+
+       intel_init_pch_refclk(dev_priv);
+
+       drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+       drm_framebuffer_cleanup(fb);
+
+       i915_gem_object_lock(obj);
+       WARN_ON(!obj->framebuffer_references--);
+       i915_gem_object_unlock(obj);
+
+       i915_gem_object_put(obj);
+
+       kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                               struct drm_file *file,
+                                               unsigned int *handle)
+{
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+       if (obj->userptr.mm) {
+               DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+               return -EINVAL;
+       }
+
+       return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+                                       struct drm_file *file,
+                                       unsigned flags, unsigned color,
+                                       struct drm_clip_rect *clips,
+                                       unsigned num_clips)
+{
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+       i915_gem_object_flush_if_display(obj);
+       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
+       return 0;
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+       .destroy = intel_user_framebuffer_destroy,
+       .create_handle = intel_user_framebuffer_create_handle,
+       .dirty = intel_user_framebuffer_dirty,
+};
+
+static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct drm_framebuffer *fb = &intel_fb->base;
+       u32 max_stride;
+       unsigned int tiling, stride;
+       int ret = -EINVAL;
+       int i;
+
+       i915_gem_object_lock(obj);
+       obj->framebuffer_references++;
+       tiling = i915_gem_object_get_tiling(obj);
+       stride = i915_gem_object_get_stride(obj);
+       i915_gem_object_unlock(obj);
+
+       if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+               /*
+                * If there's a fence, enforce that
+                * the fb modifier and tiling mode match.
+                */
+               if (tiling != I915_TILING_NONE &&
+                   tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+                       DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
+                       goto err;
+               }
+       } else {
+               if (tiling == I915_TILING_X) {
+                       mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+               } else if (tiling == I915_TILING_Y) {
+                       DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
+                       goto err;
+               }
+       }
+
+       if (!drm_any_plane_has_format(&dev_priv->drm,
+                                     mode_cmd->pixel_format,
+                                     mode_cmd->modifier[0])) {
+               struct drm_format_name_buf format_name;
+
+               DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+                             drm_get_format_name(mode_cmd->pixel_format,
+                                                 &format_name),
+                             mode_cmd->modifier[0]);
+               goto err;
+       }
+
+       /*
+        * gen2/3 display engine uses the fence if present,
+        * so the tiling mode must match the fb modifier exactly.
+        */
+       if (INTEL_GEN(dev_priv) < 4 &&
+           tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+               DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
+               goto err;
+       }
+
+       max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+                                        mode_cmd->modifier[0]);
+       if (mode_cmd->pitches[0] > max_stride) {
+               DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
+                             mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+                             "tiled" : "linear",
+                             mode_cmd->pitches[0], max_stride);
+               goto err;
+       }
+
+       /*
+        * If there's a fence, enforce that
+        * the fb pitch and fence stride match.
+        */
+       if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+               DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
+                             mode_cmd->pitches[0], stride);
+               goto err;
+       }
+
+       /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+       if (mode_cmd->offsets[0] != 0)
+               goto err;
+
+       drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+
+       for (i = 0; i < fb->format->num_planes; i++) {
+               u32 stride_alignment;
+
+               if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+                       DRM_DEBUG_KMS("bad plane %d handle\n", i);
+                       goto err;
+               }
+
+               stride_alignment = intel_fb_stride_alignment(fb, i);
+
+               /*
+                * Display WA #0531: skl,bxt,kbl,glk
+                *
+                * Render decompression and plane width > 3840
+                * combined with horizontal panning requires the
+                * plane stride to be a multiple of 4. We'll just
+                * require the entire fb to accommodate that to avoid
+                * potential runtime errors at plane configuration time.
+                */
+               if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
+                   is_ccs_modifier(fb->modifier))
+                       stride_alignment *= 4;
+
+               if (fb->pitches[i] & (stride_alignment - 1)) {
+                       DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
+                                     i, fb->pitches[i], stride_alignment);
+                       goto err;
+               }
+
+               fb->obj[i] = &obj->base;
+       }
+
+       ret = intel_fill_fb_info(dev_priv, fb);
+       if (ret)
+               goto err;
+
+       ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
+       if (ret) {
+               DRM_ERROR("framebuffer init failed %d\n", ret);
+               goto err;
+       }
+
+       return 0;
+
+err:
+       i915_gem_object_lock(obj);
+       obj->framebuffer_references--;
+       i915_gem_object_unlock(obj);
+       return ret;
+}
+
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+                             struct drm_file *filp,
+                             const struct drm_mode_fb_cmd2 *user_mode_cmd)
+{
+       struct drm_framebuffer *fb;
+       struct drm_i915_gem_object *obj;
+       struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+
+       obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+       if (!obj)
+               return ERR_PTR(-ENOENT);
+
+       fb = intel_framebuffer_create(obj, &mode_cmd);
+       if (IS_ERR(fb))
+               i915_gem_object_put(obj);
+
+       return fb;
+}
+
+static void intel_atomic_state_free(struct drm_atomic_state *state)
+{
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+
+       drm_atomic_state_default_release(state);
+
+       i915_sw_fence_fini(&intel_state->commit_ready);
+
+       kfree(state);
+}
+
+static enum drm_mode_status
+intel_mode_valid(struct drm_device *dev,
+                const struct drm_display_mode *mode)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int hdisplay_max, htotal_max;
+       int vdisplay_max, vtotal_max;
+
+       /*
+        * Can't reject DBLSCAN here because Xorg ddxen can add piles
+        * of DBLSCAN modes to the output's mode list when they detect
+        * the scaling mode property on the connector. And they don't
+        * ask the kernel to validate those modes in any way until
+        * modeset time at which point the client gets a protocol error.
+        * So in order to not upset those clients we silently ignore the
+        * DBLSCAN flag on such connectors. For other connectors we will
+        * reject modes with the DBLSCAN flag in encoder->compute_config().
+        * And we always reject DBLSCAN modes in connector->mode_valid()
+        * as we never want such modes on the connector's mode list.
+        */
+
+       if (mode->vscan > 1)
+               return MODE_NO_VSCAN;
+
+       if (mode->flags & DRM_MODE_FLAG_HSKEW)
+               return MODE_H_ILLEGAL;
+
+       if (mode->flags & (DRM_MODE_FLAG_CSYNC |
+                          DRM_MODE_FLAG_NCSYNC |
+                          DRM_MODE_FLAG_PCSYNC))
+               return MODE_HSYNC;
+
+       if (mode->flags & (DRM_MODE_FLAG_BCAST |
+                          DRM_MODE_FLAG_PIXMUX |
+                          DRM_MODE_FLAG_CLKDIV2))
+               return MODE_BAD;
+
+       if (INTEL_GEN(dev_priv) >= 9 ||
+           IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+               hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+               vdisplay_max = 4096;
+               htotal_max = 8192;
+               vtotal_max = 8192;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               hdisplay_max = 4096;
+               vdisplay_max = 4096;
+               htotal_max = 8192;
+               vtotal_max = 8192;
+       } else {
+               hdisplay_max = 2048;
+               vdisplay_max = 2048;
+               htotal_max = 4096;
+               vtotal_max = 4096;
+       }
+
+       if (mode->hdisplay > hdisplay_max ||
+           mode->hsync_start > htotal_max ||
+           mode->hsync_end > htotal_max ||
+           mode->htotal > htotal_max)
+               return MODE_H_ILLEGAL;
+
+       if (mode->vdisplay > vdisplay_max ||
+           mode->vsync_start > vtotal_max ||
+           mode->vsync_end > vtotal_max ||
+           mode->vtotal > vtotal_max)
+               return MODE_V_ILLEGAL;
+
+       return MODE_OK;
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+       .fb_create = intel_user_framebuffer_create,
+       .get_format_info = intel_get_format_info,
+       .output_poll_changed = intel_fbdev_output_poll_changed,
+       .mode_valid = intel_mode_valid,
+       .atomic_check = intel_atomic_check,
+       .atomic_commit = intel_atomic_commit,
+       .atomic_state_alloc = intel_atomic_state_alloc,
+       .atomic_state_clear = intel_atomic_state_clear,
+       .atomic_state_free = intel_atomic_state_free,
+};
+
+/**
+ * intel_init_display_hooks - initialize the display modesetting hooks
+ * @dev_priv: device private
+ */
+void intel_init_display_hooks(struct drm_i915_private *dev_priv)
+{
+       intel_init_cdclk_hooks(dev_priv);
+
+       if (INTEL_GEN(dev_priv) >= 9) {
+               dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       skylake_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock =
+                       haswell_crtc_compute_clock;
+               dev_priv->display.crtc_enable = haswell_crtc_enable;
+               dev_priv->display.crtc_disable = haswell_crtc_disable;
+       } else if (HAS_DDI(dev_priv)) {
+               dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock =
+                       haswell_crtc_compute_clock;
+               dev_priv->display.crtc_enable = haswell_crtc_enable;
+               dev_priv->display.crtc_disable = haswell_crtc_disable;
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock =
+                       ironlake_crtc_compute_clock;
+               dev_priv->display.crtc_enable = ironlake_crtc_enable;
+               dev_priv->display.crtc_disable = ironlake_crtc_disable;
+       } else if (IS_CHERRYVIEW(dev_priv)) {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+               dev_priv->display.crtc_enable = valleyview_crtc_enable;
+               dev_priv->display.crtc_disable = i9xx_crtc_disable;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+               dev_priv->display.crtc_enable = valleyview_crtc_enable;
+               dev_priv->display.crtc_disable = i9xx_crtc_disable;
+       } else if (IS_G4X(dev_priv)) {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+               dev_priv->display.crtc_enable = i9xx_crtc_enable;
+               dev_priv->display.crtc_disable = i9xx_crtc_disable;
+       } else if (IS_PINEVIEW(dev_priv)) {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+               dev_priv->display.crtc_enable = i9xx_crtc_enable;
+               dev_priv->display.crtc_disable = i9xx_crtc_disable;
+       } else if (!IS_GEN(dev_priv, 2)) {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+               dev_priv->display.crtc_enable = i9xx_crtc_enable;
+               dev_priv->display.crtc_disable = i9xx_crtc_disable;
+       } else {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_initial_plane_config =
+                       i9xx_get_initial_plane_config;
+               dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+               dev_priv->display.crtc_enable = i9xx_crtc_enable;
+               dev_priv->display.crtc_disable = i9xx_crtc_disable;
+       }
+
+       if (IS_GEN(dev_priv, 5)) {
+               dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+       } else if (IS_GEN(dev_priv, 6)) {
+               dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+       } else if (IS_IVYBRIDGE(dev_priv)) {
+               /* FIXME: detect B0+ stepping and use auto training */
+               dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               dev_priv->display.update_crtcs = skl_update_crtcs;
+       else
+               dev_priv->display.update_crtcs = intel_update_crtcs;
+}
+
+static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
+{
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return VLV_VGACNTRL;
+       else if (INTEL_GEN(dev_priv) >= 5)
+               return CPU_VGACNTRL;
+       else
+               return VGACNTRL;
+}
+
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_i915_private *dev_priv)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u8 sr1;
+       i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
+
+       /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+       vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+       outb(SR01, VGA_SR_INDEX);
+       sr1 = inb(VGA_SR_DATA);
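+       /* Set the screen-off bit (bit 5) in VGA sequencer register SR01. */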
+       outb(sr1 | 1<<5, VGA_SR_DATA);
+       vga_put(pdev, VGA_RSRC_LEGACY_IO);
+       udelay(300);
+
+       I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+       POSTING_READ(vga_reg);
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       intel_update_cdclk(dev_priv);
+       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
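+       /* Seed the logical and actual CDCLK state from the hardware state. */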
+       dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
+}
+
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WM's should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+static void sanitize_watermarks(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *state;
+       struct intel_atomic_state *intel_state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *cstate;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret;
+       int i;
+
+       /* Only supported on platforms that use atomic watermark design */
+       if (!dev_priv->display.optimize_watermarks)
+               return;
+
+       /*
+        * We need to hold connection_mutex before calling duplicate_state so
+        * that the connector loop is protected.
+        */
+       drm_modeset_acquire_init(&ctx, 0);
+retry:
+       ret = drm_modeset_lock_all_ctx(dev, &ctx);
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       } else if (WARN_ON(ret)) {
+               goto fail;
+       }
+
+       state = drm_atomic_helper_duplicate_state(dev, &ctx);
+       if (WARN_ON(IS_ERR(state)))
+               goto fail;
+
+       intel_state = to_intel_atomic_state(state);
+
+       /*
+        * Hardware readout is the only time we don't want to calculate
+        * intermediate watermarks (since we don't trust the current
+        * watermarks).
+        */
+       if (!HAS_GMCH(dev_priv))
+               intel_state->skip_intermediate_wm = true;
+
+       ret = intel_atomic_check(dev, state);
+       if (ret) {
+               /*
+                * If we fail here, it means that the hardware appears to be
+                * programmed in a way that shouldn't be possible, given our
+                * understanding of watermark requirements.  This might mean a
+                * mistake in the hardware readout code or a mistake in the
+                * watermark calculations for a given platform.  Raise a WARN
+                * so that this is noticeable.
+                *
+                * If this actually happens, we'll have to just leave the
+                * BIOS-programmed watermarks untouched and hope for the best.
+                */
+               WARN(true, "Could not determine valid watermarks for inherited state\n");
+               goto put_state;
+       }
+
+       /* Write calculated watermark values back */
+       for_each_new_crtc_in_state(state, crtc, cstate, i) {
+               struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+
+               cs->wm.need_postvbl_update = true;
+               dev_priv->display.optimize_watermarks(intel_state, cs);
+
+               to_intel_crtc_state(crtc->state)->wm = cs->wm;
+       }
+
+put_state:
+       drm_atomic_state_put(state);
+fail:
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+}
+
+static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
+{
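+       /*
+        * ILK reads the FDI PLL frequency back from the BIOS-programmed
+        * divider; SNB and IVB use a fixed value.
+        */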
+       if (IS_GEN(dev_priv, 5)) {
+               u32 fdi_pll_clk =
+                       I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+               dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+       } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
+               dev_priv->fdi_pll_freq = 270000;
+       } else {
+               return;
+       }
+
+       DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+}
+
+static int intel_initial_commit(struct drm_device *dev)
+{
+       struct drm_atomic_state *state = NULL;
+       struct drm_modeset_acquire_ctx ctx;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0;
+
+       state = drm_atomic_state_alloc(dev);
+       if (!state)
+               return -ENOMEM;
+
+       drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+       state->acquire_ctx = &ctx;
+
+       drm_for_each_crtc(crtc, dev) {
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state)) {
+                       ret = PTR_ERR(crtc_state);
+                       goto out;
+               }
+
+               if (crtc_state->active) {
+                       ret = drm_atomic_add_affected_planes(state, crtc);
+                       if (ret)
+                               goto out;
+
+                       /*
+                        * FIXME hack to force a LUT update to avoid the
+                        * plane update forcing the pipe gamma on without
+                        * having a proper LUT loaded. Remove once we
+                        * have readout for pipe gamma enable.
+                        */
+                       crtc_state->color_mgmt_changed = true;
+               }
+       }
+
+       ret = drm_atomic_commit(state);
+
+out:
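+       /* Back off and retry if we hit modeset lock contention. */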
+       if (ret == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       }
+
+       drm_atomic_state_put(state);
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
+       return ret;
+}
+
+int intel_modeset_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       enum pipe pipe;
+       struct intel_crtc *crtc;
+       int ret;
+
+       dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
+       drm_mode_config_init(dev);
+
+       ret = intel_bw_init(dev_priv);
+       if (ret)
+               return ret;
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       dev->mode_config.preferred_depth = 24;
+       dev->mode_config.prefer_shadow = 1;
+
+       dev->mode_config.allow_fb_modifiers = true;
+
+       dev->mode_config.funcs = &intel_mode_funcs;
+
+       init_llist_head(&dev_priv->atomic_helper.free_list);
+       INIT_WORK(&dev_priv->atomic_helper.free_work,
+                 intel_atomic_helper_free_state_worker);
+
+       intel_init_quirks(dev_priv);
+
+       intel_fbc_init(dev_priv);
+
+       intel_init_pm(dev_priv);
+
+       /*
+        * There may be no VBT; and if the BIOS enabled SSC we can
+        * just keep using it to avoid unnecessary flicker.  Whereas if the
+        * BIOS isn't using it, don't assume it will work even if the VBT
+        * indicates as much.
+        */
+       if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+               bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+                                           DREF_SSC1_ENABLE);
+
+               if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+                       DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+                                    bios_lvds_use_ssc ? "en" : "dis",
+                                    dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+                       dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+               }
+       }
+
+       /*
+        * Maximum framebuffer dimensions, chosen to match
+        * the maximum render engine surface size on gen4+.
+        */
+       if (INTEL_GEN(dev_priv) >= 7) {
+               dev->mode_config.max_width = 16384;
+               dev->mode_config.max_height = 16384;
+       } else if (INTEL_GEN(dev_priv) >= 4) {
+               dev->mode_config.max_width = 8192;
+               dev->mode_config.max_height = 8192;
+       } else if (IS_GEN(dev_priv, 3)) {
+               dev->mode_config.max_width = 4096;
+               dev->mode_config.max_height = 4096;
+       } else {
+               dev->mode_config.max_width = 2048;
+               dev->mode_config.max_height = 2048;
+       }
+
+       if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+               dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
+               dev->mode_config.cursor_height = 1023;
+       } else if (IS_GEN(dev_priv, 2)) {
+               dev->mode_config.cursor_width = 64;
+               dev->mode_config.cursor_height = 64;
+       } else {
+               dev->mode_config.cursor_width = 256;
+               dev->mode_config.cursor_height = 256;
+       }
+
+       dev->mode_config.fb_base = ggtt->gmadr.start;
+
+       DRM_DEBUG_KMS("%d display pipe%s available.\n",
+                     INTEL_INFO(dev_priv)->num_pipes,
+                     INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
+
+       for_each_pipe(dev_priv, pipe) {
+               ret = intel_crtc_init(dev_priv, pipe);
+               if (ret) {
+                       drm_mode_config_cleanup(dev);
+                       return ret;
+               }
+       }
+
+       intel_shared_dpll_init(dev);
+       intel_update_fdi_pll_freq(dev_priv);
+
+       intel_update_czclk(dev_priv);
+       intel_modeset_init_hw(dev);
+
+       intel_hdcp_component_init(dev_priv);
+
+       if (dev_priv->max_cdclk_freq == 0)
+               intel_update_max_cdclk(dev_priv);
+
+       /* Just disable it once at startup */
+       i915_disable_vga(dev_priv);
+       intel_setup_outputs(dev_priv);
+
+       drm_modeset_lock_all(dev);
+       intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
+       drm_modeset_unlock_all(dev);
+
+       for_each_intel_crtc(dev, crtc) {
+               struct intel_initial_plane_config plane_config = {};
+
+               if (!crtc->active)
+                       continue;
+
+               /*
+                * Note that reserving the BIOS fb up front prevents us
+                * from stuffing other stolen allocations like the ring
+                * on top.  This prevents some ugliness at boot time, and
+                * can even allow for smooth boot transitions if the BIOS
+                * fb is large enough for the active pipe configuration.
+                */
+               dev_priv->display.get_initial_plane_config(crtc,
+                                                          &plane_config);
+
+               /*
+                * If the fb is shared between multiple heads, we'll
+                * just get the first one.
+                */
+               intel_find_initial_plane_obj(crtc, &plane_config);
+       }
+
+       /*
+        * Make sure hardware watermarks really match the state we read out.
+        * Note that we need to do this after reconstructing the BIOS fbs
+        * since the watermark calculation done here will use pstate->fb.
+        */
+       if (!HAS_GMCH(dev_priv))
+               sanitize_watermarks(dev);
+
+       /*
+        * Force all active planes to recompute their states, so that on
+        * mode_setcrtc after probe all the intel_plane_state variables
+        * are already calculated and there are no assert_plane warnings
+        * during bootup.
+        */
+       ret = intel_initial_commit(dev);
+       if (ret)
+               DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+
+       return 0;
+}
+
+void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+       /* 640x480@60Hz, ~25175 kHz */
+       struct dpll clock = {
+               .m1 = 18,
+               .m2 = 7,
+               .p1 = 13,
+               .p2 = 4,
+               .n = 2,
+       };
+       u32 dpll, fp;
+       int i;
+
+       WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
+
+       DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
+                     pipe_name(pipe), clock.vco, clock.dot);
+
+       fp = i9xx_dpll_compute_fp(&clock);
+       dpll = DPLL_DVO_2X_MODE |
+               DPLL_VGA_MODE_DIS |
+               ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
+               PLL_P2_DIVIDE_BY_4 |
+               PLL_REF_INPUT_DREFCLK |
+               DPLL_VCO_ENABLE;
+
+       I915_WRITE(FP0(pipe), fp);
+       I915_WRITE(FP1(pipe), fp);
+
+       I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
+       I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
+       I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
+       I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
+       I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
+       I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
+       I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+
+       /*
+        * Apparently we need to have VGA mode enabled prior to changing
+        * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+        * dividers, even though the register value does change.
+        */
+       I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+       I915_WRITE(DPLL(pipe), dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(DPLL(pipe));
+       udelay(150);
+
+       /* The pixel multiplier can only be updated once the
+        * DPLL is enabled and the clocks are stable.
+        *
+        * So write it again.
+        */
+       I915_WRITE(DPLL(pipe), dpll);
+
+       /* We do this three times for luck */
+       for (i = 0; i < 3 ; i++) {
+               I915_WRITE(DPLL(pipe), dpll);
+               POSTING_READ(DPLL(pipe));
+               udelay(150); /* wait for warmup */
+       }
+
+       I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
+       POSTING_READ(PIPECONF(pipe));
+
+       intel_wait_for_pipe_scanline_moving(crtc);
+}
+
+void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+       DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
+                     pipe_name(pipe));
+
+       WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
+
+       I915_WRITE(PIPECONF(pipe), 0);
+       POSTING_READ(PIPECONF(pipe));
+
+       intel_wait_for_pipe_scanline_stopped(crtc);
+
+       I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
+       POSTING_READ(DPLL(pipe));
+}
+
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
+
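+       /*
+        * Only pre-gen4 primary planes have a pipe select and can end up
+        * attached to the wrong pipe.
+        */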
+       if (INTEL_GEN(dev_priv) >= 4)
+               return;
+
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               struct intel_plane *plane =
+                       to_intel_plane(crtc->base.primary);
+               struct intel_crtc *plane_crtc;
+               enum pipe pipe;
+
+               if (!plane->get_hw_state(plane, &pipe))
+                       continue;
+
+               if (pipe == crtc->pipe)
+                       continue;
+
+               DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+                             plane->base.base.id, plane->base.name);
+
+               plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+               intel_plane_disable_noatomic(plane_crtc, plane);
+       }
+}
+
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *encoder;
+
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+               return true;
+
+       return false;
+}
+
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct intel_connector *connector;
+
+       for_each_connector_on_encoder(dev, &encoder->base, connector)
+               return connector;
+
+       return NULL;
+}
+
+static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
+                             enum pipe pch_transcoder)
+{
+       return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+               (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+                               struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+       /* Clear any frame start delays used for debugging left by the BIOS */
+       if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
+               i915_reg_t reg = PIPECONF(cpu_transcoder);
+
+               I915_WRITE(reg,
+                          I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+       }
+
+       if (crtc_state->base.active) {
+               struct intel_plane *plane;
+
+               /* Disable everything but the primary plane */
+               for_each_intel_plane_on_crtc(dev, crtc, plane) {
+                       const struct intel_plane_state *plane_state =
+                               to_intel_plane_state(plane->base.state);
+
+                       if (plane_state->base.visible &&
+                           plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+                               intel_plane_disable_noatomic(crtc, plane);
+               }
+
+               /*
+                * Disable any background color set by the BIOS, but enable the
+                * gamma and CSC to match how we program our planes.
+                */
+               if (INTEL_GEN(dev_priv) >= 9)
+                       I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
+                                  SKL_BOTTOM_COLOR_GAMMA_ENABLE |
+                                  SKL_BOTTOM_COLOR_CSC_ENABLE);
+       }
+
+       /* Adjust the state of the output pipe according to whether we
+        * have active connectors/encoders. */
+       if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
+               intel_crtc_disable_noatomic(&crtc->base, ctx);
+
+       if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
+               /*
+                * We start out with underrun reporting disabled to avoid races.
+                * For correct bookkeeping mark this on active crtcs.
+                *
+                * Also on gmch platforms we don't have any hardware bits to
+                * disable the underrun reporting. Which means we need to start
+                * out with underrun reporting disabled also on inactive pipes,
+                * since otherwise we'll complain about the garbage we read when
+                * e.g. coming up after runtime pm.
+                *
+                * No protection against concurrent access is required - at
+                * worst a fifo underrun happens which also sets this to false.
+                */
+               crtc->cpu_fifo_underrun_disabled = true;
+               /*
+                * We track the PCH transcoder underrun reporting state
+                * within the crtc. With crtc for pipe A housing the underrun
+                * reporting state for PCH transcoder A, crtc for pipe B housing
+                * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+                * and marking underrun reporting as disabled for the non-existing
+                * PCH transcoders B and C would prevent enabling the south
+                * error interrupt (see cpt_can_enable_serr_int()).
+                */
+               if (has_pch_trancoder(dev_priv, crtc->pipe))
+                       crtc->pch_fifo_underrun_disabled = true;
+       }
+}
+
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       /*
+        * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
+        * the hardware when a high res display is plugged in. DPLL P
+        * divider is zero, and the pipe timings are bonkers. We'll
+        * try to disable everything in that case.
+        *
+        * FIXME would be nice to be able to sanitize this state
+        * without several WARNs, but for now let's take the easy
+        * road.
+        */
+       return IS_GEN(dev_priv, 6) &&
+               crtc_state->base.active &&
+               crtc_state->shared_dpll &&
+               crtc_state->port_clock == 0;
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_connector *connector;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc_state *crtc_state = crtc ?
+               to_intel_crtc_state(crtc->base.state) : NULL;
+
+       /* We need to check both for a crtc link (meaning that the
+        * encoder is active and trying to read from a pipe) and the
+        * pipe itself being active. */
+       bool has_active_crtc = crtc_state &&
+               crtc_state->base.active;
+
+       if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+               DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+                             pipe_name(crtc->pipe));
+               has_active_crtc = false;
+       }
+
+       connector = intel_encoder_find_connector(encoder);
+       if (connector && !has_active_crtc) {
+               DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+                             encoder->base.base.id,
+                             encoder->base.name);
+
+               /* Connector is active, but has no active pipe. This is
+                * fallout from our resume register restoring. Disable
+                * the encoder manually again. */
+               if (crtc_state) {
+                       struct drm_encoder *best_encoder;
+
+                       DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+                                     encoder->base.base.id,
+                                     encoder->base.name);
+
+                       /* avoid oopsing in case the hooks consult best_encoder */
+                       best_encoder = connector->base.state->best_encoder;
+                       connector->base.state->best_encoder = &encoder->base;
+
+                       if (encoder->disable)
+                               encoder->disable(encoder, crtc_state,
+                                                connector->base.state);
+                       if (encoder->post_disable)
+                               encoder->post_disable(encoder, crtc_state,
+                                                     connector->base.state);
+
+                       connector->base.state->best_encoder = best_encoder;
+               }
+               encoder->base.crtc = NULL;
+
+               /* Inconsistent output/port/pipe state happens presumably due to
+                * a bug in one of the get_hw_state functions, or someplace else
+                * in our code, like the register restore mess on resume. Clamp
+                * things to off as a safer default. */
+
+               connector->base.dpms = DRM_MODE_DPMS_OFF;
+               connector->base.encoder = NULL;
+       }
+
+       /* notify opregion of the sanitized encoder state */
+       intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               icl_sanitize_encoder_pll_mapping(encoder);
+}
+
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
+{
+       i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
+
+       if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+               DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+               i915_disable_vga(dev_priv);
+       }
+}
+
+void i915_redisable_vga(struct drm_i915_private *dev_priv)
+{
+       intel_wakeref_t wakeref;
+
+       /*
+        * This function can be called either from intel_modeset_setup_hw_state or
+        * at a very early point in our resume sequence, where the power well
+        * structures are not yet restored. Since this function is at a very
+        * paranoid "someone might have enabled VGA while we were not looking"
+        * level, just check if the power well is enabled instead of trying to
+        * follow the "don't touch the power well if we don't need it" policy
+        * the rest of the driver uses.
+        */
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_VGA);
+       if (!wakeref)
+               return;
+
+       i915_redisable_vga_power_on(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
+}
+
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_plane *plane;
+       struct intel_crtc *crtc;
+
+       for_each_intel_plane(&dev_priv->drm, plane) {
+               struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
+               struct intel_crtc_state *crtc_state;
+               enum pipe pipe = PIPE_A;
+               bool visible;
+
+               visible = plane->get_hw_state(plane, &pipe);
+
+               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+
+               intel_set_plane_visible(crtc_state, plane_state, visible);
+
+               DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+                             plane->base.base.id, plane->base.name,
+                             enableddisabled(visible), pipe_name(pipe));
+       }
+
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+
+               fixup_active_planes(crtc_state);
+       }
+}
+
+static void intel_modeset_readout_hw_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum pipe pipe;
+       struct intel_crtc *crtc;
+       struct intel_encoder *encoder;
+       struct intel_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       int i;
+
+       dev_priv->active_crtcs = 0;
+
+       for_each_intel_crtc(dev, crtc) {
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+
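+               /* Throw away the stale sw state and rebuild it from the hw below. */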
+               __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
+               memset(crtc_state, 0, sizeof(*crtc_state));
+               __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
+
+               crtc_state->base.active = crtc_state->base.enable =
+                       dev_priv->display.get_pipe_config(crtc, crtc_state);
+
+               crtc->base.enabled = crtc_state->base.enable;
+               crtc->active = crtc_state->base.active;
+
+               if (crtc_state->base.active)
+                       dev_priv->active_crtcs |= 1 << crtc->pipe;
+
+               DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
+                             crtc->base.base.id, crtc->base.name,
+                             enableddisabled(crtc_state->base.active));
+       }
+
+       readout_plane_state(dev_priv);
+
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+               pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
+                                                       &pll->state.hw_state);
+               pll->state.crtc_mask = 0;
+               for_each_intel_crtc(dev, crtc) {
+                       struct intel_crtc_state *crtc_state =
+                               to_intel_crtc_state(crtc->base.state);
+
+                       if (crtc_state->base.active &&
+                           crtc_state->shared_dpll == pll)
+                               pll->state.crtc_mask |= 1 << crtc->pipe;
+               }
+               pll->active_mask = pll->state.crtc_mask;
+
+               DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
+                             pll->info->name, pll->state.crtc_mask, pll->on);
+       }
+
+       for_each_intel_encoder(dev, encoder) {
+               pipe = 0;
+
+               if (encoder->get_hw_state(encoder, &pipe)) {
+                       struct intel_crtc_state *crtc_state;
+
+                       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+                       crtc_state = to_intel_crtc_state(crtc->base.state);
+
+                       encoder->base.crtc = &crtc->base;
+                       encoder->get_config(encoder, crtc_state);
+               } else {
+                       encoder->base.crtc = NULL;
+               }
+
+               DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+                             encoder->base.base.id, encoder->base.name,
+                             enableddisabled(encoder->base.crtc),
+                             pipe_name(pipe));
+       }
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               if (connector->get_hw_state(connector)) {
+                       connector->base.dpms = DRM_MODE_DPMS_ON;
+
+                       encoder = connector->encoder;
+                       connector->base.encoder = &encoder->base;
+
+                       if (encoder->base.crtc &&
+                           encoder->base.crtc->state->active) {
+                               /*
+                                * This has to be done during hardware readout
+                                * because anything calling .crtc_disable may
+                                * rely on the connector_mask being accurate.
+                                */
+                               encoder->base.crtc->state->connector_mask |=
+                                       drm_connector_mask(&connector->base);
+                               encoder->base.crtc->state->encoder_mask |=
+                                       drm_encoder_mask(&encoder->base);
+                       }
+
+               } else {
+                       connector->base.dpms = DRM_MODE_DPMS_OFF;
+                       connector->base.encoder = NULL;
+               }
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+                             connector->base.base.id, connector->base.name,
+                             enableddisabled(connector->base.encoder));
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       for_each_intel_crtc(dev, crtc) {
+               struct intel_bw_state *bw_state =
+                       to_intel_bw_state(dev_priv->bw_obj.state);
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+               struct intel_plane *plane;
+               int min_cdclk = 0;
+
+               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+               if (crtc_state->base.active) {
+                       intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+                       crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
+                       crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
+                       intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
+                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+                       /*
+                        * The initial mode needs to be set in order to keep
+                        * the atomic core happy. It wants a valid mode if the
+                        * crtc's enabled, so we do the above call.
+                        *
+                        * But we don't set all the derived state fully, hence
+                        * set a flag to indicate that a full recalculation is
+                        * needed on the next commit.
+                        */
+                       crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
+
+                       intel_crtc_compute_pixel_rate(crtc_state);
+
+                       if (dev_priv->display.modeset_calc_cdclk) {
+                               min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+                               if (WARN_ON(min_cdclk < 0))
+                                       min_cdclk = 0;
+                       }
+
+                       drm_calc_timestamping_constants(&crtc->base,
+                                                       &crtc_state->base.adjusted_mode);
+                       update_scanline_offset(crtc_state);
+               }
+
+               dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+               dev_priv->min_voltage_level[crtc->pipe] =
+                       crtc_state->min_voltage_level;
+
+               for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+                       const struct intel_plane_state *plane_state =
+                               to_intel_plane_state(plane->base.state);
+
+                       /*
+                        * FIXME don't have the fb yet, so can't
+                        * use intel_plane_data_rate() :(
+                        */
+                       if (plane_state->base.visible)
+                               crtc_state->data_rate[plane->id] =
+                                       4 * crtc_state->pixel_rate;
+               }
+
+               intel_bw_crtc_update(bw_state, crtc_state);
+
+               intel_pipe_config_sanity_check(dev_priv, crtc_state);
+       }
+}
+
+static void
+get_encoder_power_domains(struct drm_i915_private *dev_priv)
+{
+       struct intel_encoder *encoder;
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_crtc_state *crtc_state;
+
+               if (!encoder->get_power_domains)
+                       continue;
+
+               /*
+                * MST-primary and inactive encoders don't have a crtc state
+                * and neither of these requires any power domain references.
+                */
+               if (!encoder->base.crtc)
+                       continue;
+
+               crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+               encoder->get_power_domains(encoder, crtc_state);
+       }
+}
+
+static void intel_early_display_was(struct drm_i915_private *dev_priv)
+{
+       /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
+       if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+               I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+                          DARBF_GATING_DIS);
+
+       if (IS_HASWELL(dev_priv)) {
+               /*
+                * WaRsPkgCStateDisplayPMReq:hsw
+                * System hang if this isn't done before disabling all planes!
+                */
+               I915_WRITE(CHICKEN_PAR1_1,
+                          I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+       }
+}
+
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+                                      enum port port, i915_reg_t hdmi_reg)
+{
+       u32 val = I915_READ(hdmi_reg);
+
+       if (val & SDVO_ENABLE ||
+           (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+               return;
+
+       DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
+                     port_name(port));
+
+       val &= ~SDVO_PIPE_SEL_MASK;
+       val |= SDVO_PIPE_SEL(PIPE_A);
+
+       I915_WRITE(hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+                                    enum port port, i915_reg_t dp_reg)
+{
+       u32 val = I915_READ(dp_reg);
+
+       if (val & DP_PORT_EN ||
+           (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+               return;
+
+       DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
+                     port_name(port));
+
+       val &= ~DP_PIPE_SEL_MASK;
+       val |= DP_PIPE_SEL(PIPE_A);
+
+       I915_WRITE(dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+       /*
+        * The BIOS may select transcoder B on some of the PCH
+        * ports even if it doesn't enable the port. This would trip
+        * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+        * Sanitize the transcoder select bits to prevent that. We
+        * assume that the BIOS never actually enabled the port,
+        * because if it did we'd actually have to toggle the port
+        * on and back off to make the transcoder A select stick
+        * (see intel_dp_link_down(), intel_disable_hdmi(),
+        * intel_disable_sdvo()).
+        */
+       ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+       ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+       ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+       /* PCH SDVOB multiplex with HDMIB */
+       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
+/* Scan out the current hw modeset state,
+ * and sanitize it to the current state
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev,
+                            struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc_state *crtc_state;
+       struct intel_encoder *encoder;
+       struct intel_crtc *crtc;
+       intel_wakeref_t wakeref;
+       int i;
+
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+       intel_early_display_was(dev_priv);
+       intel_modeset_readout_hw_state(dev);
+
+       /* HW state is read out, now we need to sanitize this mess. */
+       get_encoder_power_domains(dev_priv);
+
+       if (HAS_PCH_IBX(dev_priv))
+               ibx_sanitize_pch_ports(dev_priv);
+
+       /*
+        * intel_sanitize_plane_mapping() may need to do vblank
+        * waits, so we need vblank interrupts restored beforehand.
+        */
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+
+               drm_crtc_vblank_reset(&crtc->base);
+
+               if (crtc_state->base.active)
+                       intel_crtc_vblank_on(crtc_state);
+       }
+
+       intel_sanitize_plane_mapping(dev_priv);
+
+       for_each_intel_encoder(dev, encoder)
+               intel_sanitize_encoder(encoder);
+
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+               intel_sanitize_crtc(crtc, ctx);
+               intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
+       }
+
+       intel_modeset_update_connector_atomic_state(dev);
+
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+               if (!pll->on || pll->active_mask)
+                       continue;
+
+               DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
+                             pll->info->name);
+
+               pll->info->funcs->disable(dev_priv, pll);
+               pll->on = false;
+       }
+
+       if (IS_G4X(dev_priv)) {
+               g4x_wm_get_hw_state(dev_priv);
+               g4x_wm_sanitize(dev_priv);
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               vlv_wm_get_hw_state(dev_priv);
+               vlv_wm_sanitize(dev_priv);
+       } else if (INTEL_GEN(dev_priv) >= 9) {
+               skl_wm_get_hw_state(dev_priv);
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               ilk_wm_get_hw_state(dev_priv);
+       }
+
+       for_each_intel_crtc(dev, crtc) {
+               u64 put_domains;
+
+               crtc_state = to_intel_crtc_state(crtc->base.state);
+               put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
+               if (WARN_ON(put_domains))
+                       modeset_put_power_domains(dev_priv, put_domains);
+       }
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+       intel_fbc_init_pipe_state(dev_priv);
+}
+
+void intel_display_resume(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret;
+
+       dev_priv->modeset_restore_state = NULL;
+       if (state)
+               state->acquire_ctx = &ctx;
+
+       drm_modeset_acquire_init(&ctx, 0);
+
+       while (1) {
+               ret = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (ret != -EDEADLK)
+                       break;
+
+               drm_modeset_backoff(&ctx);
+       }
+
+       if (!ret)
+               ret = __intel_display_resume(dev, state, &ctx);
+
+       intel_enable_ipc(dev_priv);
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
+       if (ret)
+               DRM_ERROR("Restoring old state failed with %i\n", ret);
+       if (state)
+               drm_atomic_state_put(state);
+}
+
+static void intel_hpd_poll_fini(struct drm_device *dev)
+{
+       struct intel_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+
+       /* Kill all the work that may have been queued by hpd. */
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               if (connector->modeset_retry_work.func)
+                       cancel_work_sync(&connector->modeset_retry_work);
+               if (connector->hdcp.shim) {
+                       cancel_delayed_work_sync(&connector->hdcp.check_work);
+                       cancel_work_sync(&connector->hdcp.prop_work);
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       flush_workqueue(dev_priv->modeset_wq);
+
+       flush_work(&dev_priv->atomic_helper.free_work);
+       WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
+       /*
+        * Interrupts and polling are shut down first to avoid creating havoc.
+        * Too much stuff here (turning off connectors, ...) would
+        * experience fancy races otherwise.
+        */
+       intel_irq_uninstall(dev_priv);
+
+       /*
+        * Due to the hpd irq storm handling, the hotplug work can re-arm the
+        * poll handlers. Hence disable polling after hpd handling is shut down.
+        */
+       intel_hpd_poll_fini(dev);
+
+       /* poll work can call into fbdev, hence clean that up afterwards */
+       intel_fbdev_fini(dev_priv);
+
+       intel_unregister_dsm_handler();
+
+       intel_fbc_global_disable(dev_priv);
+
+       /* flush any delayed tasks or pending work */
+       flush_scheduled_work();
+
+       intel_hdcp_component_fini(dev_priv);
+
+       drm_mode_config_cleanup(dev);
+
+       intel_overlay_cleanup(dev_priv);
+
+       intel_gmbus_teardown(dev_priv);
+
+       destroy_workqueue(dev_priv->modeset_wq);
+
+       intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/*
+ * Set VGA decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
+{
+       unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+       u16 gmch_ctrl;
+
+       if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
+               DRM_ERROR("failed to read control word\n");
+               return -EIO;
+       }
+
+       if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
+               return 0;
+
+       if (state)
+               gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+       else
+               gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+       if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
+               DRM_ERROR("failed to write control word\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+struct intel_display_error_state {
+
+       u32 power_well_driver;
+
+       struct intel_cursor_error_state {
+               u32 control;
+               u32 position;
+               u32 base;
+               u32 size;
+       } cursor[I915_MAX_PIPES];
+
+       struct intel_pipe_error_state {
+               bool power_domain_on;
+               u32 source;
+               u32 stat;
+       } pipe[I915_MAX_PIPES];
+
+       struct intel_plane_error_state {
+               u32 control;
+               u32 stride;
+               u32 size;
+               u32 pos;
+               u32 addr;
+               u32 surface;
+               u32 tile_offset;
+       } plane[I915_MAX_PIPES];
+
+       struct intel_transcoder_error_state {
+               bool available;
+               bool power_domain_on;
+               enum transcoder cpu_transcoder;
+
+               u32 conf;
+
+               u32 htotal;
+               u32 hblank;
+               u32 hsync;
+               u32 vtotal;
+               u32 vblank;
+               u32 vsync;
+       } transcoder[4];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_display_error_state *error;
+       int transcoders[] = {
+               TRANSCODER_A,
+               TRANSCODER_B,
+               TRANSCODER_C,
+               TRANSCODER_EDP,
+       };
+       int i;
+
+       BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
+
+       if (!HAS_DISPLAY(dev_priv))
+               return NULL;
+
+       error = kzalloc(sizeof(*error), GFP_ATOMIC);
+       if (error == NULL)
+               return NULL;
+
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
+
+       for_each_pipe(dev_priv, i) {
+               error->pipe[i].power_domain_on =
+                       __intel_display_power_is_enabled(dev_priv,
+                                                        POWER_DOMAIN_PIPE(i));
+               if (!error->pipe[i].power_domain_on)
+                       continue;
+
+               error->cursor[i].control = I915_READ(CURCNTR(i));
+               error->cursor[i].position = I915_READ(CURPOS(i));
+               error->cursor[i].base = I915_READ(CURBASE(i));
+
+               error->plane[i].control = I915_READ(DSPCNTR(i));
+               error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+               if (INTEL_GEN(dev_priv) <= 3) {
+                       error->plane[i].size = I915_READ(DSPSIZE(i));
+                       error->plane[i].pos = I915_READ(DSPPOS(i));
+               }
+               if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
+                       error->plane[i].addr = I915_READ(DSPADDR(i));
+               if (INTEL_GEN(dev_priv) >= 4) {
+                       error->plane[i].surface = I915_READ(DSPSURF(i));
+                       error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+               }
+
+               error->pipe[i].source = I915_READ(PIPESRC(i));
+
+               if (HAS_GMCH(dev_priv))
+                       error->pipe[i].stat = I915_READ(PIPESTAT(i));
+       }
+
+       for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
+               enum transcoder cpu_transcoder = transcoders[i];
+
+               if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+                       continue;
+
+               error->transcoder[i].available = true;
+               error->transcoder[i].power_domain_on =
+                       __intel_display_power_is_enabled(dev_priv,
+                               POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+               if (!error->transcoder[i].power_domain_on)
+                       continue;
+
+               error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+               error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+               error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+               error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+               error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+               error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+               error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+               error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+       }
+
+       return error;
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
+void
+intel_display_print_error_state(struct drm_i915_error_state_buf *m,
+                               struct intel_display_error_state *error)
+{
+       struct drm_i915_private *dev_priv = m->i915;
+       int i;
+
+       if (!error)
+               return;
+
+       err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               err_printf(m, "PWR_WELL_CTL2: %08x\n",
+                          error->power_well_driver);
+       for_each_pipe(dev_priv, i) {
+               err_printf(m, "Pipe [%d]:\n", i);
+               err_printf(m, "  Power: %s\n",
+                          onoff(error->pipe[i].power_domain_on));
+               err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+               err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
+
+               err_printf(m, "Plane [%d]:\n", i);
+               err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+               err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+               if (INTEL_GEN(dev_priv) <= 3) {
+                       err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+                       err_printf(m, "  POS: %08x\n", error->plane[i].pos);
+               }
+               if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
+                       err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+               if (INTEL_GEN(dev_priv) >= 4) {
+                       err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+                       err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+               }
+
+               err_printf(m, "Cursor [%d]:\n", i);
+               err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+               err_printf(m, "  POS: %08x\n", error->cursor[i].position);
+               err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
+               if (!error->transcoder[i].available)
+                       continue;
+
+               err_printf(m, "CPU transcoder: %s\n",
+                          transcoder_name(error->transcoder[i].cpu_transcoder));
+               err_printf(m, "  Power: %s\n",
+                          onoff(error->transcoder[i].power_domain_on));
+               err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+               err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+               err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+               err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+               err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+               err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+               err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+       }
+}
+
+#endif
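
As a hedged sketch of how the capture/print pair above is consumed, an error-capture or debugfs path first snapshots the display registers and later formats them into an error-state buffer. The capture prototype and the kfree()-based cleanup are assumptions drawn from the surrounding driver code, not something this hunk introduces:

/* Sketch only: the capture prototype and the kzalloc/kfree lifetime are assumed. */
static void sketch_dump_display_state(struct drm_i915_private *dev_priv,
                                      struct drm_i915_error_state_buf *m)
{
        struct intel_display_error_state *error;

        /* Snapshot PIPECONF/timing/cursor registers while things may be wedged. */
        error = intel_display_capture_error_state(dev_priv);

        /* The printer tolerates a NULL snapshot (see the !error check above). */
        intel_display_print_error_state(m, error);

        kfree(error);
}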
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
new file mode 100644 (file)
index 0000000..ee6b819
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+#include <drm/drm_util.h>
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+struct intel_plane_state;
+
+enum i915_gpio {
+       GPIOA,
+       GPIOB,
+       GPIOC,
+       GPIOD,
+       GPIOE,
+       GPIOF,
+       GPIOG,
+       GPIOH,
+       __GPIOI_UNUSED,
+       GPIOJ,
+       GPIOK,
+       GPIOL,
+       GPIOM,
+};
+
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
+enum pipe {
+       INVALID_PIPE = -1,
+
+       PIPE_A = 0,
+       PIPE_B,
+       PIPE_C,
+       _PIPE_EDP,
+
+       I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+       /*
+        * The following transcoders have a 1:1 transcoder -> pipe mapping,
+        * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+        * rest have consecutive values and match the enum values of the pipes
+        * they map to.
+        */
+       TRANSCODER_A = PIPE_A,
+       TRANSCODER_B = PIPE_B,
+       TRANSCODER_C = PIPE_C,
+
+       /*
+        * The following transcoders can map to any pipe, their enum value
+        * doesn't need to stay fixed.
+        */
+       TRANSCODER_EDP,
+       TRANSCODER_DSI_0,
+       TRANSCODER_DSI_1,
+       TRANSCODER_DSI_A = TRANSCODER_DSI_0,    /* legacy DSI */
+       TRANSCODER_DSI_C = TRANSCODER_DSI_1,    /* legacy DSI */
+
+       I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+       switch (transcoder) {
+       case TRANSCODER_A:
+               return "A";
+       case TRANSCODER_B:
+               return "B";
+       case TRANSCODER_C:
+               return "C";
+       case TRANSCODER_EDP:
+               return "EDP";
+       case TRANSCODER_DSI_A:
+               return "DSI A";
+       case TRANSCODER_DSI_C:
+               return "DSI C";
+       default:
+               return "<invalid>";
+       }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+       return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
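
A minimal sketch of how these helpers are meant to be used together: transcoder_name() feeds debug output while transcoder_is_dsi() gates DSI-only paths. DRM_DEBUG_KMS() is the driver's usual logging macro and is assumed here rather than declared by this header:

static void sketch_log_transcoder(enum transcoder cpu_transcoder)
{
        /* DSI transcoders get special handling in several code paths. */
        if (transcoder_is_dsi(cpu_transcoder))
                DRM_DEBUG_KMS("DSI transcoder %s in use\n",
                              transcoder_name(cpu_transcoder));
        else
                DRM_DEBUG_KMS("CPU transcoder %s in use\n",
                              transcoder_name(cpu_transcoder));
}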
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+       PLANE_A,
+       PLANE_B,
+       PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC.  Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+       PLANE_PRIMARY,
+       PLANE_SPRITE0,
+       PLANE_SPRITE1,
+       PLANE_SPRITE2,
+       PLANE_SPRITE3,
+       PLANE_SPRITE4,
+       PLANE_SPRITE5,
+       PLANE_CURSOR,
+
+       I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+       for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+               for_each_if((__crtc)->plane_ids_mask & BIT(__p))
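
A small sketch of the iterator above, assuming a struct intel_crtc (defined elsewhere in the driver) that carries the plane_ids_mask the macro dereferences:

static unsigned int sketch_num_planes_on_crtc(const struct intel_crtc *crtc)
{
        enum plane_id plane_id;
        unsigned int count = 0;

        /* Visits only the plane ids present in crtc->plane_ids_mask. */
        for_each_plane_id_on_crtc(crtc, plane_id)
                count++;

        return count;
}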
+
+/*
+ * Ports identifier referenced from other drivers.
+ * Expected to remain stable over time
+ */
+static inline const char *port_identifier(enum port port)
+{
+       switch (port) {
+       case PORT_A:
+               return "Port A";
+       case PORT_B:
+               return "Port B";
+       case PORT_C:
+               return "Port C";
+       case PORT_D:
+               return "Port D";
+       case PORT_E:
+               return "Port E";
+       case PORT_F:
+               return "Port F";
+       default:
+               return "<invalid>";
+       }
+}
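
A hedged usage sketch: because these strings are meant to stay stable over time, they suit log messages that external tooling may parse. enum port and DRM_DEBUG_KMS() come from other headers:

static void sketch_log_port_probe(enum port port)
{
        /* port_identifier() strings are expected to remain stable. */
        DRM_DEBUG_KMS("probing %s\n", port_identifier(port));
}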
+
+enum tc_port {
+       PORT_TC_NONE = -1,
+
+       PORT_TC1 = 0,
+       PORT_TC2,
+       PORT_TC3,
+       PORT_TC4,
+
+       I915_MAX_TC_PORTS
+};
+
+enum tc_port_type {
+       TC_PORT_UNKNOWN = 0,
+       TC_PORT_TYPEC,
+       TC_PORT_TBT,
+       TC_PORT_LEGACY,
+};
+
+enum dpio_channel {
+       DPIO_CH0,
+       DPIO_CH1
+};
+
+enum dpio_phy {
+       DPIO_PHY0,
+       DPIO_PHY1,
+       DPIO_PHY2,
+};
+
+#define I915_NUM_PHYS_VLV 2
+
+enum aux_ch {
+       AUX_CH_A,
+       AUX_CH_B,
+       AUX_CH_C,
+       AUX_CH_D,
+       AUX_CH_E, /* ICL+ */
+       AUX_CH_F,
+};
+
+#define aux_ch_name(a) ((a) + 'A')
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+       u32 tu;
+       u32 gmch_m;
+       u32 gmch_n;
+       u32 link_m;
+       u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+       for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+       for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+               for_each_if((__mask) & BIT(__p))
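
A minimal sketch of the masked pipe iterator, assuming a caller-supplied bitmask with one bit per pipe (BIT(PIPE_A) and so on):

static void sketch_log_masked_pipes(struct drm_i915_private *dev_priv,
                                    u8 pipe_mask)
{
        enum pipe pipe;

        /* Only pipes whose bit is set in pipe_mask are visited. */
        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
                DRM_DEBUG_KMS("pipe %c selected\n", pipe_name(pipe));
}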
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+       for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++)  \
+               for_each_if ((__mask) & (1 << (__t)))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p)              \
+       for ((__p) = 0;                                                 \
+            (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;       \
+            (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s)                          \
+       for ((__s) = 0;                                                 \
+            (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)];      \
+            (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+       for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)  \
+               for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_crtc(dev, crtc) \
+       list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+       list_for_each_entry(intel_plane,                        \
+                           &(dev)->mode_config.plane_list,     \
+                           base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask)                \
+       list_for_each_entry(intel_plane,                                \
+                           &(dev)->mode_config.plane_list,             \
+                           base.head)                                  \
+               for_each_if((plane_mask) &                              \
+                           drm_plane_mask(&intel_plane->base))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)     \
+       list_for_each_entry(intel_plane,                                \
+                           &(dev)->mode_config.plane_list,             \
+                           base.head)                                  \
+               for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc)                           \
+       list_for_each_entry(intel_crtc,                                 \
+                           &(dev)->mode_config.crtc_list,              \
+                           base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)           \
+       list_for_each_entry(intel_crtc,                                 \
+                           &(dev)->mode_config.crtc_list,              \
+                           base.head)                                  \
+               for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
+
+#define for_each_intel_encoder(dev, intel_encoder)             \
+       list_for_each_entry(intel_encoder,                      \
+                           &(dev)->mode_config.encoder_list,   \
+                           base.head)
+
+#define for_each_intel_dp(dev, intel_encoder)                  \
+       for_each_intel_encoder(dev, intel_encoder)              \
+               for_each_if(intel_encoder_is_dp(intel_encoder))
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+       while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+       list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+               for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+       list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+               for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+                     (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+            (__i)++) \
+               for_each_if(plane)
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+                     (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+            (__i)++) \
+               for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->base.dev->mode_config.num_crtc && \
+                    ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+                     (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+            (__i)++) \
+               for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+                     (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+                     (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+            (__i)++) \
+               for_each_if(plane)
+
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->base.dev->mode_config.num_crtc && \
+                    ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+                     (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+                     (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+            (__i)++) \
+               for_each_if(crtc)
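
A hedged sketch of the atomic-state iterators above, as an atomic check or commit hook might use them. struct intel_atomic_state, struct intel_crtc and struct intel_crtc_state (with its embedded drm_crtc_state base) are assumed from the rest of the driver:

static void sketch_log_new_crtc_states(struct intel_atomic_state *state)
{
        struct intel_crtc *crtc;
        struct intel_crtc_state *new_crtc_state;
        int i;

        /* Walks every CRTC touched by this commit together with its new state. */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->base.active)
                        DRM_DEBUG_KMS("[CRTC:%d] active after this commit\n",
                                      crtc->base.base.id);
        }
}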
+
+void intel_link_compute_m_n(u16 bpp, int nlanes,
+                           int pixel_clock, int link_clock,
+                           struct intel_link_m_n *m_n,
+                           bool constant_n);
+bool is_ccs_modifier(u64 modifier);
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+                             u32 pixel_format, u64 modifier);
+bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+
+#endif
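
As a hedged illustration of intel_link_compute_m_n(), the call below fills M/N values for a made-up 1080p-class DP link: 24 bpp, 4 lanes, 148500 kHz pixel clock and 270000 kHz link clock. The kHz units and the constant_n=false choice follow the driver's usual conventions and are assumptions, not something this header spells out:

static void sketch_fill_link_m_n(struct intel_link_m_n *m_n)
{
        /* Illustrative values only; real callers derive them from the mode
         * and the negotiated link parameters. */
        intel_link_compute_m_n(24, 4, 148500, 270000, m_n, false);
}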
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
new file mode 100644 (file)
index 0000000..c93ad51
--- /dev/null
@@ -0,0 +1,4618 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vgaarb.h>
+
+#include "display/intel_crt.h"
+#include "display/intel_dp.h"
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_cdclk.h"
+#include "intel_combo_phy.h"
+#include "intel_csr.h"
+#include "intel_dpio_phy.h"
+#include "intel_drv.h"
+#include "intel_hotplug.h"
+#include "intel_sideband.h"
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+                                        enum i915_power_well_id power_well_id);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+       switch (domain) {
+       case POWER_DOMAIN_DISPLAY_CORE:
+               return "DISPLAY_CORE";
+       case POWER_DOMAIN_PIPE_A:
+               return "PIPE_A";
+       case POWER_DOMAIN_PIPE_B:
+               return "PIPE_B";
+       case POWER_DOMAIN_PIPE_C:
+               return "PIPE_C";
+       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+               return "PIPE_A_PANEL_FITTER";
+       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+               return "PIPE_B_PANEL_FITTER";
+       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+               return "PIPE_C_PANEL_FITTER";
+       case POWER_DOMAIN_TRANSCODER_A:
+               return "TRANSCODER_A";
+       case POWER_DOMAIN_TRANSCODER_B:
+               return "TRANSCODER_B";
+       case POWER_DOMAIN_TRANSCODER_C:
+               return "TRANSCODER_C";
+       case POWER_DOMAIN_TRANSCODER_EDP:
+               return "TRANSCODER_EDP";
+       case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+               return "TRANSCODER_EDP_VDSC";
+       case POWER_DOMAIN_TRANSCODER_DSI_A:
+               return "TRANSCODER_DSI_A";
+       case POWER_DOMAIN_TRANSCODER_DSI_C:
+               return "TRANSCODER_DSI_C";
+       case POWER_DOMAIN_PORT_DDI_A_LANES:
+               return "PORT_DDI_A_LANES";
+       case POWER_DOMAIN_PORT_DDI_B_LANES:
+               return "PORT_DDI_B_LANES";
+       case POWER_DOMAIN_PORT_DDI_C_LANES:
+               return "PORT_DDI_C_LANES";
+       case POWER_DOMAIN_PORT_DDI_D_LANES:
+               return "PORT_DDI_D_LANES";
+       case POWER_DOMAIN_PORT_DDI_E_LANES:
+               return "PORT_DDI_E_LANES";
+       case POWER_DOMAIN_PORT_DDI_F_LANES:
+               return "PORT_DDI_F_LANES";
+       case POWER_DOMAIN_PORT_DDI_A_IO:
+               return "PORT_DDI_A_IO";
+       case POWER_DOMAIN_PORT_DDI_B_IO:
+               return "PORT_DDI_B_IO";
+       case POWER_DOMAIN_PORT_DDI_C_IO:
+               return "PORT_DDI_C_IO";
+       case POWER_DOMAIN_PORT_DDI_D_IO:
+               return "PORT_DDI_D_IO";
+       case POWER_DOMAIN_PORT_DDI_E_IO:
+               return "PORT_DDI_E_IO";
+       case POWER_DOMAIN_PORT_DDI_F_IO:
+               return "PORT_DDI_F_IO";
+       case POWER_DOMAIN_PORT_DSI:
+               return "PORT_DSI";
+       case POWER_DOMAIN_PORT_CRT:
+               return "PORT_CRT";
+       case POWER_DOMAIN_PORT_OTHER:
+               return "PORT_OTHER";
+       case POWER_DOMAIN_VGA:
+               return "VGA";
+       case POWER_DOMAIN_AUDIO:
+               return "AUDIO";
+       case POWER_DOMAIN_AUX_A:
+               return "AUX_A";
+       case POWER_DOMAIN_AUX_B:
+               return "AUX_B";
+       case POWER_DOMAIN_AUX_C:
+               return "AUX_C";
+       case POWER_DOMAIN_AUX_D:
+               return "AUX_D";
+       case POWER_DOMAIN_AUX_E:
+               return "AUX_E";
+       case POWER_DOMAIN_AUX_F:
+               return "AUX_F";
+       case POWER_DOMAIN_AUX_IO_A:
+               return "AUX_IO_A";
+       case POWER_DOMAIN_AUX_TBT1:
+               return "AUX_TBT1";
+       case POWER_DOMAIN_AUX_TBT2:
+               return "AUX_TBT2";
+       case POWER_DOMAIN_AUX_TBT3:
+               return "AUX_TBT3";
+       case POWER_DOMAIN_AUX_TBT4:
+               return "AUX_TBT4";
+       case POWER_DOMAIN_GMBUS:
+               return "GMBUS";
+       case POWER_DOMAIN_INIT:
+               return "INIT";
+       case POWER_DOMAIN_MODESET:
+               return "MODESET";
+       case POWER_DOMAIN_GT_IRQ:
+               return "GT_IRQ";
+       default:
+               MISSING_CASE(domain);
+               return "?";
+       }
+}
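
A hedged sketch of how the string helper is typically consumed: naming every domain set in a u64 mask, following the BIT_ULL(domain) convention used further down in this file. POWER_DOMAIN_NUM and DRM_DEBUG_KMS() come from other headers:

static void sketch_print_domain_mask(u64 domain_mask)
{
        enum intel_display_power_domain domain;

        for (domain = 0; domain < POWER_DOMAIN_NUM; domain++)
                if (domain_mask & BIT_ULL(domain))
                        DRM_DEBUG_KMS("  %s\n",
                                      intel_display_power_domain_str(domain));
}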
+
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well)
+{
+       DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+       power_well->desc->ops->enable(dev_priv, power_well);
+       power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+                                    struct i915_power_well *power_well)
+{
+       DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+       power_well->hw_enabled = false;
+       power_well->desc->ops->disable(dev_priv, power_well);
+}
+
+static void intel_power_well_get(struct drm_i915_private *dev_priv,
+                                struct i915_power_well *power_well)
+{
+       if (!power_well->count++)
+               intel_power_well_enable(dev_priv, power_well);
+}
+
+static void intel_power_well_put(struct drm_i915_private *dev_priv,
+                                struct i915_power_well *power_well)
+{
+       WARN(!power_well->count, "Use count on power well %s is already zero",
+            power_well->desc->name);
+
+       if (!--power_well->count)
+               intel_power_well_disable(dev_priv, power_well);
+}
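
The two helpers above implement an enable-on-first-reference, disable-on-last-reference count. A minimal sketch of that contract, assuming the caller holds power_domains->lock as the rest of this file does:

static void sketch_power_well_refcount(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        intel_power_well_get(dev_priv, power_well); /* 0 -> 1: enables the well */
        intel_power_well_get(dev_priv, power_well); /* 1 -> 2: no hardware access */
        intel_power_well_put(dev_priv, power_well); /* 2 -> 1: no hardware access */
        intel_power_well_put(dev_priv, power_well); /* 1 -> 0: disables the well */
}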
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain)
+{
+       struct i915_power_well *power_well;
+       bool is_enabled;
+
+       if (dev_priv->runtime_pm.suspended)
+               return false;
+
+       is_enabled = true;
+
+       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
+               if (power_well->desc->always_on)
+                       continue;
+
+               if (!power_well->hw_enabled) {
+                       is_enabled = false;
+                       break;
+               }
+       }
+
+       return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                   enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       bool ret;
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       ret = __intel_display_power_is_enabled(dev_priv, domain);
+       mutex_unlock(&power_domains->lock);
+
+       return ret;
+}
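
A hedged sketch of the readout pattern the kernel-doc above prescribes: check the power domain before touching registers, with the relevant modeset locks assumed to be held by the caller. PIPECONF()/I915_READ() and POWER_DOMAIN_TRANSCODER() come from the rest of the driver, as used earlier in this series:

static bool sketch_read_transcoder_conf(struct drm_i915_private *dev_priv,
                                        enum transcoder cpu_transcoder,
                                        u32 *conf)
{
        /* Skip the read entirely if the transcoder's power domain is off. */
        if (!intel_display_power_is_enabled(dev_priv,
                                            POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
                return false;

        *conf = I915_READ(PIPECONF(cpu_transcoder));
        return true;
}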
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+                                      u8 irq_pipe_mask, bool has_vga)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+
+       /*
+        * After we re-enable the power well, if we touch VGA register 0x3d5
+        * we'll get unclaimed register interrupts. This stops after we write
+        * anything to the VGA MSR register. The vgacon module uses this
+        * register all the time, so if we unbind our driver and, as a
+        * consequence, bind vgacon, we'll get stuck in an infinite loop at
+        * console_unlock(). So here we touch the VGA MSR register, making
+        * sure vgacon can keep working normally without triggering interrupts
+        * and error messages.
+        */
+       if (has_vga) {
+               vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+               outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+               vga_put(pdev, VGA_RSRC_LEGACY_IO);
+       }
+
+       if (irq_pipe_mask)
+               gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+                                      u8 irq_pipe_mask)
+{
+       if (irq_pipe_mask)
+               gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+
+       /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+       WARN_ON(intel_wait_for_register(&dev_priv->uncore,
+                                       regs->driver,
+                                       HSW_PWR_WELL_CTL_STATE(pw_idx),
+                                       HSW_PWR_WELL_CTL_STATE(pw_idx),
+                                       1));
+}
+
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+                                    const struct i915_power_well_regs *regs,
+                                    int pw_idx)
+{
+       u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+       u32 ret;
+
+       ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
+       ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+       if (regs->kvmr.reg)
+               ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
+       ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+
+       return ret;
+}
+
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       bool disabled;
+       u32 reqs;
+
+       /*
+        * Bspec doesn't require waiting for PWs to get disabled, but we still
+        * do this out of paranoia. The known cases where a PW will be forced on:
+        * - a KVMR request on any power well via the KVMR request register
+        * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+        *   DEBUG request registers
+        * Skip the wait in case any of the request bits are set and print a
+        * diagnostic message.
+        */
+       wait_for((disabled = !(I915_READ(regs->driver) &
+                              HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+                (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+       if (disabled)
+               return;
+
+       DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+                     power_well->desc->name,
+                     !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+                                          enum skl_power_gate pg)
+{
+       /* Timeout 5us for PG#0, for other PGs 1us */
+       WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
+                                       SKL_FUSE_PG_DIST_STATUS(pg),
+                                       SKL_FUSE_PG_DIST_STATUS(pg), 1));
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       bool wait_fuses = power_well->desc->hsw.has_fuses;
+       enum skl_power_gate uninitialized_var(pg);
+       u32 val;
+
+       if (wait_fuses) {
+               pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+                                                SKL_PW_CTL_IDX_TO_PG(pw_idx);
+               /*
+                * For PW1 we have to wait both for the PW0/PG0 fuse state
+                * before enabling the power well and PW1/PG1's own fuse
+                * state after the enabling. For all other power wells with
+                * fuses we only have to wait for that PW/PG's fuse state
+                * after the enabling.
+                */
+               if (pg == SKL_PG1)
+                       gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+       }
+
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+       hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+       /* Display WA #1178: cnl */
+       if (IS_CANNONLAKE(dev_priv) &&
+           pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
+           pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+               val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+               val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
+               I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+       }
+
+       if (wait_fuses)
+               gen9_wait_for_power_well_fuses(dev_priv, pg);
+
+       hsw_power_well_post_enable(dev_priv,
+                                  power_well->desc->hsw.irq_pipe_mask,
+                                  power_well->desc->hsw.has_vga);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       u32 val;
+
+       hsw_power_well_pre_disable(dev_priv,
+                                  power_well->desc->hsw.irq_pipe_mask);
+
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+       hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#define ICL_AUX_PW_TO_PORT(pw_idx)     ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+       u32 val;
+
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+       val = I915_READ(ICL_PORT_CL_DW12(port));
+       I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+       hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+       /* Display WA #1178: icl */
+       if (IS_ICELAKE(dev_priv) &&
+           pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+           !intel_bios_is_port_edp(dev_priv, port)) {
+               val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+               val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+               I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+       }
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+                                    struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+       u32 val;
+
+       val = I915_READ(ICL_PORT_CL_DW12(port));
+       I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+       val = I915_READ(regs->driver);
+       I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+       hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#define ICL_AUX_PW_TO_CH(pw_idx)       \
+       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+                                struct i915_power_well *power_well)
+{
+       enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+       u32 val;
+
+       val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+       val &= ~DP_AUX_CH_CTL_TBT_IO;
+       if (power_well->desc->hsw.is_tc_tbt)
+               val |= DP_AUX_CH_CTL_TBT_IO;
+       I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+       hsw_power_well_enable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       enum i915_power_well_id id = power_well->desc->id;
+       int pw_idx = power_well->desc->hsw.idx;
+       u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+                  HSW_PWR_WELL_CTL_STATE(pw_idx);
+       u32 val;
+
+       val = I915_READ(regs->driver);
+
+       /*
+        * On GEN9 big core, due to a DMC bug, the driver's request bits for
+        * PW1 and the MISC_IO PW will not be restored, so check instead for the
+        * BIOS's own request bits, which are forced-on for these power wells
+        * when exiting DC5/6.
+        */
+       if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
+           (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+               val |= I915_READ(regs->bios);
+
+       return (val & mask) == mask;
+}
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+                 "DC9 already programmed to be enabled.\n");
+       WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+                 "DC5 still not disabled to enable DC9.\n");
+       WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
+                 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+                 "Power well 2 on.\n");
+       WARN_ONCE(intel_irqs_enabled(dev_priv),
+                 "Interrupts not disabled yet.\n");
+
+        /*
+         * TODO: check for the following to verify the conditions to enter DC9
+         * state are satisfied:
+         * 1] Check relevant display engine registers to verify if mode set
+         * disable sequence was followed.
+         * 2] Check if display uninitialize sequence is initialized.
+         */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+       WARN_ONCE(intel_irqs_enabled(dev_priv),
+                 "Interrupts not disabled yet.\n");
+       WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+                 "DC5 still not disabled.\n");
+
+        /*
+         * TODO: check for the following to verify DC9 state was indeed
+         * entered before programming to disable it:
+         * 1] Check relevant display engine registers to verify if mode
+         *  set disable sequence was followed.
+         * 2] Check if display uninitialize sequence is initialized.
+         */
+}
+
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+                               u32 state)
+{
+       int rewrites = 0;
+       int rereads = 0;
+       u32 v;
+
+       I915_WRITE(DC_STATE_EN, state);
+
+       /* It has been observed that disabling the DC6 state sometimes
+        * doesn't stick and the DMC keeps returning the old value. Re-read
+        * the register, and rewrite the requested value, until we are
+        * confident the state is exactly what we want.
+        */
+       do  {
+               v = I915_READ(DC_STATE_EN);
+
+               if (v != state) {
+                       I915_WRITE(DC_STATE_EN, state);
+                       rewrites++;
+                       rereads = 0;
+               } else if (rereads++ > 5) {
+                       break;
+               }
+
+       } while (rewrites < 100);
+
+       if (v != state)
+               DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+                         state, v);
+
+       /* Most of the time we need just one retry, so avoid spamming the log */
+       if (rewrites > 1)
+               DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+                             state, rewrites);
+}
+
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+{
+       u32 mask;
+
+       mask = DC_STATE_EN_UPTO_DC5;
+       if (INTEL_GEN(dev_priv) >= 11)
+               mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+       else if (IS_GEN9_LP(dev_priv))
+               mask |= DC_STATE_EN_DC9;
+       else
+               mask |= DC_STATE_EN_UPTO_DC6;
+
+       return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+       DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+                     dev_priv->csr.dc_state, val);
+       dev_priv->csr.dc_state = val;
+}
+
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back when exiting
+ * that state to a shallower power state (lower in number). The HW will decide
+ * when to actually enter a given state on an on-demand basis, for instance
+ * depending on the active state of display pipes. The state of display
+ * registers backed by affected power rails are saved/restored as needed.
+ *
+ * Based on the above, enabling a deeper DC power state is asynchronous wrt.
+ * the HW actually entering it. Disabling a deeper power state is synchronous:
+ * for instance setting %DC_STATE_DISABLE won't complete until all HW
+ * resources are turned back on and register state is restored. This is
+ * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
+ * restored.
+ */
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+{
+       u32 val;
+       u32 mask;
+
+       if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+               state &= dev_priv->csr.allowed_dc_mask;
+
+       val = I915_READ(DC_STATE_EN);
+       mask = gen9_dc_mask(dev_priv);
+       DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
+                     val & mask, state);
+
+       /* Check if DMC is ignoring our DC state requests */
+       if ((val & mask) != dev_priv->csr.dc_state)
+               DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+                         dev_priv->csr.dc_state, val & mask);
+
+       val &= ~mask;
+       val |= state;
+
+       gen9_write_dc_state(dev_priv, val);
+
+       dev_priv->csr.dc_state = val & mask;
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+       assert_can_enable_dc9(dev_priv);
+
+       DRM_DEBUG_KMS("Enabling DC9\n");
+       /*
+        * Power sequencer reset is not needed on
+        * platforms with South Display Engine on PCH,
+        * because PPS registers are always on.
+        */
+       if (!HAS_PCH_SPLIT(dev_priv))
+               intel_power_sequencer_reset(dev_priv);
+       gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+       assert_can_disable_dc9(dev_priv);
+
+       DRM_DEBUG_KMS("Disabling DC9\n");
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+       WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+                 "CSR program storage start is NULL\n");
+       WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+       WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+}
+
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv,
+                 enum i915_power_well_id power_well_id)
+{
+       struct i915_power_well *power_well;
+
+       for_each_power_well(dev_priv, power_well)
+               if (power_well->desc->id == power_well_id)
+                       return power_well;
+
+       /*
+        * It's not feasible to add error checking code to the callers since
+        * this condition really shouldn't happen and it doesn't even make sense
+        * to abort things like display initialization sequences. Just return
+        * the first power well and hope the WARN gets reported so we can fix
+        * our driver.
+        */
+       WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+       return &dev_priv->power_domains.power_wells[0];
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+       bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+                                       SKL_DISP_PW_2);
+
+       WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+                 "DC5 already programmed to be enabled.\n");
+       assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+       assert_csr_loaded(dev_priv);
+}
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+       assert_can_enable_dc5(dev_priv);
+
+       DRM_DEBUG_KMS("Enabling DC5\n");
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       if (IS_GEN9_BC(dev_priv))
+               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+                          SKL_SELECT_ALTERNATE_DC_EXIT);
+
+       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+       WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+                 "Backlight is not disabled.\n");
+       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+                 "DC6 already programmed to be enabled.\n");
+
+       assert_csr_loaded(dev_priv);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+       assert_can_enable_dc6(dev_priv);
+
+       DRM_DEBUG_KMS("Enabling DC6\n");
+
+       /* Wa Display #1183: skl,kbl,cfl */
+       if (IS_GEN9_BC(dev_priv))
+               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+                          SKL_SELECT_ALTERNATE_DC_EXIT);
+
+       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+       int pw_idx = power_well->desc->hsw.idx;
+       u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+       u32 bios_req = I915_READ(regs->bios);
+
+       /* Take over the request bit if set by BIOS. */
+       if (bios_req & mask) {
+               u32 drv_req = I915_READ(regs->driver);
+
+               if (!(drv_req & mask))
+                       I915_WRITE(regs->driver, drv_req | mask);
+               I915_WRITE(regs->bios, bios_req & ~mask);
+       }
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *power_well;
+
+       power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+       if (power_well->count > 0)
+               bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+
+       power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+       if (power_well->count > 0)
+               bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+
+       if (IS_GEMINILAKE(dev_priv)) {
+               power_well = lookup_power_well(dev_priv,
+                                              GLK_DISP_PW_DPIO_CMN_C);
+               if (power_well->count > 0)
+                       bxt_ddi_phy_verify_state(dev_priv,
+                                                power_well->desc->bxt.phy);
+       }
+}
+
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+}
+
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+       u32 tmp = I915_READ(DBUF_CTL);
+
+       WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+            (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+            "Unexpected DBuf power state (0x%08x)\n", tmp);
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       struct intel_cdclk_state cdclk_state = {};
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+       /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+       WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+
+       gen9_assert_dbuf_enabled(dev_priv);
+
+       if (IS_GEN9_LP(dev_priv))
+               bxt_verify_ddi_phy_power_wells(dev_priv);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               /*
+                * DMC retains HW context only for port A, the other combo
+                * PHY's HW context for port B is lost after DC transitions,
+                * so we need to restore it manually.
+                */
+               intel_combo_phy_init(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       if (!dev_priv->csr.dmc_payload)
+               return;
+
+       if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+               skl_enable_dc6(dev_priv);
+       else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+               gen9_enable_dc5(dev_priv);
+}
+
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+                                        struct i915_power_well *power_well)
+{
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+                                            struct i915_power_well *power_well)
+{
+       return true;
+}
+
+static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+                                        struct i915_power_well *power_well)
+{
+       if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+               i830_enable_pipe(dev_priv, PIPE_A);
+       if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+               i830_enable_pipe(dev_priv, PIPE_B);
+}
+
+static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       i830_disable_pipe(dev_priv, PIPE_B);
+       i830_disable_pipe(dev_priv, PIPE_A);
+}
+
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+               I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+}
+
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       if (power_well->count > 0)
+               i830_pipes_power_well_enable(dev_priv, power_well);
+       else
+               i830_pipes_power_well_disable(dev_priv, power_well);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+{
+       int pw_idx = power_well->desc->vlv.idx;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(pw_idx);
+       state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+                        PUNIT_PWRGT_PWR_GATE(pw_idx);
+
+       vlv_punit_get(dev_priv);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+       ctrl &= ~mask;
+       ctrl |= state;
+       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+       vlv_punit_put(dev_priv);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       int pw_idx = power_well->desc->vlv.idx;
+       bool enabled = false;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(pw_idx);
+       ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
+
+       vlv_punit_get(dev_priv);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+       /*
+        * We only ever set the power-on and power-gate states, anything
+        * else is unexpected.
+        */
+       WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+               state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+       if (state == ctrl)
+               enabled = true;
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+       WARN_ON(ctrl != state);
+
+       vlv_punit_put(dev_priv);
+
+       return enabled;
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       /*
+        * On driver load, a pipe may be active and driving a DSI display.
+        * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+        * (and never recovering) in this case. intel_dsi_post_disable() will
+        * clear it when we turn off the display.
+        */
+       val = I915_READ(DSPCLK_GATE_D);
+       val &= DPOUNIT_CLOCK_GATE_DISABLE;
+       val |= VRHUNIT_CLOCK_GATE_DISABLE;
+       I915_WRITE(DSPCLK_GATE_D, val);
+
+       /*
+        * Disable trickle feed and enable pnd deadline calculation
+        */
+       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+       I915_WRITE(CBR1_VLV, 0);
+
+       WARN_ON(dev_priv->rawclk_freq == 0);
+
+       I915_WRITE(RAWCLK_FREQ_VLV,
+                  DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+}
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_encoder *encoder;
+       enum pipe pipe;
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection. Supposedly DSI also
+        * needs the ref clock up and running.
+        *
+        * CHV DPLL B/C have some issues if VGA mode is enabled.
+        */
+       for_each_pipe(dev_priv, pipe) {
+               u32 val = I915_READ(DPLL(pipe));
+
+               val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+               if (pipe != PIPE_A)
+                       val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+               I915_WRITE(DPLL(pipe), val);
+       }
+
+       vlv_init_display_clock_gating(dev_priv);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_enable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /*
+        * During driver initialization/resume we can avoid restoring the
+        * part of the HW/SW state that will be inited anyway explicitly.
+        */
+       if (dev_priv->power_domains.initializing)
+               return;
+
+       intel_hpd_init(dev_priv);
+
+       /* Re-enable the ADPA, if we have one */
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               if (encoder->type == INTEL_OUTPUT_ANALOG)
+                       intel_crt_reset(&encoder->base);
+       }
+
+       i915_redisable_vga_power_on(dev_priv);
+
+       intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_disable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /* make sure we're done processing display irqs */
+       synchronize_irq(dev_priv->drm.irq);
+
+       intel_power_sequencer_reset(dev_priv);
+
+       /* Prevent us from re-enabling polling by accident in late suspend */
+       if (!dev_priv->drm.dev->power.is_suspended)
+               intel_hpd_poll_init(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       vlv_display_power_well_deinit(dev_priv);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       /* since ref/cri clock was enabled */
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all
+        *      be set to 0.
+        *
+        * This should only be done on init and resume from S3 with
+        * both PLLs disabled, or we risk losing DPIO and PLL
+        * synchronization.
+        */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       enum pipe pipe;
+
+       for_each_pipe(dev_priv, pipe)
+               assert_pll_disabled(dev_priv, pipe);
+
+       /* Assert common reset */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
+
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn_bc =
+               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+       struct i915_power_well *cmn_d =
+               lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+       u32 phy_control = dev_priv->chv_phy_control;
+       u32 phy_status = 0;
+       u32 phy_status_mask = 0xffffffff;
+
+       /*
+        * The BIOS can leave the PHY in some weird state
+        * where it doesn't fully power down some parts.
+        * Disable the asserts until the PHY has been fully
+        * reset (ie. the power well has been disabled at
+        * least once).
+        */
+       if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+               phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+                                    PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+       if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+               phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+       if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+               phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+               /* this assumes override is only used to enable lanes */
+               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+               /* CL1 is on whenever anything is on in either channel */
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+               /*
+                * The DPLLB check accounts for the pipe B + port A usage
+                * with CL2 powered up but all the lanes in the second channel
+                * powered down.
+                */
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+                   (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+       }
+
+       if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+               phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+               /* this assumes override is only used to enable lanes */
+               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+       }
+
+       phy_status &= phy_status_mask;
+
+       /*
+        * The PHY may be busy with some initial calibration and whatnot,
+        * so the power state can take a while to actually change.
+        */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   DISPLAY_PHY_STATUS,
+                                   phy_status_mask,
+                                   phy_status,
+                                   10))
+               DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+                         I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+                          phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       enum dpio_phy phy;
+       enum pipe pipe;
+       u32 tmp;
+
+       WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+                    power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+
+       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+               pipe = PIPE_A;
+               phy = DPIO_PHY0;
+       } else {
+               pipe = PIPE_C;
+               phy = DPIO_PHY1;
+       }
+
+       /* since ref/cri clock was enabled */
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /* Poll for phypwrgood signal */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   DISPLAY_PHY_STATUS,
+                                   PHY_POWERGOOD(phy),
+                                   PHY_POWERGOOD(phy),
+                                   1))
+               DRM_ERROR("Display PHY %d is not powered up\n", phy);
+
+       vlv_dpio_get(dev_priv);
+
+       /* Enable dynamic power down */
+       tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+       tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+               DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+               tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+               tmp |= DPIO_DYNPWRDOWNEN_CH1;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+       } else {
+               /*
+                * Force the non-existent CL2 off. BXT does this
+                * too, so maybe it saves some power even though
+                * CL2 doesn't exist?
+                */
+               tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+               tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+               vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+       }
+
+       vlv_dpio_put(dev_priv);
+
+       dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+                     phy, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       enum dpio_phy phy;
+
+       WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+                    power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+
+       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+               phy = DPIO_PHY0;
+               assert_pll_disabled(dev_priv, PIPE_A);
+               assert_pll_disabled(dev_priv, PIPE_B);
+       } else {
+               phy = DPIO_PHY1;
+               assert_pll_disabled(dev_priv, PIPE_C);
+       }
+
+       dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+
+       DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+                     phy, dev_priv->chv_phy_control);
+
+       /* PHY is fully reset now, so we can enable the PHY state asserts */
+       dev_priv->chv_phy_assert[phy] = true;
+
+       assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                                    enum dpio_channel ch, bool override, unsigned int mask)
+{
+       enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+       u32 reg, val, expected, actual;
+
+       /*
+        * The BIOS can leave the PHY in some weird state
+        * where it doesn't fully power down some parts.
+        * Disable the asserts until the PHY has been fully
+        * reset (i.e. the power well has been disabled at
+        * least once).
+        */
+       if (!dev_priv->chv_phy_assert[phy])
+               return;
+
+       if (ch == DPIO_CH0)
+               reg = _CHV_CMN_DW0_CH0;
+       else
+               reg = _CHV_CMN_DW6_CH1;
+
+       vlv_dpio_get(dev_priv);
+       val = vlv_dpio_read(dev_priv, pipe, reg);
+       vlv_dpio_put(dev_priv);
+
+       /*
+        * This assumes !override is only used when the port is disabled.
+        * All lanes should power down even without the override when
+        * the port is disabled.
+        */
+       if (!override || mask == 0xf) {
+               expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+               /*
+                * If CH1 common lane is not active anymore
+                * (eg. for pipe B DPLL) the entire channel will
+                * shut down, which causes the common lane registers
+                * to read as 0. That means we can't actually check
+                * the lane power down status bits, but as the entire
+                * register reads as 0 it's a good indication that the
+                * channel is indeed entirely powered down.
+                */
+               if (ch == DPIO_CH1 && val == 0)
+                       expected = 0;
+       } else if (mask != 0x0) {
+               expected = DPIO_ANYDL_POWERDOWN;
+       } else {
+               expected = 0;
+       }
+
+       if (ch == DPIO_CH0)
+               actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+       else
+               actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+       actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+       WARN(actual != expected,
+            "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+            !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
+            !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
+            reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                         enum dpio_channel ch, bool override)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       bool was_override;
+
+       mutex_lock(&power_domains->lock);
+
+       was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+       if (override == was_override)
+               goto out;
+
+       if (override)
+               dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+       else
+               dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+                     phy, ch, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
+
+out:
+       mutex_unlock(&power_domains->lock);
+
+       return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+                            bool override, unsigned int mask)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
+       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+
+       mutex_lock(&power_domains->lock);
+
+       dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+       dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+       if (override)
+               dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+       else
+               dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+                     phy, ch, mask, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
+
+       assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+       mutex_unlock(&power_domains->lock);
+}
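A hedged sketch (not part of this commit) of how an encoder path might use the helper above; the call site is hypothetical and the mask semantics are inferred from PHY_CH_POWER_DOWN_OVRD() and assert_chv_phy_powergate() (set bits select lanes to force into power-down, 0xf meaning all four lanes of the channel):

/* Illustrative sketch only, not part of intel_display_power.c. */
static void example_gate_unused_lanes(struct intel_encoder *encoder,
				      unsigned int unused_lane_mask)
{
	/*
	 * Force the lanes named in the mask into power-down and enable the
	 * override so the setting takes effect; the resulting DPIO lane
	 * state is then cross-checked by assert_chv_phy_powergate().
	 */
	chv_phy_powergate_lanes(encoder, true, unused_lane_mask);
}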
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       enum pipe pipe = PIPE_A;
+       bool enabled;
+       u32 state, ctrl;
+
+       vlv_punit_get(dev_priv);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+       /*
+        * We only ever set the power-on and power-gate states; anything
+        * else is unexpected.
+        */
+       WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+       enabled = state == DP_SSS_PWR_ON(pipe);
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+       WARN_ON(ctrl << 16 != state);
+
+       vlv_punit_put(dev_priv);
+
+       return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well,
+                                   bool enable)
+{
+       enum pipe pipe = PIPE_A;
+       u32 state;
+       u32 ctrl;
+
+       state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+       vlv_punit_get(dev_priv);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+       ctrl &= ~DP_SSC_MASK(pipe);
+       ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+
+#undef COND
+
+out:
+       vlv_punit_put(dev_priv);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+                                      struct i915_power_well *power_well)
+{
+       chv_set_pipe_power_well(dev_priv, power_well, true);
+
+       vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       vlv_display_power_well_deinit(dev_priv);
+
+       chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+       return power_domains->async_put_domains[0] |
+              power_domains->async_put_domains[1];
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static bool
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+       return !WARN_ON(power_domains->async_put_domains[0] &
+                       power_domains->async_put_domains[1]);
+}
+
+static bool
+__async_put_domains_state_ok(struct i915_power_domains *power_domains)
+{
+       enum intel_display_power_domain domain;
+       bool err = false;
+
+       err |= !assert_async_put_domain_masks_disjoint(power_domains);
+       err |= WARN_ON(!!power_domains->async_put_wakeref !=
+                      !!__async_put_domains_mask(power_domains));
+
+       for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+               err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+       return !err;
+}
+
+static void print_power_domains(struct i915_power_domains *power_domains,
+                               const char *prefix, u64 mask)
+{
+       enum intel_display_power_domain domain;
+
+       DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+       for_each_power_domain(domain, mask)
+               DRM_DEBUG_DRIVER("%s use_count %d\n",
+                                intel_display_power_domain_str(domain),
+                                power_domains->domain_use_count[domain]);
+}
+
+static void
+print_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+       DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
+                        power_domains->async_put_wakeref);
+
+       print_power_domains(power_domains, "async_put_domains[0]",
+                           power_domains->async_put_domains[0]);
+       print_power_domains(power_domains, "async_put_domains[1]",
+                           power_domains->async_put_domains[1]);
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+       if (!__async_put_domains_state_ok(power_domains))
+               print_async_put_domains_state(power_domains);
+}
+
+#else
+
+static void
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+}
+
+#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+       assert_async_put_domain_masks_disjoint(power_domains);
+
+       return __async_put_domains_mask(power_domains);
+}
+
+static void
+async_put_domains_clear_domain(struct i915_power_domains *power_domains,
+                              enum intel_display_power_domain domain)
+{
+       assert_async_put_domain_masks_disjoint(power_domains);
+
+       power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
+       power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+}
+
+static bool
+intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+                                      enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       bool ret = false;
+
+       if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+               goto out_verify;
+
+       async_put_domains_clear_domain(power_domains, domain);
+
+       ret = true;
+
+       if (async_put_domains_mask(power_domains))
+               goto out_verify;
+
+       cancel_delayed_work(&power_domains->async_put_work);
+       intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
+                                fetch_and_zero(&power_domains->async_put_wakeref));
+out_verify:
+       verify_async_put_domains_state(power_domains);
+
+       return ret;
+}
+
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+                                enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+
+       if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+               return;
+
+       for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+               intel_power_well_get(dev_priv, power_well);
+
+       power_domains->domain_use_count[domain]++;
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+                                       enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+       mutex_lock(&power_domains->lock);
+       __intel_display_power_get_domain(dev_priv, domain);
+       mutex_unlock(&power_domains->lock);
+
+       return wakeref;
+}
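For reference, a minimal sketch (not part of this commit) of the get/put pairing the kernel-doc above describes, assuming the usual i915 driver context; the helper name and the POWER_DOMAIN_PIPE_A choice are illustrative only:

/* Illustrative sketch only, not part of intel_display_power.c. */
static void example_touch_pipe_a(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/* Powers up PIPE_A and every parent well it depends on. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	/* ... access PIPE_A hardware here ... */

	/* Symmetric release; may power the wells back down. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}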
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain if the domain and
+ * all its parents are already enabled. Unlike intel_display_power_get() it
+ * will not power anything up. Users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Returns a wakeref on success, or 0 if the power domain was not enabled.
+ * Any reference obtained by this function must have a symmetric call to
+ * intel_display_power_put() to release it again.
+ */
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+                                  enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       intel_wakeref_t wakeref;
+       bool is_enabled;
+
+       wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+       if (!wakeref)
+               return 0;
+
+       mutex_lock(&power_domains->lock);
+
+       if (__intel_display_power_is_enabled(dev_priv, domain)) {
+               __intel_display_power_get_domain(dev_priv, domain);
+               is_enabled = true;
+       } else {
+               is_enabled = false;
+       }
+
+       mutex_unlock(&power_domains->lock);
+
+       if (!is_enabled) {
+               intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+               wakeref = 0;
+       }
+
+       return wakeref;
+}
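A hedged sketch (not part of this commit) of the conditional pattern this function enables: the caller proceeds only if the domain was already powered and bails out when the returned wakeref is 0. The helper name and domain choice below are illustrative:

/* Illustrative sketch only, not part of intel_display_power.c. */
static u32 example_peek_when_powered(struct drm_i915_private *dev_priv,
				     i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_PIPE_A);
	if (!wakeref)
		return 0;	/* domain is off, don't power it up */

	val = I915_READ(reg);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);

	return val;
}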
+
+static void
+__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+                                enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       const char *name = intel_display_power_domain_str(domain);
+
+       power_domains = &dev_priv->power_domains;
+
+       WARN(!power_domains->domain_use_count[domain],
+            "Use count on domain %s is already zero\n",
+            name);
+       WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
+            "Async disabling of domain %s is pending\n",
+            name);
+
+       power_domains->domain_use_count[domain]--;
+
+       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
+               intel_power_well_put(dev_priv, power_well);
+}
+
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       __intel_display_power_put_domain(dev_priv, domain);
+       mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function exists only for historical reasons and should be avoided in
+ * new code, as the correctness of its use cannot be checked. Always use
+ * intel_display_power_put() instead.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+                                      enum intel_display_power_domain domain)
+{
+       __intel_display_power_put(dev_priv, domain);
+       intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
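A short sketch (not part of this commit) of the legacy pattern this unchecked variant exists for: the wakeref returned by intel_display_power_get() is deliberately dropped, so only the unchecked put can balance it. The call site is hypothetical; new code should keep the wakeref and use intel_display_power_put():

/* Illustrative sketch only, not part of intel_display_power.c. */
static void example_legacy_vga_access(struct drm_i915_private *dev_priv)
{
	/* Return value (the wakeref) is intentionally ignored here. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);

	/* ... legacy code that predates wakeref tracking ... */

	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);
}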
+
+static void
+queue_async_put_domains_work(struct i915_power_domains *power_domains,
+                            intel_wakeref_t wakeref)
+{
+       WARN_ON(power_domains->async_put_wakeref);
+       power_domains->async_put_wakeref = wakeref;
+       WARN_ON(!queue_delayed_work(system_unbound_wq,
+                                   &power_domains->async_put_work,
+                                   msecs_to_jiffies(100)));
+}
+
+static void
+release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(power_domains, struct drm_i915_private,
+                            power_domains);
+       struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+       enum intel_display_power_domain domain;
+       intel_wakeref_t wakeref;
+
+       /*
+        * The caller must already hold a raw wakeref; upgrade that to a proper
+        * wakeref to make the state checker happy about the HW access during
+        * power well disabling.
+        */
+       assert_rpm_raw_wakeref_held(rpm);
+       wakeref = intel_runtime_pm_get(rpm);
+
+       for_each_power_domain(domain, mask) {
+               /* Clear before put, so put's sanity check is happy. */
+               async_put_domains_clear_domain(power_domains, domain);
+               __intel_display_power_put_domain(dev_priv, domain);
+       }
+
+       intel_runtime_pm_put(rpm, wakeref);
+}
+
+static void
+intel_display_power_put_async_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            power_domains.async_put_work.work);
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+       intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
+       intel_wakeref_t old_work_wakeref = 0;
+
+       mutex_lock(&power_domains->lock);
+
+       /*
+        * Bail out if all the domain refs pending to be released were grabbed
+        * by subsequent gets or a flush_work.
+        */
+       old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+       if (!old_work_wakeref)
+               goto out_verify;
+
+       release_async_put_domains(power_domains,
+                                 power_domains->async_put_domains[0]);
+
+       /* Requeue the work if more domains were async put meanwhile. */
+       if (power_domains->async_put_domains[1]) {
+               power_domains->async_put_domains[0] =
+                       fetch_and_zero(&power_domains->async_put_domains[1]);
+               queue_async_put_domains_work(power_domains,
+                                            fetch_and_zero(&new_work_wakeref));
+       }
+
+out_verify:
+       verify_async_put_domains_state(power_domains);
+
+       mutex_unlock(&power_domains->lock);
+
+       if (old_work_wakeref)
+               intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+       if (new_work_wakeref)
+               intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+}
+
+/**
+ * intel_display_power_put_async - release a power domain reference asynchronously
+ * @i915: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get*() and schedules a work to power down the
+ * corresponding hardware block if this is the last reference.
+ */
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+                                    enum intel_display_power_domain domain,
+                                    intel_wakeref_t wakeref)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+       struct intel_runtime_pm *rpm = &i915->runtime_pm;
+       intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+
+       mutex_lock(&power_domains->lock);
+
+       if (power_domains->domain_use_count[domain] > 1) {
+               __intel_display_power_put_domain(i915, domain);
+
+               goto out_verify;
+       }
+
+       WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+       /* Let a pending work requeue itself or queue a new one. */
+       if (power_domains->async_put_wakeref) {
+               power_domains->async_put_domains[1] |= BIT_ULL(domain);
+       } else {
+               power_domains->async_put_domains[0] |= BIT_ULL(domain);
+               queue_async_put_domains_work(power_domains,
+                                            fetch_and_zero(&work_wakeref));
+       }
+
+out_verify:
+       verify_async_put_domains_state(power_domains);
+
+       mutex_unlock(&power_domains->lock);
+
+       if (work_wakeref)
+               intel_runtime_pm_put_raw(rpm, work_wakeref);
+
+       intel_runtime_pm_put(rpm, wakeref);
+}
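A minimal sketch (not part of this commit) of an asynchronous release, calling the __-prefixed helper defined above directly; a wrapper named intel_display_power_put_async() presumably forwards the tracked wakeref from the corresponding header, but only the function shown in this hunk is assumed here:

/* Illustrative sketch only, not part of intel_display_power.c. */
static void example_put_aux_a_async(struct drm_i915_private *i915,
				    intel_wakeref_t wakeref)
{
	/*
	 * Drop the AUX_A reference without blocking: the actual power-down
	 * runs from the delayed work (roughly 100 ms later) unless the
	 * domain is re-acquired or the work is flushed first.
	 */
	__intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
}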
+
+/**
+ * intel_display_power_flush_work - flushes the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Flushes any pending work that was scheduled by a preceding
+ * intel_display_power_put_async() call, completing the disabling of the
+ * corresponding power domains.
+ *
+ * Note that the work handler function may still be running after this
+ * function returns; to ensure that the work handler isn't running use
+ * intel_display_power_flush_work_sync() instead.
+ */
+void intel_display_power_flush_work(struct drm_i915_private *i915)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+       intel_wakeref_t work_wakeref;
+
+       mutex_lock(&power_domains->lock);
+
+       work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+       if (!work_wakeref)
+               goto out_verify;
+
+       release_async_put_domains(power_domains,
+                                 async_put_domains_mask(power_domains));
+       cancel_delayed_work(&power_domains->async_put_work);
+
+out_verify:
+       verify_async_put_domains_state(power_domains);
+
+       mutex_unlock(&power_domains->lock);
+
+       if (work_wakeref)
+               intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+}
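A brief sketch (not part of this commit) of flushing pending asynchronous puts before a step that expects the wells to have reached their final state; the call site is hypothetical:

/* Illustrative sketch only, not part of intel_display_power.c. */
static void example_quiesce_async_puts(struct drm_i915_private *i915)
{
	/*
	 * Complete any power-down scheduled by an earlier
	 * intel_display_power_put_async().  Note the work handler itself
	 * may still be running when this returns (see the kernel-doc above).
	 */
	intel_display_power_flush_work(i915);
}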
+
+/**
+ * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Like intel_display_power_flush_work(), but also ensure that the work
+ * handler function is not running any more when this function returns.
+ */
+static void
+intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+
+       intel_display_power_flush_work(i915);
+       cancel_delayed_work_sync(&power_domains->async_put_work);
+
+       verify_async_put_domains_state(power_domains);
+
+       WARN_ON(power_domains->async_put_wakeref);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain,
+                            intel_wakeref_t wakeref)
+{
+       __intel_display_power_put(dev_priv, domain);
+       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+}
+#endif
+
+#define I830_PIPES_POWER_DOMAINS (             \
+       BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DISPLAY_POWER_DOMAINS (            \
+       BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
+       BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
+       BIT_ULL(POWER_DOMAIN_VGA) |                     \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_GMBUS) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DISPLAY_POWER_DOMAINS (            \
+       BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
+       BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
+       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
+       BIT_ULL(POWER_DOMAIN_VGA) |                     \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |           \
+       BIT_ULL(POWER_DOMAIN_GMBUS) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS (         \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define HSW_DISPLAY_POWER_DOMAINS (                    \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
+       BIT_ULL(POWER_DOMAIN_VGA) |                             \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BDW_DISPLAY_POWER_DOMAINS (                    \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
+       BIT_ULL(POWER_DOMAIN_VGA) |                             \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_VGA) |                             \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (         \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_VGA) |                             \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS (                 \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_VGA) |                             \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define GLK_DPIO_CMN_A_POWER_DOMAINS (                 \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_B_POWER_DOMAINS (                 \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_C_POWER_DOMAINS (                 \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_VGA) |                             \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (              \
+       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
+       BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS (                       \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_INIT))
+       /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS (                       \
+       ICL_PW_4_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT1) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT2) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT3) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT4) |                \
+       BIT_ULL(POWER_DOMAIN_VGA) |                     \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+       /*
+        * - transcoder WD
+        * - KVMR (HW control)
+        */
+#define ICL_PW_2_POWER_DOMAINS (                       \
+       ICL_PW_3_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |             \
+       BIT_ULL(POWER_DOMAIN_INIT))
+       /*
+        * - KVMR (HW control)
+        */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       ICL_PW_2_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = i9xx_always_on_power_well_noop,
+       .disable = i9xx_always_on_power_well_noop,
+       .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = chv_pipe_power_well_enable,
+       .disable = chv_pipe_power_well_disable,
+       .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = chv_dpio_cmn_power_well_enable,
+       .disable = chv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+};
+
+static const struct i915_power_well_ops i830_pipes_power_well_ops = {
+       .sync_hw = i830_pipes_power_well_sync_hw,
+       .enable = i830_pipes_power_well_enable,
+       .disable = i830_pipes_power_well_disable,
+       .is_enabled = i830_pipes_power_well_enabled,
+};
+
+static const struct i915_power_well_desc i830_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "pipes",
+               .domains = I830_PIPES_POWER_DOMAINS,
+               .ops = &i830_pipes_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = hsw_power_well_enable,
+       .disable = hsw_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = gen9_dc_off_power_well_enable,
+       .disable = gen9_dc_off_power_well_disable,
+       .is_enabled = gen9_dc_off_power_well_enabled,
+};
+
+static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = bxt_dpio_cmn_power_well_enable,
+       .disable = bxt_dpio_cmn_power_well_disable,
+       .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
+static const struct i915_power_well_regs hsw_power_well_regs = {
+       .bios   = HSW_PWR_WELL_CTL1,
+       .driver = HSW_PWR_WELL_CTL2,
+       .kvmr   = HSW_PWR_WELL_CTL3,
+       .debug  = HSW_PWR_WELL_CTL4,
+};
+
+static const struct i915_power_well_desc hsw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "display",
+               .domains = HSW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = HSW_DISP_PW_GLOBAL,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+                       .hsw.has_vga = true,
+               },
+       },
+};
+
+static const struct i915_power_well_desc bdw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "display",
+               .domains = BDW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = HSW_DISP_PW_GLOBAL,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+                       .hsw.has_vga = true,
+               },
+       },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = vlv_display_power_well_enable,
+       .disable = vlv_display_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = vlv_dpio_cmn_power_well_enable,
+       .disable = vlv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+       .sync_hw = i9xx_power_well_sync_hw_noop,
+       .enable = vlv_power_well_enable,
+       .disable = vlv_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_desc vlv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "display",
+               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .ops = &vlv_display_power_well_ops,
+               .id = VLV_DISP_PW_DISP2D,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+               },
+       },
+       {
+               .name = "dpio-tx-b-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
+               },
+       },
+       {
+               .name = "dpio-tx-b-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
+               },
+       },
+       {
+               .name = "dpio-tx-c-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
+               },
+       },
+       {
+               .name = "dpio-tx-c-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
+               },
+       },
+       {
+               .name = "dpio-common",
+               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+               .ops = &vlv_dpio_cmn_power_well_ops,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+               },
+       },
+};
+
+static const struct i915_power_well_desc chv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "display",
+               /*
+                * Pipe A power well is the new disp2d well. Pipe B and C
+                * power wells don't actually exist. Pipe A power well is
+                * required for any pipe to work.
+                */
+               .domains = CHV_DISPLAY_POWER_DOMAINS,
+               .ops = &chv_pipe_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "dpio-common-bc",
+               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
+               .ops = &chv_dpio_cmn_power_well_ops,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+               },
+       },
+       {
+               .name = "dpio-common-d",
+               .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
+               .ops = &chv_dpio_cmn_power_well_ops,
+               .id = CHV_DISP_PW_DPIO_CMN_D,
+               {
+                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+               },
+       },
+};
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+                                        enum i915_power_well_id power_well_id)
+{
+       struct i915_power_well *power_well;
+       bool ret;
+
+       power_well = lookup_power_well(dev_priv, power_well_id);
+       ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
+
+       return ret;
+}
+
+static const struct i915_power_well_desc skl_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "MISC IO power well",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_MISC_IO,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 2",
+               .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DDI A/E IO power well",
+               .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
+               },
+       },
+       {
+               .name = "DDI B IO power well",
+               .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+               },
+       },
+       {
+               .name = "DDI C IO power well",
+               .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+               },
+       },
+       {
+               .name = "DDI D IO power well",
+               .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+               },
+       },
+};
+
+static const struct i915_power_well_desc bxt_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 2",
+               .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "dpio-common-a",
+               .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .id = BXT_DISP_PW_DPIO_CMN_A,
+               {
+                       .bxt.phy = DPIO_PHY1,
+               },
+       },
+       {
+               .name = "dpio-common-bc",
+               .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
+               {
+                       .bxt.phy = DPIO_PHY0,
+               },
+       },
+};
+
+static const struct i915_power_well_desc glk_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 2",
+               .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "dpio-common-a",
+               .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .id = BXT_DISP_PW_DPIO_CMN_A,
+               {
+                       .bxt.phy = DPIO_PHY1,
+               },
+       },
+       {
+               .name = "dpio-common-b",
+               .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .id = VLV_DISP_PW_DPIO_CMN_BC,
+               {
+                       .bxt.phy = DPIO_PHY0,
+               },
+       },
+       {
+               .name = "dpio-common-c",
+               .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .id = GLK_DISP_PW_DPIO_CMN_C,
+               {
+                       .bxt.phy = DPIO_PHY2,
+               },
+       },
+       {
+               .name = "AUX A",
+               .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+               },
+       },
+       {
+               .name = "AUX B",
+               .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+               },
+       },
+       {
+               .name = "AUX C",
+               .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+               },
+       },
+       {
+               .name = "DDI A IO power well",
+               .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+               },
+       },
+       {
+               .name = "DDI B IO power well",
+               .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+               },
+       },
+       {
+               .name = "DDI C IO power well",
+               .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+               },
+       },
+};
+
+static const struct i915_power_well_desc cnl_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "AUX A",
+               .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+               },
+       },
+       {
+               .name = "AUX B",
+               .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+               },
+       },
+       {
+               .name = "AUX C",
+               .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+               },
+       },
+       {
+               .name = "AUX D",
+               .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 2",
+               .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DDI A IO power well",
+               .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+               },
+       },
+       {
+               .name = "DDI B IO power well",
+               .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+               },
+       },
+       {
+               .name = "DDI C IO power well",
+               .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+               },
+       },
+       {
+               .name = "DDI D IO power well",
+               .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+               },
+       },
+       {
+               .name = "DDI F IO power well",
+               .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
+               },
+       },
+       {
+               .name = "AUX F",
+               .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
+               },
+       },
+};
+
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = icl_combo_phy_aux_power_well_enable,
+       .disable = icl_combo_phy_aux_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = icl_tc_phy_aux_power_well_enable,
+       .disable = hsw_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+       .bios   = ICL_PWR_WELL_CTL_AUX1,
+       .driver = ICL_PWR_WELL_CTL_AUX2,
+       .debug  = ICL_PWR_WELL_CTL_AUX4,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+       .bios   = ICL_PWR_WELL_CTL_DDI1,
+       .driver = ICL_PWR_WELL_CTL_DDI2,
+       .debug  = ICL_PWR_WELL_CTL_DDI4,
+};
+
+static const struct i915_power_well_desc icl_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 2",
+               .domains = ICL_PW_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "power well 3",
+               .domains = ICL_PW_3_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DDI A IO",
+               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+               },
+       },
+       {
+               .name = "DDI B IO",
+               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+               },
+       },
+       {
+               .name = "DDI C IO",
+               .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+               },
+       },
+       {
+               .name = "DDI D IO",
+               .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+               },
+       },
+       {
+               .name = "DDI E IO",
+               .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
+               },
+       },
+       {
+               .name = "DDI F IO",
+               .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
+               },
+       },
+       {
+               .name = "AUX A",
+               .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+               .ops = &icl_combo_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+               },
+       },
+       {
+               .name = "AUX B",
+               .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+               .ops = &icl_combo_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+               },
+       },
+       {
+               .name = "AUX C",
+               .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "AUX D",
+               .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "AUX E",
+               .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "AUX F",
+               .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "AUX TBT1",
+               .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+                       .hsw.is_tc_tbt = true,
+               },
+       },
+       {
+               .name = "AUX TBT2",
+               .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+                       .hsw.is_tc_tbt = true,
+               },
+       },
+       {
+               .name = "AUX TBT3",
+               .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+                       .hsw.is_tc_tbt = true,
+               },
+       },
+       {
+               .name = "AUX TBT4",
+               .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+               .ops = &icl_tc_phy_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+                       .hsw.is_tc_tbt = true,
+               },
+       },
+       {
+               .name = "power well 4",
+               .domains = ICL_PW_4_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_C),
+               },
+       },
+};
+
+static int
+sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
+                                  int disable_power_well)
+{
+       if (disable_power_well >= 0)
+               return !!disable_power_well;
+
+       return 1;
+}
+
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+                              int enable_dc)
+{
+       u32 mask;
+       int requested_dc;
+       int max_dc;
+
+       if (INTEL_GEN(dev_priv) >= 11) {
+               max_dc = 2;
+               /*
+                * DC9 has a separate HW flow from the rest of the DC states,
+                * not depending on the DMC firmware. It's needed by system
+                * suspend/resume, so allow it unconditionally.
+                */
+               mask = DC_STATE_EN_DC9;
+       } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
+               max_dc = 2;
+               mask = 0;
+       } else if (IS_GEN9_LP(dev_priv)) {
+               max_dc = 1;
+               mask = DC_STATE_EN_DC9;
+       } else {
+               max_dc = 0;
+               mask = 0;
+       }
+
+       if (!i915_modparams.disable_power_well)
+               max_dc = 0;
+
+       if (enable_dc >= 0 && enable_dc <= max_dc) {
+               requested_dc = enable_dc;
+       } else if (enable_dc == -1) {
+               requested_dc = max_dc;
+       } else if (enable_dc > max_dc && enable_dc <= 2) {
+               DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
+                             enable_dc, max_dc);
+               requested_dc = max_dc;
+       } else {
+               DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+               requested_dc = max_dc;
+       }
+
+       if (requested_dc > 1)
+               mask |= DC_STATE_EN_UPTO_DC6;
+       if (requested_dc > 0)
+               mask |= DC_STATE_EN_UPTO_DC5;
+
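+       /*
+        * Example: on gen11 with disable_power_well != 0 and enable_dc == -1,
+        * the resulting mask is
+        * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
+        */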
+       DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+
+       return mask;
+}
+
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+                 const struct i915_power_well_desc *power_well_descs,
+                 int power_well_count)
+{
+       u64 power_well_ids = 0;
+       int i;
+
+       power_domains->power_well_count = power_well_count;
+       power_domains->power_wells =
+                               kcalloc(power_well_count,
+                                       sizeof(*power_domains->power_wells),
+                                       GFP_KERNEL);
+       if (!power_domains->power_wells)
+               return -ENOMEM;
+
+       for (i = 0; i < power_well_count; i++) {
+               enum i915_power_well_id id = power_well_descs[i].id;
+
+               power_domains->power_wells[i].desc = &power_well_descs[i];
+
+               if (id == DISP_PW_ID_NONE)
+                       continue;
+
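+               /*
+                * Each well with a real ID must have a unique ID that fits
+                * in the u64 power_well_ids bitmask checked below.
+                */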
+               WARN_ON(id >= sizeof(power_well_ids) * 8);
+               WARN_ON(power_well_ids & BIT_ULL(id));
+               power_well_ids |= BIT_ULL(id);
+       }
+
+       return 0;
+}
+
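+/*
+ * Convenience wrapper: lets callers pass a descriptor array without spelling
+ * out its element count.
+ */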
+#define set_power_wells(power_domains, __power_well_descs) \
+       __set_power_wells(power_domains, __power_well_descs, \
+                         ARRAY_SIZE(__power_well_descs))
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       int err;
+
+       i915_modparams.disable_power_well =
+               sanitize_disable_power_well_option(dev_priv,
+                                                  i915_modparams.disable_power_well);
+       dev_priv->csr.allowed_dc_mask =
+               get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+
+       BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
+
+       mutex_init(&power_domains->lock);
+
+       INIT_DELAYED_WORK(&power_domains->async_put_work,
+                         intel_display_power_put_async_work);
+
+       /*
+        * The enabling order will be from lower to higher indexed wells,
+        * the disabling order is reversed.
+        */
+       if (IS_GEN(dev_priv, 11)) {
+               err = set_power_wells(power_domains, icl_power_wells);
+       } else if (IS_CANNONLAKE(dev_priv)) {
+               err = set_power_wells(power_domains, cnl_power_wells);
+
+               /*
+                * DDI and AUX IO power wells get enabled for all ports
+                * regardless of presence or use, so in order to avoid
+                * timeouts let's remove them from the list for the SKUs
+                * without port F.
+                */
+               if (!IS_CNL_WITH_PORT_F(dev_priv))
+                       power_domains->power_well_count -= 2;
+       } else if (IS_GEMINILAKE(dev_priv)) {
+               err = set_power_wells(power_domains, glk_power_wells);
+       } else if (IS_BROXTON(dev_priv)) {
+               err = set_power_wells(power_domains, bxt_power_wells);
+       } else if (IS_GEN9_BC(dev_priv)) {
+               err = set_power_wells(power_domains, skl_power_wells);
+       } else if (IS_CHERRYVIEW(dev_priv)) {
+               err = set_power_wells(power_domains, chv_power_wells);
+       } else if (IS_BROADWELL(dev_priv)) {
+               err = set_power_wells(power_domains, bdw_power_wells);
+       } else if (IS_HASWELL(dev_priv)) {
+               err = set_power_wells(power_domains, hsw_power_wells);
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               err = set_power_wells(power_domains, vlv_power_wells);
+       } else if (IS_I830(dev_priv)) {
+               err = set_power_wells(power_domains, i830_power_wells);
+       } else {
+               err = set_power_wells(power_domains, i9xx_always_on_power_well);
+       }
+
+       return err;
+}
+
+/**
+ * intel_power_domains_cleanup - clean up power domains resources
+ * @dev_priv: i915 device instance
+ *
+ * Release any resources acquired by intel_power_domains_init()
+ */
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+{
+       kfree(dev_priv->power_domains.power_wells);
+}
+
+static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+
+       mutex_lock(&power_domains->lock);
+       for_each_power_well(dev_priv, power_well) {
+               power_well->desc->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled =
+                       power_well->desc->ops->is_enabled(dev_priv, power_well);
+       }
+       mutex_unlock(&power_domains->lock);
+}
+
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+                         i915_reg_t reg, bool enable)
+{
+       u32 val, status;
+
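+       /*
+        * Set or clear the power request bit, then check that the status bit
+        * follows suit after a short delay.
+        */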
+       val = I915_READ(reg);
+       val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+       I915_WRITE(reg, val);
+       POSTING_READ(reg);
+       udelay(10);
+
+       status = I915_READ(reg) & DBUF_POWER_STATE;
+       if ((enable && !status) || (!enable && status)) {
+               DRM_ERROR("DBuf power %s timeout!\n",
+                         enable ? "enable" : "disable");
+               return false;
+       }
+       return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+       intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+       intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
+
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) < 11)
+               return 1;
+       return 2;
+}
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+                           u8 req_slices)
+{
+       const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+       bool ret;
+
+       if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+               DRM_ERROR("Invalid number of dbuf slices requested\n");
+               return;
+       }
+
+       if (req_slices == hw_enabled_slices || req_slices == 0)
+               return;
+
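+       /*
+        * Only the second slice (DBUF_CTL_S2) is toggled here; the first
+        * slice is assumed to stay enabled (see the FIXMEs in
+        * icl_dbuf_enable()/icl_dbuf_disable()).
+        */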
+       if (req_slices > hw_enabled_slices)
+               ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+       else
+               ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+       if (ret)
+               dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+}
+
+static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
+       I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
+       POSTING_READ(DBUF_CTL_S2);
+
+       udelay(10);
+
+       if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+           !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+               DRM_ERROR("DBuf power enable timeout\n");
+       else
+               /*
+                * FIXME: for now pretend that we only have 1 slice, see
+                * intel_enabled_dbuf_slices_num().
+                */
+               dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+}
+
+static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
+       I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
+       POSTING_READ(DBUF_CTL_S2);
+
+       udelay(10);
+
+       if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+           (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+               DRM_ERROR("DBuf power disable timeout!\n");
+       else
+               /*
+                * FIXME: for now pretend that the first slice is always
+                * enabled, see intel_enabled_dbuf_slices_num().
+                */
+               dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+}
+
+static void icl_mbus_init(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+             MBUS_ABOX_BT_CREDIT_POOL2(16) |
+             MBUS_ABOX_B_CREDIT(1) |
+             MBUS_ABOX_BW_CREDIT(1);
+
+       I915_WRITE(MBUS_ABOX_CTL, val);
+}
+
+static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+{
+       u32 val = I915_READ(LCPLL_CTL);
+
+       /*
+        * The LCPLL should be turned on by the BIOS. For now let's just
+        * check its state and print errors in case something is wrong.
+        * Don't even try to turn it on.
+        */
+
+       if (val & LCPLL_CD_SOURCE_FCLK)
+               DRM_ERROR("CDCLK source is not LCPLL\n");
+
+       if (val & LCPLL_PLL_DISABLE)
+               DRM_ERROR("LCPLL is disabled\n");
+
+       if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
+               DRM_ERROR("LCPLL not using non-SSC reference\n");
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct intel_crtc *crtc;
+
+       for_each_intel_crtc(dev, crtc)
+               I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
+                               pipe_name(crtc->pipe));
+
+       I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+                       "Display power well on\n");
+       I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+                       "SPLL enabled\n");
+       I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+                       "WRPLL1 enabled\n");
+       I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+                       "WRPLL2 enabled\n");
+       I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+                       "Panel power on\n");
+       I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+                       "CPU PWM1 enabled\n");
+       if (IS_HASWELL(dev_priv))
+               I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+                               "CPU PWM2 enabled\n");
+       I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+                       "PCH PWM1 enabled\n");
+       I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+                       "Utility pin enabled\n");
+       I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+                       "PCH GTC enabled\n");
+
+       /*
+        * In theory we can still leave IRQs enabled, as long as only the HPD
+        * interrupts remain enabled. We used to check for that, but since it's
+        * gen-specific and since we only disable LCPLL after we fully disable
+        * the interrupts, the check below should be enough.
+        */
+       I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+       if (IS_HASWELL(dev_priv))
+               return I915_READ(D_COMP_HSW);
+       else
+               return I915_READ(D_COMP_BDW);
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+{
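+       /*
+        * HSW routes D_COMP writes through the pcode mailbox, while BDW
+        * exposes D_COMP as a regular MMIO register.
+        */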
+       if (IS_HASWELL(dev_priv)) {
+               if (sandybridge_pcode_write(dev_priv,
+                                           GEN6_PCODE_WRITE_D_COMP, val))
+                       DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+       } else {
+               I915_WRITE(D_COMP_BDW, val);
+               POSTING_READ(D_COMP_BDW);
+       }
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+                             bool switch_to_fclk, bool allow_power_down)
+{
+       u32 val;
+
+       assert_can_disable_lcpll(dev_priv);
+
+       val = I915_READ(LCPLL_CTL);
+
+       if (switch_to_fclk) {
+               val |= LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
+
+               if (wait_for_us(I915_READ(LCPLL_CTL) &
+                               LCPLL_CD_SOURCE_FCLK_DONE, 1))
+                       DRM_ERROR("Switching to FCLK failed\n");
+
+               val = I915_READ(LCPLL_CTL);
+       }
+
+       val |= LCPLL_PLL_DISABLE;
+       I915_WRITE(LCPLL_CTL, val);
+       POSTING_READ(LCPLL_CTL);
+
+       if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+                                   LCPLL_PLL_LOCK, 0, 1))
+               DRM_ERROR("LCPLL still locked\n");
+
+       val = hsw_read_dcomp(dev_priv);
+       val |= D_COMP_COMP_DISABLE;
+       hsw_write_dcomp(dev_priv, val);
+       ndelay(100);
+
+       if (wait_for((hsw_read_dcomp(dev_priv) &
+                     D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+               DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+       if (allow_power_down) {
+               val = I915_READ(LCPLL_CTL);
+               val |= LCPLL_POWER_DOWN_ALLOW;
+               I915_WRITE(LCPLL_CTL, val);
+               POSTING_READ(LCPLL_CTL);
+       }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(LCPLL_CTL);
+
+       if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+                   LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+               return;
+
+       /*
+        * Make sure we're not in the PC8 state before disabling PC8, otherwise
+        * we'll hang the machine. To prevent PC8 entry, just enable force_wake.
+        */
+       intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+       if (val & LCPLL_POWER_DOWN_ALLOW) {
+               val &= ~LCPLL_POWER_DOWN_ALLOW;
+               I915_WRITE(LCPLL_CTL, val);
+               POSTING_READ(LCPLL_CTL);
+       }
+
+       val = hsw_read_dcomp(dev_priv);
+       val |= D_COMP_COMP_FORCE;
+       val &= ~D_COMP_COMP_DISABLE;
+       hsw_write_dcomp(dev_priv, val);
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_PLL_DISABLE;
+       I915_WRITE(LCPLL_CTL, val);
+
+       if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+                                   LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+               DRM_ERROR("LCPLL not locked yet\n");
+
+       if (val & LCPLL_CD_SOURCE_FCLK) {
+               val = I915_READ(LCPLL_CTL);
+               val &= ~LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
+
+               if (wait_for_us((I915_READ(LCPLL_CTL) &
+                                LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+                       DRM_ERROR("Switching back to LCPLL failed\n");
+       }
+
+       intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+
+       intel_update_cdclk(dev_priv);
+       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually take
+ * care of the rest: disable the interrupts and clocks, and switch the LCPLL
+ * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
+ * hard hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" in the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       DRM_DEBUG_KMS("Enabling package C8+\n");
+
+       if (HAS_PCH_LPT_LP(dev_priv)) {
+               val = I915_READ(SOUTH_DSPCLK_GATE_D);
+               val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+       }
+
+       lpt_disable_clkout_dp(dev_priv);
+       hsw_disable_lcpll(dev_priv, true, true);
+}
+
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       DRM_DEBUG_KMS("Disabling package C8+\n");
+
+       hsw_restore_lcpll(dev_priv);
+       intel_init_pch_refclk(dev_priv);
+
+       if (HAS_PCH_LPT_LP(dev_priv)) {
+               val = I915_READ(SOUTH_DSPCLK_GATE_D);
+               val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+       }
+}
+
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+                                     bool enable)
+{
+       i915_reg_t reg;
+       u32 reset_bits, val;
+
+       if (IS_IVYBRIDGE(dev_priv)) {
+               reg = GEN7_MSG_CTL;
+               reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+       } else {
+               reg = HSW_NDE_RSTWRN_OPT;
+               reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+       }
+
+       val = I915_READ(reg);
+
+       if (enable)
+               val |= reset_bits;
+       else
+               val &= ~reset_bits;
+
+       I915_WRITE(reg, val);
+}
+
+static void skl_display_core_init(struct drm_i915_private *dev_priv,
+                                 bool resume)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /* enable PCH reset handshake */
+       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+       /* enable PG1 and Misc I/O */
+       mutex_lock(&power_domains->lock);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+       intel_power_well_enable(dev_priv, well);
+
+       mutex_unlock(&power_domains->lock);
+
+       intel_cdclk_init(dev_priv);
+
+       gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
+}
+
+static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       gen9_dbuf_disable(dev_priv);
+
+       intel_cdclk_uninit(dev_priv);
+
+       /* The spec doesn't call for removing the reset handshake flag */
+       /* disable PG1 and Misc I/O */
+
+       mutex_lock(&power_domains->lock);
+
+       /*
+        * BSpec says to keep the MISC IO power well enabled here, only
+        * remove our request for power well 1.
+        * Note that even though the driver's request is removed, power well 1
+        * may stay enabled after this due to DMC's own request on it.
+        */
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+
+       mutex_unlock(&power_domains->lock);
+
+       usleep_range(10, 30);           /* 10 us delay per Bspec */
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+                          bool resume)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /*
+        * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+        * or else the reset will hang because there is no PCH to respond.
+        * Move the handshake programming to the initialization sequence;
+        * previously it was left up to the BIOS.
+        */
+       intel_pch_reset_handshake(dev_priv, false);
+
+       /* Enable PG1 */
+       mutex_lock(&power_domains->lock);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+
+       mutex_unlock(&power_domains->lock);
+
+       intel_cdclk_init(dev_priv);
+
+       gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       gen9_dbuf_disable(dev_priv);
+
+       intel_cdclk_uninit(dev_priv);
+
+       /* The spec doesn't call for removing the reset handshake flag */
+
+       /*
+        * Disable PW1 (PG1).
+        * Note that even though the driver's request is removed, power well 1
+        * may stay enabled after this due to DMC's own request on it.
+        */
+       mutex_lock(&power_domains->lock);
+
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+
+       mutex_unlock(&power_domains->lock);
+
+       usleep_range(10, 30);           /* 10 us delay per Bspec */
+}
+
+static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /* 1. Enable PCH Reset Handshake */
+       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+       /* 2-3. */
+       intel_combo_phy_init(dev_priv);
+
+       /*
+        * 4. Enable Power Well 1 (PG1).
+        *    The AUX IO power wells will be enabled on demand.
+        */
+       mutex_lock(&power_domains->lock);
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+       mutex_unlock(&power_domains->lock);
+
+       /* 5. Enable CD clock */
+       intel_cdclk_init(dev_priv);
+
+       /* 6. Enable DBUF */
+       gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
+}
+
+static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /* 1. Disable all display engine functions -> already done */
+
+       /* 2. Disable DBUF */
+       gen9_dbuf_disable(dev_priv);
+
+       /* 3. Disable CD clock */
+       intel_cdclk_uninit(dev_priv);
+
+       /*
+        * 4. Disable Power Well 1 (PG1).
+        *    The AUX IO power wells are toggled on demand, so they are already
+        *    disabled at this point.
+        */
+       mutex_lock(&power_domains->lock);
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+       mutex_unlock(&power_domains->lock);
+
+       usleep_range(10, 30);           /* 10 us delay per Bspec */
+
+       /* 5. */
+       intel_combo_phy_uninit(dev_priv);
+}
+
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+                          bool resume)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /* 1. Enable PCH reset handshake. */
+       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+       /* 2. Initialize all combo phys */
+       intel_combo_phy_init(dev_priv);
+
+       /*
+        * 3. Enable Power Well 1 (PG1).
+        *    The AUX IO power wells will be enabled on demand.
+        */
+       mutex_lock(&power_domains->lock);
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+       mutex_unlock(&power_domains->lock);
+
+       /* 4. Enable CDCLK. */
+       intel_cdclk_init(dev_priv);
+
+       /* 5. Enable DBUF. */
+       icl_dbuf_enable(dev_priv);
+
+       /* 6. Setup MBUS. */
+       icl_mbus_init(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
+}
+
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
+
+       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+       /* 1. Disable all display engine functions -> already done */
+
+       /* 2. Disable DBUF */
+       icl_dbuf_disable(dev_priv);
+
+       /* 3. Disable CD clock */
+       intel_cdclk_uninit(dev_priv);
+
+       /*
+        * 4. Disable Power Well 1 (PG1).
+        *    The AUX IO power wells are toggled on demand, so they are already
+        *    disabled at this point.
+        */
+       mutex_lock(&power_domains->lock);
+       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+       mutex_unlock(&power_domains->lock);
+
+       /* 5. Uninit all combo phys */
+       intel_combo_phy_uninit(dev_priv);
+}
+
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn_bc =
+               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+       struct i915_power_well *cmn_d =
+               lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+
+       /*
+        * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+        * workaround never ever read DISPLAY_PHY_CONTROL, and
+        * instead maintain a shadow copy ourselves. Use the actual
+        * power well state and lane status to reconstruct the
+        * expected initial value.
+        */
+       dev_priv->chv_phy_control =
+               PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+               PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+       /*
+        * If all lanes are disabled we leave the override disabled
+        * with all power down bits cleared to match the state we
+        * would use after disabling the port. Otherwise enable the
+        * override and set the lane powerdown bits according to the
+        * current lane status.
+        */
+       if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+               u32 status = I915_READ(DPLL(PIPE_A));
+               unsigned int mask;
+
+               mask = status & DPLL_PORTB_READY_MASK;
+               if (mask == 0xf)
+                       mask = 0x0;
+               else
+                       dev_priv->chv_phy_control |=
+                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+               dev_priv->chv_phy_control |=
+                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+               mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+               if (mask == 0xf)
+                       mask = 0x0;
+               else
+                       dev_priv->chv_phy_control |=
+                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+               dev_priv->chv_phy_control |=
+                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
+               dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+
+               dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+       } else {
+               dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+       }
+
+       if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+               u32 status = I915_READ(DPIO_PHY_STATUS);
+               unsigned int mask;
+
+               mask = status & DPLL_PORTD_READY_MASK;
+
+               if (mask == 0xf)
+                       mask = 0x0;
+               else
+                       dev_priv->chv_phy_control |=
+                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+               dev_priv->chv_phy_control |=
+                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
+               dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+
+               dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+       } else {
+               dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+       }
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
+                     dev_priv->chv_phy_control);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn =
+               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+       struct i915_power_well *disp2d =
+               lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+
+       /* If the display might already be active, skip this */
+       if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
+           disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
+           I915_READ(DPIO_CTL) & DPIO_CMNRST)
+               return;
+
+       DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+       /* cmnlane needs DPLL registers */
+       disp2d->desc->ops->enable(dev_priv, disp2d);
+
+       /*
+        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+        * Need to assert and de-assert PHY SB reset by gating the
+        * common lane power, then un-gating it.
+        * Simply ungating isn't enough to reset the PHY enough to get
+        * ports and lanes running.
+        */
+       cmn->desc->ops->disable(dev_priv, cmn);
+}
+
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+       bool ret;
+
+       vlv_punit_get(dev_priv);
+       ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+       vlv_punit_put(dev_priv);
+
+       return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+       WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+            "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+       static const struct pci_device_id isp_ids[] = {
+               {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+               {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+               {}
+       };
+
+       WARN(!pci_dev_present(isp_ids) &&
+            !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+            "ISP not power gated\n");
+}
+
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @i915: i915 device instance
+ * @resume: true if called from a resume code path
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power wells belonging to the INIT power domain. Power wells in other
+ * domains (and not in the INIT domain) are referenced or disabled by
+ * intel_modeset_readout_hw_state(). After that the reference count of each
+ * power well must match its HW enabled state, see
+ * intel_power_domains_verify_state().
+ *
+ * It will return with power domains disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_fini_hw().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+
+       power_domains->initializing = true;
+
+       if (INTEL_GEN(i915) >= 11) {
+               icl_display_core_init(i915, resume);
+       } else if (IS_CANNONLAKE(i915)) {
+               cnl_display_core_init(i915, resume);
+       } else if (IS_GEN9_BC(i915)) {
+               skl_display_core_init(i915, resume);
+       } else if (IS_GEN9_LP(i915)) {
+               bxt_display_core_init(i915, resume);
+       } else if (IS_CHERRYVIEW(i915)) {
+               mutex_lock(&power_domains->lock);
+               chv_phy_control_init(i915);
+               mutex_unlock(&power_domains->lock);
+               assert_isp_power_gated(i915);
+       } else if (IS_VALLEYVIEW(i915)) {
+               mutex_lock(&power_domains->lock);
+               vlv_cmnlane_wa(i915);
+               mutex_unlock(&power_domains->lock);
+               assert_ved_power_gated(i915);
+               assert_isp_power_gated(i915);
+       } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
+               hsw_assert_cdclk(i915);
+               intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+       } else if (IS_IVYBRIDGE(i915)) {
+               intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+       }
+
+       /*
+        * Keep all power wells enabled for any dependent HW access during
+        * initialization and to make sure we keep BIOS enabled display HW
+        * resources powered until display HW readout is complete. We drop
+        * this reference in intel_power_domains_enable().
+        */
+       power_domains->wakeref =
+               intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+       /* Disable power well support if the user asked for it. */
+       if (!i915_modparams.disable_power_well)
+               intel_display_power_get(i915, POWER_DOMAIN_INIT);
+       intel_power_domains_sync_hw(i915);
+
+       power_domains->initializing = false;
+}
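+
+/*
+ * Example (illustrative only, not a real caller): the expected pairing at
+ * driver load and unload, as described in the kernel-doc above:
+ *
+ *      intel_power_domains_init_hw(i915, false);
+ *      ... display HW state readout takes its own power references ...
+ *      intel_power_domains_enable(i915);
+ *      ...
+ *      intel_power_domains_disable(i915);
+ *      intel_power_domains_fini_hw(i915);
+ */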
+
+/**
+ * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * @i915: i915 device instance
+ *
+ * De-initializes the display power domain HW state. It also ensures that the
+ * device stays powered up so that the driver can be reloaded.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and must be paired with
+ * intel_power_domains_init_hw().
+ */
+void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+{
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&i915->power_domains.wakeref);
+
+       /* Remove the refcount we took to keep power well support disabled. */
+       if (!i915_modparams.disable_power_well)
+               intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+       intel_display_power_flush_work_sync(i915);
+
+       intel_power_domains_verify_state(i915);
+
+       /* Keep the power well enabled, but cancel its rpm wakeref. */
+       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_power_domains_enable - enable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Enable the on-demand enabling/disabling of the display power wells. Note that
+ * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
+ * only at specific points of the display modeset sequence, thus they are not
+ * affected by the intel_power_domains_enable()/disable() calls. The purpose
+ * of these functions is to keep the rest of the power wells enabled until the
+ * end of display HW readout (which will acquire the power references reflecting
+ * the current HW state).
+ */
+void intel_power_domains_enable(struct drm_i915_private *i915)
+{
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&i915->power_domains.wakeref);
+
+       intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+       intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_disable - disable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Disable the on-demand enabling/disabling of the display power wells. See
+ * intel_power_domains_enable() for which power wells this call controls.
+ */
+void intel_power_domains_disable(struct drm_i915_private *i915)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+
+       WARN_ON(power_domains->wakeref);
+       power_domains->wakeref =
+               intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+       intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_suspend - suspend power domain state
+ * @i915: i915 device instance
+ * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
+ *
+ * This function prepares the hardware power domain state before entering
+ * system suspend.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and paired with intel_power_domains_resume().
+ */
+void intel_power_domains_suspend(struct drm_i915_private *i915,
+                                enum i915_drm_suspend_mode suspend_mode)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+       intel_wakeref_t wakeref __maybe_unused =
+               fetch_and_zero(&power_domains->wakeref);
+
+       intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+       /*
+        * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
+        * support, don't manually deinit the power domains. This also means the
+        * CSR/DMC firmware will stay active; it will power down any HW
+        * resources as required and also enable deeper system power states
+        * that would be blocked if the firmware was inactive.
+        */
+       if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+           suspend_mode == I915_DRM_SUSPEND_IDLE &&
+           i915->csr.dmc_payload) {
+               intel_display_power_flush_work(i915);
+               intel_power_domains_verify_state(i915);
+               return;
+       }
+
+       /*
+        * Even if power well support was disabled we still want to disable
+        * power wells if power domains must be deinitialized for suspend.
+        */
+       if (!i915_modparams.disable_power_well)
+               intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+       intel_display_power_flush_work(i915);
+       intel_power_domains_verify_state(i915);
+
+       if (INTEL_GEN(i915) >= 11)
+               icl_display_core_uninit(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_display_core_uninit(i915);
+       else if (IS_GEN9_BC(i915))
+               skl_display_core_uninit(i915);
+       else if (IS_GEN9_LP(i915))
+               bxt_display_core_uninit(i915);
+
+       power_domains->display_core_suspended = true;
+}
+
+/**
+ * intel_power_domains_resume - resume power domain state
+ * @i915: i915 device instance
+ *
+ * This function resumes the hardware power domain state during system resume.
+ *
+ * It will return with power domain support disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_suspend().
+ */
+void intel_power_domains_resume(struct drm_i915_private *i915)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+
+       if (power_domains->display_core_suspended) {
+               intel_power_domains_init_hw(i915, true);
+               power_domains->display_core_suspended = false;
+       } else {
+               WARN_ON(power_domains->wakeref);
+               power_domains->wakeref =
+                       intel_display_power_get(i915, POWER_DOMAIN_INIT);
+       }
+
+       intel_power_domains_verify_state(i915);
+}
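+
+/*
+ * Example (illustrative only): the system suspend/resume ordering described by
+ * the kernel-docs above; I915_DRM_SUSPEND_MEM stands in for whichever target
+ * state the caller actually selects:
+ *
+ *      intel_power_domains_disable(i915);
+ *      intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
+ *      ... system suspend / resume ...
+ *      intel_power_domains_resume(i915);
+ *      intel_power_domains_enable(i915);
+ */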
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+       struct i915_power_well *power_well;
+
+       for_each_power_well(i915, power_well) {
+               enum intel_display_power_domain domain;
+
+               DRM_DEBUG_DRIVER("%-25s %d\n",
+                                power_well->desc->name, power_well->count);
+
+               for_each_power_domain(domain, power_well->desc->domains)
+                       DRM_DEBUG_DRIVER("  %-23s %d\n",
+                                        intel_display_power_domain_str(domain),
+                                        power_domains->domain_use_count[domain]);
+       }
+}
+
+/**
+ * intel_power_domains_verify_state - verify the HW/SW state for all power wells
+ * @i915: i915 device instance
+ *
+ * Verify if the reference count of each power well matches its HW enabled
+ * state and the total refcount of the domains it belongs to. This must be
+ * called after modeset HW state sanitization, which is responsible for
+ * acquiring reference counts for any power wells in use and disabling the
+ * ones left on by BIOS but not required by any active output.
+ */
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+       struct i915_power_domains *power_domains = &i915->power_domains;
+       struct i915_power_well *power_well;
+       bool dump_domain_info;
+
+       mutex_lock(&power_domains->lock);
+
+       verify_async_put_domains_state(power_domains);
+
+       dump_domain_info = false;
+       for_each_power_well(i915, power_well) {
+               enum intel_display_power_domain domain;
+               int domains_count;
+               bool enabled;
+
+               enabled = power_well->desc->ops->is_enabled(i915, power_well);
+               if ((power_well->count || power_well->desc->always_on) !=
+                   enabled)
+                       DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
+                                 power_well->desc->name,
+                                 power_well->count, enabled);
+
+               domains_count = 0;
+               for_each_power_domain(domain, power_well->desc->domains)
+                       domains_count += power_domains->domain_use_count[domain];
+
+               if (power_well->count != domains_count) {
+                       DRM_ERROR("power well %s refcount/domain refcount mismatch "
+                                 "(refcount %d/domains refcount %d)\n",
+                                 power_well->desc->name, power_well->count,
+                                 domains_count);
+                       dump_domain_info = true;
+               }
+       }
+
+       if (dump_domain_info) {
+               static bool dumped;
+
+               if (!dumped) {
+                       intel_power_domains_dump_info(i915);
+                       dumped = true;
+               }
+       }
+
+       mutex_unlock(&power_domains->lock);
+}
+
+#else
+
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
new file mode 100644 (file)
index 0000000..ff57b0a
--- /dev/null
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_H__
+#define __INTEL_DISPLAY_POWER_H__
+
+#include "intel_display.h"
+#include "intel_runtime_pm.h"
+#include "i915_reg.h"
+
+struct drm_i915_private;
+struct intel_encoder;
+
+enum intel_display_power_domain {
+       POWER_DOMAIN_DISPLAY_CORE,
+       POWER_DOMAIN_PIPE_A,
+       POWER_DOMAIN_PIPE_B,
+       POWER_DOMAIN_PIPE_C,
+       POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+       POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+       POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+       POWER_DOMAIN_TRANSCODER_A,
+       POWER_DOMAIN_TRANSCODER_B,
+       POWER_DOMAIN_TRANSCODER_C,
+       POWER_DOMAIN_TRANSCODER_EDP,
+       POWER_DOMAIN_TRANSCODER_EDP_VDSC,
+       POWER_DOMAIN_TRANSCODER_DSI_A,
+       POWER_DOMAIN_TRANSCODER_DSI_C,
+       POWER_DOMAIN_PORT_DDI_A_LANES,
+       POWER_DOMAIN_PORT_DDI_B_LANES,
+       POWER_DOMAIN_PORT_DDI_C_LANES,
+       POWER_DOMAIN_PORT_DDI_D_LANES,
+       POWER_DOMAIN_PORT_DDI_E_LANES,
+       POWER_DOMAIN_PORT_DDI_F_LANES,
+       POWER_DOMAIN_PORT_DDI_A_IO,
+       POWER_DOMAIN_PORT_DDI_B_IO,
+       POWER_DOMAIN_PORT_DDI_C_IO,
+       POWER_DOMAIN_PORT_DDI_D_IO,
+       POWER_DOMAIN_PORT_DDI_E_IO,
+       POWER_DOMAIN_PORT_DDI_F_IO,
+       POWER_DOMAIN_PORT_DSI,
+       POWER_DOMAIN_PORT_CRT,
+       POWER_DOMAIN_PORT_OTHER,
+       POWER_DOMAIN_VGA,
+       POWER_DOMAIN_AUDIO,
+       POWER_DOMAIN_AUX_A,
+       POWER_DOMAIN_AUX_B,
+       POWER_DOMAIN_AUX_C,
+       POWER_DOMAIN_AUX_D,
+       POWER_DOMAIN_AUX_E,
+       POWER_DOMAIN_AUX_F,
+       POWER_DOMAIN_AUX_IO_A,
+       POWER_DOMAIN_AUX_TBT1,
+       POWER_DOMAIN_AUX_TBT2,
+       POWER_DOMAIN_AUX_TBT3,
+       POWER_DOMAIN_AUX_TBT4,
+       POWER_DOMAIN_GMBUS,
+       POWER_DOMAIN_MODESET,
+       POWER_DOMAIN_GT_IRQ,
+       POWER_DOMAIN_INIT,
+
+       POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+               ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+       ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+        (tran) + POWER_DOMAIN_TRANSCODER_A)
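+
+/*
+ * For illustration only: these helpers rely on the pipe/transcoder enums and
+ * the power domain enum above being laid out in the same order, so the mapping
+ * is plain arithmetic, e.g.
+ *
+ *      POWER_DOMAIN_PIPE(PIPE_B) == POWER_DOMAIN_PIPE_B
+ *      POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP
+ */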
+
+struct i915_power_well;
+
+struct i915_power_well_ops {
+       /*
+        * Synchronize the well's hw state to match the current sw state, for
+        * example enable/disable it based on the current refcount. Called
+        * during driver init and resume time, possibly after first calling
+        * the enable/disable handlers.
+        */
+       void (*sync_hw)(struct drm_i915_private *dev_priv,
+                       struct i915_power_well *power_well);
+       /*
+        * Enable the well and resources that depend on it (for example
+        * interrupts located on the well). Called after the 0->1 refcount
+        * transition.
+        */
+       void (*enable)(struct drm_i915_private *dev_priv,
+                      struct i915_power_well *power_well);
+       /*
+        * Disable the well and resources that depend on it. Called after
+        * the 1->0 refcount transition.
+        */
+       void (*disable)(struct drm_i915_private *dev_priv,
+                       struct i915_power_well *power_well);
+       /* Returns the hw enabled state. */
+       bool (*is_enabled)(struct drm_i915_private *dev_priv,
+                          struct i915_power_well *power_well);
+};
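+
+/*
+ * Hypothetical example of how a trivial, always-on well could fill in these
+ * hooks; the handler names below are made up purely for illustration:
+ *
+ *      static const struct i915_power_well_ops example_always_on_ops = {
+ *              .sync_hw = example_sync_hw_noop,
+ *              .enable = example_power_well_noop,
+ *              .disable = example_power_well_noop,
+ *              .is_enabled = example_always_enabled,   // reports true
+ *      };
+ */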
+
+struct i915_power_well_regs {
+       i915_reg_t bios;
+       i915_reg_t driver;
+       i915_reg_t kvmr;
+       i915_reg_t debug;
+};
+
+/* Power well structure for haswell */
+struct i915_power_well_desc {
+       const char *name;
+       bool always_on;
+       u64 domains;
+       /* unique identifier for this power well */
+       enum i915_power_well_id id;
+       /*
+        * Arbitrary data associated with this power well. Platform and power
+        * well specific.
+        */
+       union {
+               struct {
+                       /*
+                        * request/status flag index in the PUNIT power well
+                        * control/status registers.
+                        */
+                       u8 idx;
+               } vlv;
+               struct {
+                       enum dpio_phy phy;
+               } bxt;
+               struct {
+                       const struct i915_power_well_regs *regs;
+                       /*
+                        * request/status flag index in the power well
+                        * control/status registers.
+                        */
+                       u8 idx;
+                       /* Mask of pipes whose IRQ logic is backed by the pw */
+                       u8 irq_pipe_mask;
+                       /* The pw is backing the VGA functionality */
+                       bool has_vga:1;
+                       bool has_fuses:1;
+                       /*
+                        * The pw is for an ICL+ TypeC PHY port in
+                        * Thunderbolt mode.
+                        */
+                       bool is_tc_tbt:1;
+               } hsw;
+       };
+       const struct i915_power_well_ops *ops;
+};
+
+struct i915_power_well {
+       const struct i915_power_well_desc *desc;
+       /* power well enable/disable usage count */
+       int count;
+       /* cached hw enabled state */
+       bool hw_enabled;
+};
+
+struct i915_power_domains {
+       /*
+        * Power wells needed for initialization at driver init and suspend
+        * time are on. They are kept on until after the first modeset.
+        */
+       bool initializing;
+       bool display_core_suspended;
+       int power_well_count;
+
+       intel_wakeref_t wakeref;
+
+       struct mutex lock;
+       int domain_use_count[POWER_DOMAIN_NUM];
+
+       struct delayed_work async_put_work;
+       intel_wakeref_t async_put_wakeref;
+       u64 async_put_domains[2];
+
+       struct i915_power_well *power_wells;
+};
+
+#define for_each_power_domain(domain, mask)                            \
+       for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
+               for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well)                          \
+       for ((__power_well) = (__dev_priv)->power_domains.power_wells;  \
+            (__power_well) - (__dev_priv)->power_domains.power_wells < \
+               (__dev_priv)->power_domains.power_well_count;           \
+            (__power_well)++)
+
+#define for_each_power_well_reverse(__dev_priv, __power_well)                  \
+       for ((__power_well) = (__dev_priv)->power_domains.power_wells +         \
+                             (__dev_priv)->power_domains.power_well_count - 1; \
+            (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;     \
+            (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)    \
+       for_each_power_well(__dev_priv, __power_well)                           \
+               for_each_if((__power_well)->desc->domains & (__domain_mask))
+
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+       for_each_power_well_reverse(__dev_priv, __power_well)                   \
+               for_each_if((__power_well)->desc->domains & (__domain_mask))
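+
+/*
+ * Example (illustrative only): walking the wells that back a given domain and
+ * checking their HW state:
+ *
+ *      for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+ *              enabled = power_well->desc->ops->is_enabled(dev_priv, power_well);
+ */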
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+
+int intel_power_domains_init(struct drm_i915_private *dev_priv);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+                                enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                   enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain);
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+                                       enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+                                  enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+                                      enum intel_display_power_domain domain);
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+                                    enum intel_display_power_domain domain,
+                                    intel_wakeref_t wakeref);
+void intel_display_power_flush_work(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain,
+                            intel_wakeref_t wakeref);
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+                             enum intel_display_power_domain domain,
+                             intel_wakeref_t wakeref)
+{
+       __intel_display_power_put_async(i915, domain, wakeref);
+}
+#else
+static inline void
+intel_display_power_put(struct drm_i915_private *i915,
+                       enum intel_display_power_domain domain,
+                       intel_wakeref_t wakeref)
+{
+       intel_display_power_put_unchecked(i915, domain);
+}
+
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+                             enum intel_display_power_domain domain,
+                             intel_wakeref_t wakeref)
+{
+       __intel_display_power_put_async(i915, domain, -1);
+}
+#endif
+
+#define with_intel_display_power(i915, domain, wf) \
+       for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
+            intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
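+
+/*
+ * Example (illustrative only, domain chosen arbitrarily): taking a scoped
+ * display power reference that is released asynchronously when the statement
+ * body is left:
+ *
+ *      intel_wakeref_t wakeref;
+ *
+ *      with_intel_display_power(dev_priv, POWER_DOMAIN_PIPE_A, wakeref)
+ *              ... access pipe A display registers ...
+ */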
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+                           u8 req_slices);
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+                            bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                         enum dpio_channel ch, bool override);
+
+#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
new file mode 100644 (file)
index 0000000..7ccf7f3
--- /dev/null
@@ -0,0 +1,1088 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "display/intel_dp.h"
+
+#include "intel_dpio_phy.h"
+#include "intel_drv.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. The CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally, the PHY contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ *  pipe A == CMN/PLL/REF CH0
+ *
+ *  pipe B == CMN/PLL/REF CH1
+ *
+ *  port B == PCS/TX CH0
+ *
+ *  port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ *  pipe C == CMN/PLL/REF CH0
+ *
+ *  port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ *     Dual channel PHY (VLV/CHV/BXT)
+ *     ---------------------------------
+ *     |      CH0      |      CH1      |
+ *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
+ *     |---------------|---------------| Display PHY
+ *     | PCS01 | PCS23 | PCS01 | PCS23 |
+ *     |-------|-------|-------|-------|
+ *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ *     ---------------------------------
+ *     |     DDI0      |     DDI1      | DP/HDMI ports
+ *     ---------------------------------
+ *
+ *     Single channel PHY (CHV/BXT)
+ *     -----------------
+ *     |      CH0      |
+ *     |  CMN/PLL/REF  |
+ *     |---------------| Display PHY
+ *     | PCS01 | PCS23 |
+ *     |-------|-------|
+ *     |TX0|TX1|TX2|TX3|
+ *     -----------------
+ *     |     DDI2      | DP/HDMI port
+ *     -----------------
+ */
+
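+/*
+ * Illustrative sketch of the access pattern described above: DPIO registers
+ * are reached through the IOSF sideband rather than plain MMIO, with the
+ * access bracketed by vlv_dpio_get()/vlv_dpio_put(), e.g.
+ *
+ *      vlv_dpio_get(dev_priv);
+ *      val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ *      vlv_dpio_put(dev_priv);
+ */
+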
+/**
+ * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ */
+struct bxt_ddi_phy_info {
+       /**
+        * @dual_channel: true if this phy has a second channel.
+        */
+       bool dual_channel;
+
+       /**
+        * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+        * Otherwise the GRC value will be copied from the phy indicated by
+        * this field.
+        */
+       enum dpio_phy rcomp_phy;
+
+       /**
+        * @reset_delay: delay in us to wait before setting the common reset
+        * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
+        */
+       int reset_delay;
+
+       /**
+        * @pwron_mask: Mask with the appropriate bit set that would cause the
+        * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
+        */
+       u32 pwron_mask;
+
+       /**
+        * @channel: struct containing per channel information.
+        */
+       struct {
+               /**
+                * @channel.port: which port maps to this channel.
+                */
+               enum port port;
+       } channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+       [DPIO_PHY0] = {
+               .dual_channel = true,
+               .rcomp_phy = DPIO_PHY1,
+               .pwron_mask = BIT(0),
+
+               .channel = {
+                       [DPIO_CH0] = { .port = PORT_B },
+                       [DPIO_CH1] = { .port = PORT_C },
+               }
+       },
+       [DPIO_PHY1] = {
+               .dual_channel = false,
+               .rcomp_phy = -1,
+               .pwron_mask = BIT(1),
+
+               .channel = {
+                       [DPIO_CH0] = { .port = PORT_A },
+               }
+       },
+};
+
+static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+       [DPIO_PHY0] = {
+               .dual_channel = false,
+               .rcomp_phy = DPIO_PHY1,
+               .pwron_mask = BIT(0),
+               .reset_delay = 20,
+
+               .channel = {
+                       [DPIO_CH0] = { .port = PORT_B },
+               }
+       },
+       [DPIO_PHY1] = {
+               .dual_channel = false,
+               .rcomp_phy = -1,
+               .pwron_mask = BIT(3),
+               .reset_delay = 20,
+
+               .channel = {
+                       [DPIO_CH0] = { .port = PORT_A },
+               }
+       },
+       [DPIO_PHY2] = {
+               .dual_channel = false,
+               .rcomp_phy = DPIO_PHY1,
+               .pwron_mask = BIT(1),
+               .reset_delay = 20,
+
+               .channel = {
+                       [DPIO_CH0] = { .port = PORT_C },
+               }
+       },
+};
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+{
+       if (IS_GEMINILAKE(dev_priv)) {
+               *count = ARRAY_SIZE(glk_ddi_phy_info);
+               return glk_ddi_phy_info;
+       } else {
+               *count = ARRAY_SIZE(bxt_ddi_phy_info);
+               return bxt_ddi_phy_info;
+       }
+}
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+       int count;
+       const struct bxt_ddi_phy_info *phy_list =
+               bxt_get_phy_list(dev_priv, &count);
+
+       return &phy_list[phy];
+}
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+                            enum dpio_phy *phy, enum dpio_channel *ch)
+{
+       const struct bxt_ddi_phy_info *phy_info, *phys;
+       int i, count;
+
+       phys = bxt_get_phy_list(dev_priv, &count);
+
+       for (i = 0; i < count; i++) {
+               phy_info = &phys[i];
+
+               if (port == phy_info->channel[DPIO_CH0].port) {
+                       *phy = i;
+                       *ch = DPIO_CH0;
+                       return;
+               }
+
+               if (phy_info->dual_channel &&
+                   port == phy_info->channel[DPIO_CH1].port) {
+                       *phy = i;
+                       *ch = DPIO_CH1;
+                       return;
+               }
+       }
+
+       WARN(1, "PHY not found for PORT %c", port_name(port));
+       *phy = DPIO_PHY0;
+       *ch = DPIO_CH0;
+}
+
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+                                 enum port port, u32 margin, u32 scale,
+                                 u32 enable, u32 deemphasis)
+{
+       u32 val;
+       enum dpio_phy phy;
+       enum dpio_channel ch;
+
+       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+       /*
+        * While we write to the group register to program all lanes at once,
+        * we can only read lane registers, and we pick lanes 0/1 for that.
+        */
+       val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+       val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+       I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+       val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+       val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+       val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+       I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+       val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+       val &= ~SCALE_DCOMP_METHOD;
+       if (enable)
+               val |= SCALE_DCOMP_METHOD;
+
+       if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+               DRM_ERROR("Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
+
+       I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+       val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+       val &= ~DE_EMPHASIS;
+       val |= deemphasis << DEEMPH_SHIFT;
+       I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+       val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+       val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+       I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+                           enum dpio_phy phy)
+{
+       const struct bxt_ddi_phy_info *phy_info;
+
+       phy_info = bxt_get_phy_info(dev_priv, phy);
+
+       if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+               return false;
+
+       if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+            (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+               DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+                                phy);
+
+               return false;
+       }
+
+       if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+               DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+                                phy);
+
+               return false;
+       }
+
+       return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+       u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+       return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+                                 enum dpio_phy phy)
+{
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   BXT_PORT_REF_DW3(phy),
+                                   GRC_DONE, GRC_DONE,
+                                   10))
+               DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+                             enum dpio_phy phy)
+{
+       const struct bxt_ddi_phy_info *phy_info;
+       u32 val;
+
+       phy_info = bxt_get_phy_info(dev_priv, phy);
+
+       if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+               /* Still read out the GRC value for state verification */
+               if (phy_info->rcomp_phy != -1)
+                       dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+               if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+                       DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+                                        "won't reprogram it\n", phy);
+                       return;
+               }
+
+               DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+                                "force reprogramming it\n", phy);
+       }
+
+       val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+       val |= phy_info->pwron_mask;
+       I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+       /*
+        * The PHY registers start out inaccessible and respond to reads with
+        * all 1s.  Eventually they become accessible as they power up, then
+        * the reserved bit will give the default 0.  Poll on the reserved bit
+        * becoming 0 to find when the PHY is accessible.
+        * The flag should get set in 100us according to the HW team, but
+        * use 1ms due to occasional timeouts observed with that.
+        */
+       if (intel_wait_for_register_fw(&dev_priv->uncore,
+                                      BXT_PORT_CL1CM_DW0(phy),
+                                      PHY_RESERVED | PHY_POWER_GOOD,
+                                      PHY_POWER_GOOD,
+                                      1))
+               DRM_ERROR("timeout during PHY%d power on\n", phy);
+
+       /* Program PLL Rcomp code offset */
+       val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+       val &= ~IREF0RC_OFFSET_MASK;
+       val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+       I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+       val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+       val &= ~IREF1RC_OFFSET_MASK;
+       val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+       I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+       /* Program power gating */
+       val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+       val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+               SUS_CLK_CONFIG;
+       I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+       if (phy_info->dual_channel) {
+               val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+               val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+               I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+       }
+
+       if (phy_info->rcomp_phy != -1) {
+               u32 grc_code;
+
+               bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+
+               /*
+                * PHY0 isn't connected to an RCOMP resistor so copy over
+                * the corresponding calibrated value from PHY1, and disable
+                * the automatic calibration on PHY0.
+                */
+               val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+                                                         phy_info->rcomp_phy);
+               grc_code = val << GRC_CODE_FAST_SHIFT |
+                          val << GRC_CODE_SLOW_SHIFT |
+                          val;
+               I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+
+               val = I915_READ(BXT_PORT_REF_DW8(phy));
+               val |= GRC_DIS | GRC_RDY_OVRD;
+               I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+       }
+
+       if (phy_info->reset_delay)
+               udelay(phy_info->reset_delay);
+
+       val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+       val |= COMMON_RESET_DIS;
+       I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+}
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+       const struct bxt_ddi_phy_info *phy_info;
+       u32 val;
+
+       phy_info = bxt_get_phy_info(dev_priv, phy);
+
+       val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+       val &= ~COMMON_RESET_DIS;
+       I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+       val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+       val &= ~phy_info->pwron_mask;
+       I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+       const struct bxt_ddi_phy_info *phy_info =
+               bxt_get_phy_info(dev_priv, phy);
+       enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+       bool was_enabled;
+
+       lockdep_assert_held(&dev_priv->power_domains.lock);
+
+       was_enabled = true;
+       if (rcomp_phy != -1)
+               was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+       /*
+        * We need to copy the GRC calibration value from rcomp_phy,
+        * so make sure it's powered up.
+        */
+       if (!was_enabled)
+               _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+
+       _bxt_ddi_phy_init(dev_priv, phy);
+
+       if (!was_enabled)
+               bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                      i915_reg_t reg, u32 mask, u32 expected,
+                      const char *reg_fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+       u32 val;
+
+       val = I915_READ(reg);
+       if ((val & mask) == expected)
+               return true;
+
+       va_start(args, reg_fmt);
+       vaf.fmt = reg_fmt;
+       vaf.va = &args;
+
+       DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+                        "current %08x, expected %08x (mask %08x)\n",
+                        phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+                        mask);
+
+       va_end(args);
+
+       return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+                             enum dpio_phy phy)
+{
+       const struct bxt_ddi_phy_info *phy_info;
+       u32 mask;
+       bool ok;
+
+       phy_info = bxt_get_phy_info(dev_priv, phy);
+
+#define _CHK(reg, mask, exp, fmt, ...)                                 \
+       __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,      \
+                              ## __VA_ARGS__)
+
+       if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+               return false;
+
+       ok = true;
+
+       /* PLL Rcomp code offset */
+       ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+                   IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+                   "BXT_PORT_CL1CM_DW9(%d)", phy);
+       ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+                   IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+                   "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+       /* Power gating */
+       mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+       ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+                   "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+       if (phy_info->dual_channel)
+               ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+                          DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+                          "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+       if (phy_info->rcomp_phy != -1) {
+               u32 grc_code = dev_priv->bxt_phy_grc;
+
+               grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+                          grc_code << GRC_CODE_SLOW_SHIFT |
+                          grc_code;
+               mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+                      GRC_CODE_NOM_MASK;
+               ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+                          "BXT_PORT_REF_DW6(%d)", phy);
+
+               mask = GRC_DIS | GRC_RDY_OVRD;
+               ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+                           "BXT_PORT_REF_DW8(%d)", phy);
+       }
+
+       return ok;
+#undef _CHK
+}
+
+u8
+bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+{
+       switch (lane_count) {
+       case 1:
+               return 0;
+       case 2:
+               return BIT(2) | BIT(0);
+       case 4:
+               return BIT(3) | BIT(2) | BIT(0);
+       default:
+               MISSING_CASE(lane_count);
+
+               return 0;
+       }
+}
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+                                    u8 lane_lat_optim_mask)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
+       enum dpio_phy phy;
+       enum dpio_channel ch;
+       int lane;
+
+       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+       for (lane = 0; lane < 4; lane++) {
+               u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+               /*
+                * Note that on CHV this flag is called UPAR, but has
+                * the same function.
+                */
+               val &= ~LATENCY_OPTIM;
+               if (lane_lat_optim_mask & BIT(lane))
+                       val |= LATENCY_OPTIM;
+
+               I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+       }
+}
+
+u8
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
+       enum dpio_phy phy;
+       enum dpio_channel ch;
+       int lane;
+       u8 mask;
+
+       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+       mask = 0;
+       for (lane = 0; lane < 4; lane++) {
+               u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+               if (val & LATENCY_OPTIM)
+                       mask |= BIT(lane);
+       }
+
+       return mask;
+}
+
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 deemph_reg_value, u32 margin_reg_value,
+                             bool uniq_trans_scale)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 val;
+       int i;
+
+       vlv_dpio_get(dev_priv);
+
+       /* Clear calc init */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+               val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+               val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+               val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       }
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+               val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+               val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+       }
+
+       /* Program swing deemph */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+               val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+               val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+       }
+
+       /* Program swing margin */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+               val &= ~DPIO_SWING_MARGIN000_MASK;
+               val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+               /*
+                * Supposedly this value shouldn't matter when unique transition
+                * scale is disabled, but in fact it does matter. Let's just
+                * always program the same value and hope it's OK.
+                */
+               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+       }
+
+       /*
+        * The document said it needs to set bit 27 for ch0 and bit 26
+        * for ch1. Might be a typo in the doc.
+        * For now, for this unique transition scale selection, set bit
+        * 27 for ch0 and ch1.
+        */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+               if (uniq_trans_scale)
+                       val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+               else
+                       val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+       }
+
+       /* Start swing calculation */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+               val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       }
+
+       vlv_dpio_put(dev_priv);
+}
+
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *crtc_state,
+                             bool reset)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       u32 val;
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+       if (reset)
+               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+       else
+               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+       if (crtc_state->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+               if (reset)
+                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+               else
+                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       }
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+       val |= CHV_PCS_REQ_SOFTRESET_EN;
+       if (reset)
+               val &= ~DPIO_PCS_CLK_SOFT_RESET;
+       else
+               val |= DPIO_PCS_CLK_SOFT_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+       if (crtc_state->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+               val |= CHV_PCS_REQ_SOFTRESET_EN;
+               if (reset)
+                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
+               else
+                       val |= DPIO_PCS_CLK_SOFT_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+       }
+}
+
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = crtc->pipe;
+       unsigned int lane_mask =
+               intel_dp_unused_lane_mask(crtc_state->lane_count);
+       u32 val;
+
+       /*
+        * Must trick the second common lane into life.
+        * Otherwise we can't even access the PLL.
+        */
+       if (ch == DPIO_CH0 && pipe == PIPE_B)
+               dport->release_cl2_override =
+                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+       chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+       vlv_dpio_get(dev_priv);
+
+       /* Assert data lane reset */
+       chv_data_lane_soft_reset(encoder, crtc_state, true);
+
+       /* program left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA1_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA1_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA2_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA2_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       /* program clock channel usage */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+       if (crtc_state->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+               val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+               if (pipe != PIPE_B)
+                       val &= ~CHV_PCS_USEDCLKCHANNEL;
+               else
+                       val |= CHV_PCS_USEDCLKCHANNEL;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+       }
+
+       /*
+        * This is a bit weird since generally CL
+        * matches the pipe, but here we need to
+        * pick the CL based on the port.
+        */
+       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+       if (pipe != PIPE_B)
+               val &= ~CHV_CMN_USEDCLKCHANNEL;
+       else
+               val |= CHV_CMN_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+       vlv_dpio_put(dev_priv);
+}
+
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+                               const struct intel_crtc_state *crtc_state)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = crtc->pipe;
+       int data, i, stagger;
+       u32 val;
+
+       vlv_dpio_get(dev_priv);
+
+       /* allow hardware to manage TX FIFO reset source */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       if (crtc_state->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+               val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       }
+
+       /* Program Tx lane latency optimal setting */
+       for (i = 0; i < crtc_state->lane_count; i++) {
+               /* Set the upar bit */
+               if (crtc_state->lane_count == 1)
+                       data = 0x0;
+               else
+                       data = (i == 1) ? 0x0 : 0x1;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+                               data << DPIO_UPAR_SHIFT);
+       }
+
+       /* Data lane stagger programming */
+       if (crtc_state->port_clock > 270000)
+               stagger = 0x18;
+       else if (crtc_state->port_clock > 135000)
+               stagger = 0xd;
+       else if (crtc_state->port_clock > 67500)
+               stagger = 0x7;
+       else if (crtc_state->port_clock > 33750)
+               stagger = 0x4;
+       else
+               stagger = 0x2;
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val |= DPIO_TX2_STAGGER_MASK(0x1f);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       if (crtc_state->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+               val |= DPIO_TX2_STAGGER_MASK(0x1f);
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       }
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+                      DPIO_LANESTAGGER_STRAP(stagger) |
+                      DPIO_LANESTAGGER_STRAP_OVRD |
+                      DPIO_TX1_STAGGER_MASK(0x1f) |
+                      DPIO_TX1_STAGGER_MULT(6) |
+                      DPIO_TX2_STAGGER_MULT(0));
+
+       if (crtc_state->lane_count > 2) {
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+                              DPIO_LANESTAGGER_STRAP(stagger) |
+                              DPIO_LANESTAGGER_STRAP_OVRD |
+                              DPIO_TX1_STAGGER_MASK(0x1f) |
+                              DPIO_TX1_STAGGER_MULT(7) |
+                              DPIO_TX2_STAGGER_MULT(5));
+       }
+
+       /* Deassert data lane reset */
+       chv_data_lane_soft_reset(encoder, crtc_state, false);
+
+       vlv_dpio_put(dev_priv);
+}
+
+void chv_phy_release_cl2_override(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       if (dport->release_cl2_override) {
+               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+               dport->release_cl2_override = false;
+       }
+}
+
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *old_crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
+       u32 val;
+
+       vlv_dpio_get(dev_priv);
+
+       /* disable left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       vlv_dpio_put(dev_priv);
+
+       /*
+        * Leave the power down bit cleared for at least one
+        * lane so that chv_phy_powergate_ch() will power
+        * on something when the channel is otherwise unused.
+        * When the port is off and the override is removed
+        * the lanes power down anyway, so otherwise it doesn't
+        * really matter what the state of power down bits is
+        * after this.
+        */
+       chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 demph_reg_value, u32 preemph_reg_value,
+                             u32 uniqtranscale_reg_value, u32 tx3_demph)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+
+       vlv_dpio_get(dev_priv);
+
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+                        uniqtranscale_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+
+       if (tx3_demph)
+               vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+
+       vlv_dpio_put(dev_priv);
+}
+
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum pipe pipe = crtc->pipe;
+
+       /* Program Tx lane resets to default */
+       vlv_dpio_get(dev_priv);
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+                        DPIO_PCS_TX_LANE2_RESET |
+                        DPIO_PCS_TX_LANE1_RESET);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+                        DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+                        DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+                        (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+                                DPIO_PCS_CLK_SOFT_RESET);
+
+       /* Fix up inter-pair skew failure */
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+       vlv_dpio_put(dev_priv);
+}
+
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+                               const struct intel_crtc_state *crtc_state)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum pipe pipe = crtc->pipe;
+       u32 val;
+
+       vlv_dpio_get(dev_priv);
+
+       /* Enable clock channels for this port */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+       val = 0;
+       if (pipe)
+               val |= (1<<21);
+       else
+               val &= ~(1<<21);
+       val |= 0x001000c4;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+       /* Program lane clock */
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+       vlv_dpio_put(dev_priv);
+}
+
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+                        const struct intel_crtc_state *old_crtc_state)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum pipe pipe = crtc->pipe;
+
+       vlv_dpio_get(dev_priv);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+       vlv_dpio_put(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
new file mode 100644 (file)
index 0000000..f418aab
--- /dev/null
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DPIO_PHY_H__
+#define __INTEL_DPIO_PHY_H__
+
+#include <linux/types.h>
+
+enum dpio_channel;
+enum dpio_phy;
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+                            enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+                                 enum port port, u32 margin, u32 scale,
+                                 u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+                           enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+                             enum dpio_phy phy);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+                                    u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 deemph_reg_value, u32 margin_reg_value,
+                             bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *crtc_state,
+                             bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+                               const struct intel_crtc_state *crtc_state);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+                             const struct intel_crtc_state *old_crtc_state);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 demph_reg_value, u32 preemph_reg_value,
+                             u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+                               const struct intel_crtc_state *crtc_state);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+                        const struct intel_crtc_state *old_crtc_state);
+
+#endif /* __INTEL_DPIO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
new file mode 100644 (file)
index 0000000..2d4e7b9
--- /dev/null
@@ -0,0 +1,3359 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_dpio_phy.h"
+#include "intel_dpll_mgr.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Display PLLs
+ *
+ * Display PLLs used for driving outputs vary by platform. While some have
+ * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
+ * from a pool. In the latter scenario, it is possible that multiple pipes
+ * share a PLL if their configurations match.
+ *
+ * This file provides an abstraction over display PLLs. The function
+ * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
+ * users of a PLL are tracked and that tracking is integrated with the atomic
+ * modeset interface. During an atomic operation, a PLL can be requested for a
+ * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
+ * a previously used PLL can be released with intel_release_shared_dpll().
+ * Changes to the users are first staged in the atomic state, and then made
+ * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * commit phase.
+ */
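As a side note for readers new to this pattern: the duplicate-on-first-use and swap-at-commit flow described above is sketched below with toy structures (all names and layout are illustrative, not the driver's). It mirrors what intel_atomic_get_shared_dpll_state() and intel_shared_dpll_swap_state() do further down in this file: the atomic state lazily copies the current per-PLL bookkeeping, the check phase edits only the copy, and the commit phase swaps the copy with the live state.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the driver structures; every name here is illustrative. */
#define NUM_PLLS 2

struct toy_dpll_state { unsigned int crtc_mask; };
struct toy_device { struct toy_dpll_state live[NUM_PLLS]; };
struct toy_atomic_state {
	bool dpll_set;
	struct toy_dpll_state staged[NUM_PLLS];
};

/* Check phase: lazily duplicate the live state into the atomic state. */
static struct toy_dpll_state *get_staged(struct toy_device *dev,
					 struct toy_atomic_state *state)
{
	if (!state->dpll_set) {
		state->dpll_set = true;
		memcpy(state->staged, dev->live, sizeof(state->staged));
	}
	return state->staged;
}

/* Commit phase: make the staged configuration live by swapping the two. */
static void swap_state(struct toy_device *dev, struct toy_atomic_state *state)
{
	int i;

	if (!state->dpll_set)
		return;

	for (i = 0; i < NUM_PLLS; i++) {
		struct toy_dpll_state tmp = dev->live[i];

		dev->live[i] = state->staged[i];
		state->staged[i] = tmp;
	}
}

int main(void)
{
	struct toy_device dev = { .live = { [0] = { .crtc_mask = 0x1 } } };
	struct toy_atomic_state state = { 0 };

	/* Stage a new user of PLL 1 without touching the live state. */
	get_staged(&dev, &state)[1].crtc_mask |= 1 << 1;
	printf("before commit: live PLL1 mask 0x%x\n", dev.live[1].crtc_mask);

	swap_state(&dev, &state);
	printf("after commit:  live PLL1 mask 0x%x\n", dev.live[1].crtc_mask);
	return 0;
}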
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll_state *shared_dpll)
+{
+       enum intel_dpll_id i;
+
+       /* Copy shared dpll state */
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+               shared_dpll[i] = pll->state;
+       }
+}
+
+static struct intel_shared_dpll_state *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+       struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+       WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+       if (!state->dpll_set) {
+               state->dpll_set = true;
+
+               intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+                                                 state->shared_dpll);
+       }
+
+       return state->shared_dpll;
+}
+
+/**
+ * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * @dev_priv: i915 device instance
+ * @id: pll id
+ *
+ * Returns:
+ * A pointer to the DPLL with @id
+ */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+                           enum intel_dpll_id id)
+{
+       return &dev_priv->shared_dplls[id];
+}
+
+/**
+ * intel_get_shared_dpll_id - get the id of a DPLL
+ * @dev_priv: i915 device instance
+ * @pll: the DPLL
+ *
+ * Returns:
+ * The id of @pll
+ */
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll)
+{
+       if (WARN_ON(pll < dev_priv->shared_dplls ||
+                   pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+               return -1;
+
+       return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll,
+                       bool state)
+{
+       bool cur_state;
+       struct intel_dpll_hw_state hw_state;
+
+       if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+               return;
+
+       cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
+       I915_STATE_WARN(cur_state != state,
+            "%s assertion failure (expected %s, current %s)\n",
+                       pll->info->name, onoff(state), onoff(cur_state));
+}
+
+/**
+ * intel_prepare_shared_dpll - call a dpll's prepare hook
+ * @crtc_state: CRTC, and its state, which has a shared dpll
+ *
+ * This calls the PLL's prepare hook if it has one and if the PLL is not
+ * already enabled. The prepare hook is platform specific.
+ */
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+
+       if (WARN_ON(pll == NULL))
+               return;
+
+       mutex_lock(&dev_priv->dpll_lock);
+       WARN_ON(!pll->state.crtc_mask);
+       if (!pll->active_mask) {
+               DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
+               WARN_ON(pll->on);
+               assert_shared_dpll_disabled(dev_priv, pll);
+
+               pll->info->funcs->prepare(dev_priv, pll);
+       }
+       mutex_unlock(&dev_priv->dpll_lock);
+}
+
+/**
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
+ *
+ * Enable the shared DPLL used by the CRTC of @crtc_state.
+ */
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+       unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+       unsigned int old_mask;
+
+       if (WARN_ON(pll == NULL))
+               return;
+
+       mutex_lock(&dev_priv->dpll_lock);
+       old_mask = pll->active_mask;
+
+       if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
+           WARN_ON(pll->active_mask & crtc_mask))
+               goto out;
+
+       pll->active_mask |= crtc_mask;
+
+       DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
+                     pll->info->name, pll->active_mask, pll->on,
+                     crtc->base.base.id);
+
+       if (old_mask) {
+               WARN_ON(!pll->on);
+               assert_shared_dpll_enabled(dev_priv, pll);
+               goto out;
+       }
+       WARN_ON(pll->on);
+
+       DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+       pll->info->funcs->enable(dev_priv, pll);
+       pll->on = true;
+
+out:
+       mutex_unlock(&dev_priv->dpll_lock);
+}
+
+/**
+ * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
+ *
+ * Disable the shared DPLL used by the CRTC of @crtc_state.
+ */
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+       unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+
+       /* PCH only available on ILK+ */
+       if (INTEL_GEN(dev_priv) < 5)
+               return;
+
+       if (pll == NULL)
+               return;
+
+       mutex_lock(&dev_priv->dpll_lock);
+       if (WARN_ON(!(pll->active_mask & crtc_mask)))
+               goto out;
+
+       DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
+                     pll->info->name, pll->active_mask, pll->on,
+                     crtc->base.base.id);
+
+       assert_shared_dpll_enabled(dev_priv, pll);
+       WARN_ON(!pll->on);
+
+       pll->active_mask &= ~crtc_mask;
+       if (pll->active_mask)
+               goto out;
+
+       DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+       pll->info->funcs->disable(dev_priv, pll);
+       pll->on = false;
+
+out:
+       mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static struct intel_shared_dpll *
+intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
+                      enum intel_dpll_id range_min,
+                      enum intel_dpll_id range_max)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll, *unused_pll = NULL;
+       struct intel_shared_dpll_state *shared_dpll;
+       enum intel_dpll_id i;
+
+       shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+       for (i = range_min; i <= range_max; i++) {
+               pll = &dev_priv->shared_dplls[i];
+
+               /* Only want to check enabled timings first */
+               if (shared_dpll[i].crtc_mask == 0) {
+                       if (!unused_pll)
+                               unused_pll = pll;
+                       continue;
+               }
+
+               if (memcmp(&crtc_state->dpll_hw_state,
+                          &shared_dpll[i].hw_state,
+                          sizeof(crtc_state->dpll_hw_state)) == 0) {
+                       DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+                                     crtc->base.base.id, crtc->base.name,
+                                     pll->info->name,
+                                     shared_dpll[i].crtc_mask,
+                                     pll->active_mask);
+                       return pll;
+               }
+       }
+
+       /* Ok no matching timings, maybe there's a free one? */
+       if (unused_pll) {
+               DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+                             crtc->base.base.id, crtc->base.name,
+                             unused_pll->info->name);
+               return unused_pll;
+       }
+
+       return NULL;
+}
+
+static void
+intel_reference_shared_dpll(struct intel_shared_dpll *pll,
+                           struct intel_crtc_state *crtc_state)
+{
+       struct intel_shared_dpll_state *shared_dpll;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const enum intel_dpll_id id = pll->info->id;
+
+       shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+       if (shared_dpll[id].crtc_mask == 0)
+               shared_dpll[id].hw_state =
+                       crtc_state->dpll_hw_state;
+
+       crtc_state->shared_dpll = pll;
+       DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
+                        pipe_name(crtc->pipe));
+
+       shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
+}
+
+/**
+ * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * @state: atomic state
+ *
+ * This is the dpll version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * For consistency with atomic helpers this function does a complete swap,
+ * i.e. it also puts the current state into @state, even though there is no
+ * need for that at this moment.
+ */
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
+       struct intel_shared_dpll_state *shared_dpll;
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id i;
+
+       if (!to_intel_atomic_state(state)->dpll_set)
+               return;
+
+       shared_dpll = to_intel_atomic_state(state)->shared_dpll;
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               struct intel_shared_dpll_state tmp;
+
+               pll = &dev_priv->shared_dplls[i];
+
+               tmp = pll->state;
+               pll->state = shared_dpll[i];
+               shared_dpll[i] = tmp;
+       }
+}
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+                                     struct intel_shared_dpll *pll,
+                                     struct intel_dpll_hw_state *hw_state)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       u32 val;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(PCH_DPLL(id));
+       hw_state->dpll = val;
+       hw_state->fp0 = I915_READ(PCH_FP0(id));
+       hw_state->fp1 = I915_READ(PCH_FP1(id));
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+
+       I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
+       I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+       bool enabled;
+
+       I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+
+       val = I915_READ(PCH_DREF_CONTROL);
+       enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+                           DREF_SUPERSPREAD_SOURCE_MASK));
+       I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+
+       /* PCH refclock must be enabled first */
+       ibx_assert_pch_refclk_enabled(dev_priv);
+
+       I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(PCH_DPLL(id));
+       udelay(150);
+
+       /* The pixel multiplier can only be updated once the
+        * DPLL is enabled and the clocks are stable.
+        *
+        * So write it again.
+        */
+       I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+       POSTING_READ(PCH_DPLL(id));
+       udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+
+       I915_WRITE(PCH_DPLL(id), 0);
+       POSTING_READ(PCH_DPLL(id));
+       udelay(200);
+}
+
+static struct intel_shared_dpll *
+ibx_get_dpll(struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id i;
+
+       if (HAS_PCH_IBX(dev_priv)) {
+               /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+               i = (enum intel_dpll_id) crtc->pipe;
+               pll = &dev_priv->shared_dplls[i];
+
+               DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+                             crtc->base.base.id, crtc->base.name,
+                             pll->info->name);
+       } else {
+               pll = intel_find_shared_dpll(crtc_state,
+                                            DPLL_ID_PCH_PLL_A,
+                                            DPLL_ID_PCH_PLL_B);
+       }
+
+       if (!pll)
+               return NULL;
+
+       /* reference the pll */
+       intel_reference_shared_dpll(pll, crtc_state);
+
+       return pll;
+}
+
+static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+                     "fp0: 0x%x, fp1: 0x%x\n",
+                     hw_state->dpll,
+                     hw_state->dpll_md,
+                     hw_state->fp0,
+                     hw_state->fp1);
+}
+
+static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+       .prepare = ibx_pch_dpll_prepare,
+       .enable = ibx_pch_dpll_enable,
+       .disable = ibx_pch_dpll_disable,
+       .get_hw_state = ibx_pch_dpll_get_hw_state,
+};
+
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+                              struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+
+       I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
+       POSTING_READ(WRPLL_CTL(id));
+       udelay(20);
+}
+
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll)
+{
+       I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
+       POSTING_READ(SPLL_CTL);
+       udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       u32 val;
+
+       val = I915_READ(WRPLL_CTL(id));
+       I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+       POSTING_READ(WRPLL_CTL(id));
+}
+
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll)
+{
+       u32 val;
+
+       val = I915_READ(SPLL_CTL);
+       I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+       POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+                                      struct intel_shared_dpll *pll,
+                                      struct intel_dpll_hw_state *hw_state)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       u32 val;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(WRPLL_CTL(id));
+       hw_state->wrpll = val;
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return val & WRPLL_PLL_ENABLE;
+}
+
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+                                     struct intel_shared_dpll *pll,
+                                     struct intel_dpll_hw_state *hw_state)
+{
+       intel_wakeref_t wakeref;
+       u32 val;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(SPLL_CTL);
+       hw_state->spll = val;
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return val & SPLL_PLL_ENABLE;
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+struct hsw_wrpll_rnp {
+       unsigned p, n2, r2;
+};
+
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
+{
+       unsigned budget;
+
+       switch (clock) {
+       case 25175000:
+       case 25200000:
+       case 27000000:
+       case 27027000:
+       case 37762500:
+       case 37800000:
+       case 40500000:
+       case 40541000:
+       case 54000000:
+       case 54054000:
+       case 59341000:
+       case 59400000:
+       case 72000000:
+       case 74176000:
+       case 74250000:
+       case 81000000:
+       case 81081000:
+       case 89012000:
+       case 89100000:
+       case 108000000:
+       case 108108000:
+       case 111264000:
+       case 111375000:
+       case 148352000:
+       case 148500000:
+       case 162000000:
+       case 162162000:
+       case 222525000:
+       case 222750000:
+       case 296703000:
+       case 297000000:
+               budget = 0;
+               break;
+       case 233500000:
+       case 245250000:
+       case 247750000:
+       case 253250000:
+       case 298000000:
+               budget = 1500;
+               break;
+       case 169128000:
+       case 169500000:
+       case 179500000:
+       case 202000000:
+               budget = 2000;
+               break;
+       case 256250000:
+       case 262500000:
+       case 270000000:
+       case 272500000:
+       case 273750000:
+       case 280750000:
+       case 281250000:
+       case 286000000:
+       case 291750000:
+               budget = 4000;
+               break;
+       case 267250000:
+       case 268500000:
+               budget = 5000;
+               break;
+       default:
+               budget = 1000;
+               break;
+       }
+
+       return budget;
+}
+
+static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
+                                unsigned int r2, unsigned int n2,
+                                unsigned int p,
+                                struct hsw_wrpll_rnp *best)
+{
+       u64 a, b, c, d, diff, diff_best;
+
+       /* No best (r,n,p) yet */
+       if (best->p == 0) {
+               best->p = p;
+               best->n2 = n2;
+               best->r2 = r2;
+               return;
+       }
+
+       /*
+        * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+        * freq2k.
+        *
+        * delta = 1e6 *
+        *         abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+        *         freq2k;
+        *
+        * and we would like delta <= budget.
+        *
+        * If the discrepancy is above the PPM-based budget, always prefer to
+        * improve upon the previous solution.  However, if you're within the
+        * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+        */
+       a = freq2k * budget * p * r2;
+       b = freq2k * budget * best->p * best->r2;
+       diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
+       diff_best = abs_diff(freq2k * best->p * best->r2,
+                            LC_FREQ_2K * best->n2);
+       c = 1000000 * diff;
+       d = 1000000 * diff_best;
+
+       if (a < c && b < d) {
+               /* If both are above the budget, pick the closer */
+               if (best->p * best->r2 * diff < p * r2 * diff_best) {
+                       best->p = p;
+                       best->n2 = n2;
+                       best->r2 = r2;
+               }
+       } else if (a >= c && b < d) {
+               /* The candidate is within the budget but the previous best is not: update. */
+               best->p = p;
+               best->n2 = n2;
+               best->r2 = r2;
+       } else if (a >= c && b >= d) {
+               /* Both are below the limit, so pick the higher n2/(r2*r2) */
+               if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+                       best->p = p;
+                       best->n2 = n2;
+                       best->r2 = r2;
+               }
+       }
+       /* Otherwise a < c && b >= d, do nothing */
+}
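The comparison above is the comment's delta <= budget condition with the division cleared: 1000000 * |freq2k * p * r2 - LC_FREQ_2K * n2| <= budget * freq2k * p * r2, i.e. a >= c. A minimal standalone check of that inequality follows; the clock is the table's 148500000 Hz entry (so freq2k = 1485000 and budget = 0), and the divider triples are chosen only to exercise the inequality, not claimed to be what the full search would select.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LC_FREQ 2700
#define LC_FREQ_2K ((uint64_t)LC_FREQ * 2000)

static uint64_t abs_diff_u64(uint64_t a, uint64_t b)
{
	return a > b ? a - b : b - a;
}

/* delta <= budget, cross-multiplied as in hsw_wrpll_update_rnp(). */
static int within_budget(uint64_t freq2k, uint64_t budget,
			 uint64_t r2, uint64_t n2, uint64_t p)
{
	uint64_t a = freq2k * budget * p * r2;
	uint64_t c = UINT64_C(1000000) *
		     abs_diff_u64(freq2k * p * r2, LC_FREQ_2K * n2);

	return a >= c;
}

int main(void)
{
	/* 148500000 Hz target: freq2k = 1485000, budget 0 per the table. */
	uint64_t freq2k = 1485000, budget = 0;

	/* Exact hit: 5400000 * 22 / (4 * 20) == 1485000, so the delta is 0. */
	printf("r2=20 n2=22 p=4: %s\n",
	       within_budget(freq2k, budget, 20, 22, 4) ? "within" : "over");
	/* Slightly off target: rejected when the budget is 0 ppm. */
	printf("r2=21 n2=22 p=4: %s\n",
	       within_budget(freq2k, budget, 21, 22, 4) ? "within" : "over");
	return 0;
}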
+
+static void
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+                       unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+       u64 freq2k;
+       unsigned p, n2, r2;
+       struct hsw_wrpll_rnp best = { 0, 0, 0 };
+       unsigned budget;
+
+       freq2k = clock / 100;
+
+       budget = hsw_wrpll_get_budget_for_freq(clock);
+
+       /* Special case handling for the 540 MHz pixel clock: bypass WR PLL entirely
+        * and directly pass the LC PLL to it. */
+       if (freq2k == 5400000) {
+               *n2_out = 2;
+               *p_out = 1;
+               *r2_out = 2;
+               return;
+       }
+
+       /*
+        * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+        * the WR PLL.
+        *
+        * We want R so that REF_MIN <= Ref <= REF_MAX.
+        * Injecting R2 = 2 * R gives:
+        *   REF_MAX * r2 > LC_FREQ * 2 and
+        *   REF_MIN * r2 < LC_FREQ * 2
+        *
+        * Which means the desired boundaries for r2 are:
+        *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+        *
+        */
+       for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+            r2 <= LC_FREQ * 2 / REF_MIN;
+            r2++) {
+
+               /*
+                * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+                *
+                * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+                * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+                *   VCO_MAX * r2 > n2 * LC_FREQ and
+                *   VCO_MIN * r2 < n2 * LC_FREQ
+                *
+                * Which means the desired boundaries for n2 are:
+                * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+                */
+               for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+                    n2 <= VCO_MAX * r2 / LC_FREQ;
+                    n2++) {
+
+                       for (p = P_MIN; p <= P_MAX; p += P_INC)
+                               hsw_wrpll_update_rnp(freq2k, budget,
+                                                    r2, n2, p, &best);
+               }
+       }
+
+       *n2_out = best.n2;
+       *p_out = best.p;
+       *r2_out = best.r2;
+}
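With the constants above the loop bounds reduce to small ranges. A quick standalone computation, using the same truncating integer division as the loops, shows r2 running from 14 to 112 and, for a sample r2 of 20, n2 running from 18 to 35:

#include <stdio.h>

#define LC_FREQ 2700
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

int main(void)
{
	/* Same integer arithmetic as the loops in hsw_ddi_calculate_wrpll(). */
	unsigned int r2_lo = LC_FREQ * 2 / REF_MAX + 1;		/* 14 */
	unsigned int r2_hi = LC_FREQ * 2 / REF_MIN;		/* 112 */
	unsigned int r2 = 20;					/* arbitrary sample */
	unsigned int n2_lo = VCO_MIN * r2 / LC_FREQ + 1;	/* 18 */
	unsigned int n2_hi = VCO_MAX * r2 / LC_FREQ;		/* 35 */

	printf("r2 in [%u, %u], n2 in [%u, %u] for r2 = %u\n",
	       r2_lo, r2_hi, n2_lo, n2_hi, r2);
	return 0;
}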
+
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
+{
+       struct intel_shared_dpll *pll;
+       u32 val;
+       unsigned int p, n2, r2;
+
+       hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
+
+       val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+             WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+             WRPLL_DIVIDER_POST(p);
+
+       crtc_state->dpll_hw_state.wrpll = val;
+
+       pll = intel_find_shared_dpll(crtc_state,
+                                    DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+       if (!pll)
+               return NULL;
+
+       return pll;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id pll_id;
+       int clock = crtc_state->port_clock;
+
+       switch (clock / 2) {
+       case 81000:
+               pll_id = DPLL_ID_LCPLL_810;
+               break;
+       case 135000:
+               pll_id = DPLL_ID_LCPLL_1350;
+               break;
+       case 270000:
+               pll_id = DPLL_ID_LCPLL_2700;
+               break;
+       default:
+               DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+               return NULL;
+       }
+
+       pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+       if (!pll)
+               return NULL;
+
+       return pll;
+}
+
+static struct intel_shared_dpll *
+hsw_get_dpll(struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_shared_dpll *pll;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               pll = hsw_ddi_hdmi_get_dpll(crtc_state);
+       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+               pll = hsw_ddi_dp_get_dpll(crtc_state);
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+               if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+                       return NULL;
+
+               crtc_state->dpll_hw_state.spll =
+                       SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
+
+               pll = intel_find_shared_dpll(crtc_state,
+                                            DPLL_ID_SPLL, DPLL_ID_SPLL);
+       } else {
+               return NULL;
+       }
+
+       if (!pll)
+               return NULL;
+
+       intel_reference_shared_dpll(pll, crtc_state);
+
+       return pll;
+}
+
+static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+                     hw_state->wrpll, hw_state->spll);
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+       .enable = hsw_ddi_wrpll_enable,
+       .disable = hsw_ddi_wrpll_disable,
+       .get_hw_state = hsw_ddi_wrpll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+       .enable = hsw_ddi_spll_enable,
+       .disable = hsw_ddi_spll_disable,
+       .get_hw_state = hsw_ddi_spll_get_hw_state,
+};
+
+static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll)
+{
+}
+
+static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll *pll)
+{
+}
+
+static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
+                                      struct intel_shared_dpll *pll,
+                                      struct intel_dpll_hw_state *hw_state)
+{
+       return true;
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+       .enable = hsw_ddi_lcpll_enable,
+       .disable = hsw_ddi_lcpll_disable,
+       .get_hw_state = hsw_ddi_lcpll_get_hw_state,
+};
+
+struct skl_dpll_regs {
+       i915_reg_t ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[4] = {
+       {
+               /* DPLL 0 */
+               .ctl = LCPLL1_CTL,
+               /* DPLL 0 doesn't support HDMI mode */
+       },
+       {
+               /* DPLL 1 */
+               .ctl = LCPLL2_CTL,
+               .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
+               .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
+       },
+       {
+               /* DPLL 2 */
+               .ctl = WRPLL_CTL(0),
+               .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
+               .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
+       },
+       {
+               /* DPLL 3 */
+               .ctl = WRPLL_CTL(1),
+               .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
+               .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
+       },
+};
+
+static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
+                                   struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       u32 val;
+
+       val = I915_READ(DPLL_CTRL1);
+
+       val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
+                DPLL_CTRL1_SSC(id) |
+                DPLL_CTRL1_LINK_RATE_MASK(id));
+       val |= pll->state.hw_state.ctrl1 << (id * 6);
+
+       I915_WRITE(DPLL_CTRL1, val);
+       POSTING_READ(DPLL_CTRL1);
+}
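DPLL_CTRL1 packs the per-DPLL control bits into a 6-bit field selected by id, which is why this write shifts ctrl1 by id * 6 and the readout in skl_ddi_pll_get_hw_state() below masks with 0x3f. A simplified standalone sketch of that packing follows; it clears the whole 6-bit field (slightly more than the enable path above, which clears only specific bits), and the field value used is hypothetical.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Each DPLL owns a 6-bit field in the control register; id selects the field. */
static uint32_t ctrl1_set(uint32_t reg, int id, uint32_t field)
{
	reg &= ~(UINT32_C(0x3f) << (id * 6));	/* clear this PLL's bits */
	reg |= (field & 0x3f) << (id * 6);	/* install the new value */
	return reg;
}

static uint32_t ctrl1_get(uint32_t reg, int id)
{
	return (reg >> (id * 6)) & 0x3f;	/* as in skl_ddi_pll_get_hw_state() */
}

int main(void)
{
	uint32_t reg = 0;

	reg = ctrl1_set(reg, 1, 0x2d);	/* hypothetical field value for DPLL 1 */
	printf("reg = 0x%08" PRIx32 ", dpll1 field = 0x%02" PRIx32 "\n",
	       reg, ctrl1_get(reg, 1));
	return 0;
}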
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+                              struct intel_shared_dpll *pll)
+{
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+       const enum intel_dpll_id id = pll->info->id;
+
+       skl_ddi_pll_write_ctrl1(dev_priv, pll);
+
+       I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+       I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+       POSTING_READ(regs[id].cfgcr1);
+       POSTING_READ(regs[id].cfgcr2);
+
+       /* the enable bit is always bit 31 */
+       I915_WRITE(regs[id].ctl,
+                  I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
+
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   DPLL_STATUS,
+                                   DPLL_LOCK(id),
+                                   DPLL_LOCK(id),
+                                   5))
+               DRM_ERROR("DPLL %d not locked\n", id);
+}
+
+static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll)
+{
+       skl_ddi_pll_write_ctrl1(dev_priv, pll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll)
+{
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+       const enum intel_dpll_id id = pll->info->id;
+
+       /* the enable bit is always bit 31 */
+       I915_WRITE(regs[id].ctl,
+                  I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+       POSTING_READ(regs[id].ctl);
+}
+
+static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll *pll)
+{
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                    struct intel_shared_dpll *pll,
+                                    struct intel_dpll_hw_state *hw_state)
+{
+       u32 val;
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+       const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       bool ret;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       ret = false;
+
+       val = I915_READ(regs[id].ctl);
+       if (!(val & LCPLL_PLL_ENABLE))
+               goto out;
+
+       val = I915_READ(DPLL_CTRL1);
+       hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
+
+       /* avoid reading back stale values if HDMI mode is not enabled */
+       if (val & DPLL_CTRL1_HDMI_MODE(id)) {
+               hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
+               hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
+       }
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return ret;
+}
+
+static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
+                                      struct intel_shared_dpll *pll,
+                                      struct intel_dpll_hw_state *hw_state)
+{
+       const struct skl_dpll_regs *regs = skl_dpll_regs;
+       const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       u32 val;
+       bool ret;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       ret = false;
+
+       /* DPLL0 is always enabled since it drives CDCLK */
+       val = I915_READ(regs[id].ctl);
+       if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+               goto out;
+
+       val = I915_READ(DPLL_CTRL1);
+       hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
+
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return ret;
+}
+
+struct skl_wrpll_context {
+       u64 min_deviation;              /* current minimal deviation */
+       u64 central_freq;               /* chosen central freq */
+       u64 dco_freq;                   /* chosen dco freq */
+       unsigned int p;                 /* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+       memset(ctx, 0, sizeof(*ctx));
+
+       ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+                                 u64 central_freq,
+                                 u64 dco_freq,
+                                 unsigned int divider)
+{
+       u64 deviation;
+
+       deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+                             central_freq);
+
+       /* positive deviation */
+       if (dco_freq >= central_freq) {
+               if (deviation < SKL_DCO_MAX_PDEVIATION &&
+                   deviation < ctx->min_deviation) {
+                       ctx->min_deviation = deviation;
+                       ctx->central_freq = central_freq;
+                       ctx->dco_freq = dco_freq;
+                       ctx->p = divider;
+               }
+       /* negative deviation */
+       } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+                  deviation < ctx->min_deviation) {
+               ctx->min_deviation = deviation;
+               ctx->central_freq = central_freq;
+               ctx->dco_freq = dco_freq;
+               ctx->p = divider;
+       }
+}
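Since the deviation is computed in units of 0.01%, the +1%/-6% window in the comment above maps to the 100 and 600 limits. A standalone check of the same formula, with an illustrative 9.0 GHz central frequency and two candidate DCO frequencies:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t abs_diff_u64(uint64_t a, uint64_t b)
{
	return a > b ? a - b : b - a;
}

/* Deviation in units of 0.01%, as in skl_wrpll_try_divider(). */
static uint64_t deviation(uint64_t dco_freq, uint64_t central_freq)
{
	return 10000 * abs_diff_u64(dco_freq, central_freq) / central_freq;
}

int main(void)
{
	uint64_t central = UINT64_C(9000000000);

	/* 9.072 GHz: +0.8% -> 80, under the 100 (+1%) limit, accepted. */
	printf("%" PRIu64 "\n", deviation(UINT64_C(9072000000), central));
	/* 8.4 GHz: -6.67% -> 666, over the 600 (-6%) limit, rejected. */
	printf("%" PRIu64 "\n", deviation(UINT64_C(8400000000), central));
	return 0;
}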
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+                                     unsigned int *p0 /* out */,
+                                     unsigned int *p1 /* out */,
+                                     unsigned int *p2 /* out */)
+{
+       /* even dividers */
+       if (p % 2 == 0) {
+               unsigned int half = p / 2;
+
+               if (half == 1 || half == 2 || half == 3 || half == 5) {
+                       *p0 = 2;
+                       *p1 = 1;
+                       *p2 = half;
+               } else if (half % 2 == 0) {
+                       *p0 = 2;
+                       *p1 = half / 2;
+                       *p2 = 2;
+               } else if (half % 3 == 0) {
+                       *p0 = 3;
+                       *p1 = half / 3;
+                       *p2 = 2;
+               } else if (half % 7 == 0) {
+                       *p0 = 7;
+                       *p1 = half / 7;
+                       *p2 = 2;
+               }
+       } else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+               *p0 = 3;
+               *p1 = 1;
+               *p2 = p / 3;
+       } else if (p == 5 || p == 7) {
+               *p0 = p;
+               *p1 = 1;
+               *p2 = 1;
+       } else if (p == 15) {
+               *p0 = 3;
+               *p1 = 1;
+               *p2 = 5;
+       } else if (p == 21) {
+               *p0 = 7;
+               *p1 = 1;
+               *p2 = 3;
+       } else if (p == 35) {
+               *p0 = 7;
+               *p1 = 1;
+               *p2 = 5;
+       }
+}
+
+struct skl_wrpll_params {
+       u32 dco_fraction;
+       u32 dco_integer;
+       u32 qdiv_ratio;
+       u32 qdiv_mode;
+       u32 kdiv;
+       u32 pdiv;
+       u32 central_freq;
+};
+
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+                                     u64 afe_clock,
+                                     u64 central_freq,
+                                     u32 p0, u32 p1, u32 p2)
+{
+       u64 dco_freq;
+
+       switch (central_freq) {
+       case 9600000000ULL:
+               params->central_freq = 0;
+               break;
+       case 9000000000ULL:
+               params->central_freq = 1;
+               break;
+       case 8400000000ULL:
+               params->central_freq = 3;
+       }
+
+       switch (p0) {
+       case 1:
+               params->pdiv = 0;
+               break;
+       case 2:
+               params->pdiv = 1;
+               break;
+       case 3:
+               params->pdiv = 2;
+               break;
+       case 7:
+               params->pdiv = 4;
+               break;
+       default:
+               WARN(1, "Incorrect PDiv\n");
+       }
+
+       switch (p2) {
+       case 5:
+               params->kdiv = 0;
+               break;
+       case 2:
+               params->kdiv = 1;
+               break;
+       case 3:
+               params->kdiv = 2;
+               break;
+       case 1:
+               params->kdiv = 3;
+               break;
+       default:
+               WARN(1, "Incorrect KDiv\n");
+       }
+
+       params->qdiv_ratio = p1;
+       params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+       dco_freq = p0 * p1 * p2 * afe_clock;
+
+       /*
+        * Intermediate values are in Hz.
+        * Divide by MHz to match bspec
+        */
+       params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+       params->dco_fraction =
+               div_u64((div_u64(dco_freq, 24) -
+                        params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
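+
+/*
+ * Worked example (values picked only for illustration): a 148.5 MHz pixel
+ * clock gives afe_clock = 742.5 MHz; with p = 12 the DCO runs at 8.91 GHz,
+ * so dco_integer = 8910000000 / 24000000 = 371 (0x173) and
+ * dco_fraction = (371250000 - 371000000) * 0x8000 / 1000000 = 0x2000.
+ */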
+
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+                       struct skl_wrpll_params *wrpll_params)
+{
+       u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+       u64 dco_central_freq[3] = { 8400000000ULL,
+                                   9000000000ULL,
+                                   9600000000ULL };
+       static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+                                            24, 28, 30, 32, 36, 40, 42, 44,
+                                            48, 52, 54, 56, 60, 64, 66, 68,
+                                            70, 72, 76, 78, 80, 84, 88, 90,
+                                            92, 96, 98 };
+       static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+       static const struct {
+               const int *list;
+               int n_dividers;
+       } dividers[] = {
+               { even_dividers, ARRAY_SIZE(even_dividers) },
+               { odd_dividers, ARRAY_SIZE(odd_dividers) },
+       };
+       struct skl_wrpll_context ctx;
+       unsigned int dco, d, i;
+       unsigned int p0, p1, p2;
+
+       skl_wrpll_context_init(&ctx);
+
+       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+               for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+                       for (i = 0; i < dividers[d].n_dividers; i++) {
+                               unsigned int p = dividers[d].list[i];
+                               u64 dco_freq = p * afe_clock;
+
+                               skl_wrpll_try_divider(&ctx,
+                                                     dco_central_freq[dco],
+                                                     dco_freq,
+                                                     p);
+                               /*
+                                * Skip the remaining dividers if we're sure to
+                                * have found the definitive divider, we can't
+                                * improve a 0 deviation.
+                                */
+                               if (ctx.min_deviation == 0)
+                                       goto skip_remaining_dividers;
+                       }
+               }
+
+skip_remaining_dividers:
+               /*
+                * If a solution is found with an even divider, prefer
+                * this one.
+                */
+               if (d == 0 && ctx.p)
+                       break;
+       }
+
+       if (!ctx.p) {
+               DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+               return false;
+       }
+
+       /*
+        * gcc incorrectly analyses that these can be used without being
+        * initialized. To be fair, it's hard to guess.
+        */
+       p0 = p1 = p2 = 0;
+       skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+       skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+                                 p0, p1, p2);
+
+       return true;
+}
+
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+{
+       u32 ctrl1, cfgcr1, cfgcr2;
+       struct skl_wrpll_params wrpll_params = { 0, };
+
+       /*
+        * See comment in intel_dpll_hw_state to understand why we always use 0
+        * as the DPLL id in this function.
+        */
+       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+       ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+       if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+                                    &wrpll_params))
+               return false;
+
+       cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+               DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+               wrpll_params.dco_integer;
+
+       cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+               DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+               DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+               DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+               wrpll_params.central_freq;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+       crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+       return true;
+}
+
+static bool
+skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+       u32 ctrl1;
+
+       /*
+        * See comment in intel_dpll_hw_state to understand why we always use 0
+        * as the DPLL id in this function.
+        */
+       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+       switch (crtc_state->port_clock / 2) {
+       case 81000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+               break;
+       case 135000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+               break;
+       case 270000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+               break;
+               /* eDP 1.4 rates */
+       case 162000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+               break;
+       case 108000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+               break;
+       case 216000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+               break;
+       }
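+       /*
+        * For example, an HBR2 link (5.4 Gbps, port_clock = 540000) hits
+        * the 270000 case above and selects DPLL_CTRL1_LINK_RATE_2700.
+        */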
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+
+       return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_shared_dpll *pll;
+       bool bret;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               bret = skl_ddi_hdmi_pll_dividers(crtc_state);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+                       return NULL;
+               }
+       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+               bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+                       return NULL;
+               }
+       } else {
+               return NULL;
+       }
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+               pll = intel_find_shared_dpll(crtc_state,
+                                            DPLL_ID_SKL_DPLL0,
+                                            DPLL_ID_SKL_DPLL0);
+       else
+               pll = intel_find_shared_dpll(crtc_state,
+                                            DPLL_ID_SKL_DPLL1,
+                                            DPLL_ID_SKL_DPLL3);
+       if (!pll)
+               return NULL;
+
+       intel_reference_shared_dpll(pll, crtc_state);
+
+       return pll;
+}
+
+static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       DRM_DEBUG_KMS("dpll_hw_state: "
+                     "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+                     hw_state->ctrl1,
+                     hw_state->cfgcr1,
+                     hw_state->cfgcr2);
+}
+
+static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+       .enable = skl_ddi_pll_enable,
+       .disable = skl_ddi_pll_disable,
+       .get_hw_state = skl_ddi_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+       .enable = skl_ddi_dpll0_enable,
+       .disable = skl_ddi_dpll0_disable,
+       .get_hw_state = skl_ddi_dpll0_get_hw_state,
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll)
+{
+       u32 temp;
+       enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+       enum dpio_phy phy;
+       enum dpio_channel ch;
+
+       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+       /* Non-SSC reference */
+       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+       temp |= PORT_PLL_REF_SEL;
+       I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+       if (IS_GEMINILAKE(dev_priv)) {
+               temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+               temp |= PORT_PLL_POWER_ENABLE;
+               I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+               if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+                                PORT_PLL_POWER_STATE), 200))
+                       DRM_ERROR("Power state not set for PLL:%d\n", port);
+       }
+
+       /* Disable 10 bit clock */
+       temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+       temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+       I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+
+       /* Write P1 & P2 */
+       temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+       temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+       temp |= pll->state.hw_state.ebb0;
+       I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
+
+       /* Write M2 integer */
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+       temp &= ~PORT_PLL_M2_MASK;
+       temp |= pll->state.hw_state.pll0;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
+
+       /* Write N */
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+       temp &= ~PORT_PLL_N_MASK;
+       temp |= pll->state.hw_state.pll1;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
+
+       /* Write M2 fraction */
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+       temp &= ~PORT_PLL_M2_FRAC_MASK;
+       temp |= pll->state.hw_state.pll2;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
+
+       /* Write M2 fraction enable */
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+       temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+       temp |= pll->state.hw_state.pll3;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
+
+       /* Write coeff */
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+       temp &= ~PORT_PLL_PROP_COEFF_MASK;
+       temp &= ~PORT_PLL_INT_COEFF_MASK;
+       temp &= ~PORT_PLL_GAIN_CTL_MASK;
+       temp |= pll->state.hw_state.pll6;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
+
+       /* Write calibration val */
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+       temp &= ~PORT_PLL_TARGET_CNT_MASK;
+       temp |= pll->state.hw_state.pll8;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
+
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+       temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+       temp |= pll->state.hw_state.pll9;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
+
+       temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+       temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+       temp &= ~PORT_PLL_DCO_AMP_MASK;
+       temp |= pll->state.hw_state.pll10;
+       I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
+
+       /* Recalibrate with new settings */
+       temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+       temp |= PORT_PLL_RECALIBRATE;
+       I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+       temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+       temp |= pll->state.hw_state.ebb4;
+       I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+
+       /* Enable PLL */
+       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+       temp |= PORT_PLL_ENABLE;
+       I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+       POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+       if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+                       200))
+               DRM_ERROR("PLL %d not locked\n", port);
+
+       if (IS_GEMINILAKE(dev_priv)) {
+               temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+               temp |= DCC_DELAY_RANGE_2;
+               I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+       }
+
+       /*
+        * While we write to the group register to program all lanes at once,
+        * we can read only lane registers, and we pick lanes 0/1 for that.
+        */
+       temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+       temp &= ~LANE_STAGGER_MASK;
+       temp &= ~LANESTAGGER_STRAP_OVRD;
+       temp |= pll->state.hw_state.pcsdw12;
+       I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+                                       struct intel_shared_dpll *pll)
+{
+       enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+       u32 temp;
+
+       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+       temp &= ~PORT_PLL_ENABLE;
+       I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+       POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+       if (IS_GEMINILAKE(dev_priv)) {
+               temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+               temp &= ~PORT_PLL_POWER_ENABLE;
+               I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+               if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+                               PORT_PLL_POWER_STATE), 200))
+                       DRM_ERROR("Power state not reset for PLL:%d\n", port);
+       }
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                       struct intel_shared_dpll *pll,
+                                       struct intel_dpll_hw_state *hw_state)
+{
+       enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+       intel_wakeref_t wakeref;
+       enum dpio_phy phy;
+       enum dpio_channel ch;
+       u32 val;
+       bool ret;
+
+       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       ret = false;
+
+       val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+       if (!(val & PORT_PLL_ENABLE))
+               goto out;
+
+       hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+       hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+       hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+       hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
+       hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+       hw_state->pll0 &= PORT_PLL_M2_MASK;
+
+       hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+       hw_state->pll1 &= PORT_PLL_N_MASK;
+
+       hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+       hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
+       hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+       hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
+       hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+       hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+                         PORT_PLL_INT_COEFF_MASK |
+                         PORT_PLL_GAIN_CTL_MASK;
+
+       hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+       hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+       hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+       hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
+       hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+       hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+                          PORT_PLL_DCO_AMP_MASK;
+
+       /*
+        * While we write to the group register to program all lanes at once, we
+        * can read only lane registers. We configure all lanes the same way, so
+        * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
+        */
+       hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+       if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+               DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+                                hw_state->pcsdw12,
+                                I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
+       hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
+
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return ret;
+}
+
+/* bxt clock parameters */
+struct bxt_clk_div {
+       int clock;
+       u32 p1;
+       u32 p2;
+       u32 m2_int;
+       u32 m2_frac;
+       bool m2_frac_en;
+       u32 n;
+
+       int vco;
+};
+
+/* pre-calculated values for DP linkrates */
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+       {162000, 4, 2, 32, 1677722, 1, 1},
+       {270000, 4, 1, 27,       0, 0, 1},
+       {540000, 2, 1, 27,       0, 0, 1},
+       {216000, 3, 2, 32, 1677722, 1, 1},
+       {243000, 4, 1, 24, 1258291, 1, 1},
+       {324000, 4, 1, 32, 1677722, 1, 1},
+       {432000, 3, 1, 32, 1677722, 1, 1}
+};
+
+static bool
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
+                         struct bxt_clk_div *clk_div)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct dpll best_clock;
+
+       /* Calculate HDMI div */
+       /*
+        * FIXME: tie the following calculation into
+        * i9xx_crtc_compute_clock
+        */
+       if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
+               DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+                                crtc_state->port_clock,
+                                pipe_name(crtc->pipe));
+               return false;
+       }
+
+       clk_div->p1 = best_clock.p1;
+       clk_div->p2 = best_clock.p2;
+       WARN_ON(best_clock.m1 != 2);
+       clk_div->n = best_clock.n;
+       clk_div->m2_int = best_clock.m2 >> 22;
+       clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
+       clk_div->m2_frac_en = clk_div->m2_frac != 0;
+
+       clk_div->vco = best_clock.vco;
+
+       return true;
+}
+
+static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
+                                   struct bxt_clk_div *clk_div)
+{
+       int clock = crtc_state->port_clock;
+       int i;
+
+       *clk_div = bxt_dp_clk_val[0];
+       for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+               if (bxt_dp_clk_val[i].clock == clock) {
+                       *clk_div = bxt_dp_clk_val[i];
+                       break;
+               }
+       }
+
+       clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+}
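+
+/*
+ * For example, the 270000 (2.7 Gbps link) entry above has p1 = 4 and
+ * p2 = 1, so vco = 270000 * 10 / 2 * 4 * 1 = 5400000 kHz, i.e. the
+ * 5.4 GHz case handled explicitly in bxt_ddi_set_dpll_hw_state() below.
+ */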
+
+static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+                                     const struct bxt_clk_div *clk_div)
+{
+       struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+       int clock = crtc_state->port_clock;
+       int vco = clk_div->vco;
+       u32 prop_coef, int_coef, gain_ctl, targ_cnt;
+       u32 lanestagger;
+
+       memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
+
+       if (vco >= 6200000 && vco <= 6700000) {
+               prop_coef = 4;
+               int_coef = 9;
+               gain_ctl = 3;
+               targ_cnt = 8;
+       } else if ((vco > 5400000 && vco < 6200000) ||
+                       (vco >= 4800000 && vco < 5400000)) {
+               prop_coef = 5;
+               int_coef = 11;
+               gain_ctl = 3;
+               targ_cnt = 9;
+       } else if (vco == 5400000) {
+               prop_coef = 3;
+               int_coef = 8;
+               gain_ctl = 1;
+               targ_cnt = 9;
+       } else {
+               DRM_ERROR("Invalid VCO\n");
+               return false;
+       }
+
+       if (clock > 270000)
+               lanestagger = 0x18;
+       else if (clock > 135000)
+               lanestagger = 0x0d;
+       else if (clock > 67000)
+               lanestagger = 0x07;
+       else if (clock > 33000)
+               lanestagger = 0x04;
+       else
+               lanestagger = 0x02;
+
+       dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+       dpll_hw_state->pll0 = clk_div->m2_int;
+       dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+       dpll_hw_state->pll2 = clk_div->m2_frac;
+
+       if (clk_div->m2_frac_en)
+               dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
+
+       dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
+       dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
+
+       dpll_hw_state->pll8 = targ_cnt;
+
+       dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+
+       dpll_hw_state->pll10 =
+               PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+               | PORT_PLL_DCO_AMP_OVR_EN_H;
+
+       dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+       dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
+
+       return true;
+}
+
+static bool
+bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+       struct bxt_clk_div clk_div = {};
+
+       bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
+
+       return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+}
+
+static bool
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+       struct bxt_clk_div clk_div = {};
+
+       bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
+
+       return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+}
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id id;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+           !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
+               return NULL;
+
+       if (intel_crtc_has_dp_encoder(crtc_state) &&
+           !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
+               return NULL;
+
+       /* 1:1 mapping between ports and PLLs */
+       id = (enum intel_dpll_id) encoder->port;
+       pll = intel_get_shared_dpll_by_id(dev_priv, id);
+
+       DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+                     crtc->base.base.id, crtc->base.name, pll->info->name);
+
+       intel_reference_shared_dpll(pll, crtc_state);
+
+       return pll;
+}
+
+static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+                     "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+                     "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+                     hw_state->ebb0,
+                     hw_state->ebb4,
+                     hw_state->pll0,
+                     hw_state->pll1,
+                     hw_state->pll2,
+                     hw_state->pll3,
+                     hw_state->pll6,
+                     hw_state->pll8,
+                     hw_state->pll9,
+                     hw_state->pll10,
+                     hw_state->pcsdw12);
+}
+
+static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+       .enable = bxt_ddi_pll_enable,
+       .disable = bxt_ddi_pll_disable,
+       .get_hw_state = bxt_ddi_pll_get_hw_state,
+};
+
+struct intel_dpll_mgr {
+       const struct dpll_info *dpll_info;
+
+       struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
+                                             struct intel_encoder *encoder);
+
+       void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state);
+};
+
+static const struct dpll_info pch_plls[] = {
+       { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+       { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+       { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+       .dpll_info = pch_plls,
+       .get_dpll = ibx_get_dpll,
+       .dump_hw_state = ibx_dump_hw_state,
+};
+
+static const struct dpll_info hsw_plls[] = {
+       { "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
+       { "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
+       { "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
+       { "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
+       { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+       { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+       { },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+       .dpll_info = hsw_plls,
+       .get_dpll = hsw_get_dpll,
+       .dump_hw_state = hsw_dump_hw_state,
+};
+
+static const struct dpll_info skl_plls[] = {
+       { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+       { "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
+       { "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
+       { "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
+       { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+       .dpll_info = skl_plls,
+       .get_dpll = skl_get_dpll,
+       .dump_hw_state = skl_dump_hw_state,
+};
+
+static const struct dpll_info bxt_plls[] = {
+       { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+       { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+       { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+       { },
+};
+
+static const struct intel_dpll_mgr bxt_pll_mgr = {
+       .dpll_info = bxt_plls,
+       .get_dpll = bxt_get_dpll,
+       .dump_hw_state = bxt_dump_hw_state,
+};
+
+static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+                              struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       u32 val;
+
+       /* 1. Enable DPLL power in DPLL_ENABLE. */
+       val = I915_READ(CNL_DPLL_ENABLE(id));
+       val |= PLL_POWER_ENABLE;
+       I915_WRITE(CNL_DPLL_ENABLE(id), val);
+
+       /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   CNL_DPLL_ENABLE(id),
+                                   PLL_POWER_STATE,
+                                   PLL_POWER_STATE,
+                                   5))
+               DRM_ERROR("PLL %d Power not enabled\n", id);
+
+       /*
+        * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
+        * select DP mode, and set DP link rate.
+        */
+       val = pll->state.hw_state.cfgcr0;
+       I915_WRITE(CNL_DPLL_CFGCR0(id), val);
+
+       /* 4. Read back to ensure writes completed */
+       POSTING_READ(CNL_DPLL_CFGCR0(id));
+
+       /* 3. Configure DPLL_CFGCR1 */
+       /* Avoid touching CFGCR1 if HDMI mode is not enabled */
+       if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+               val = pll->state.hw_state.cfgcr1;
+               I915_WRITE(CNL_DPLL_CFGCR1(id), val);
+               /* 4. Read back to ensure writes completed */
+               POSTING_READ(CNL_DPLL_CFGCR1(id));
+       }
+
+       /*
+        * 5. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching
+        * Sequence Before Frequency Change
+        *
+        * Note: DVFS is actually handled via the cdclk code paths,
+        * hence we do nothing here.
+        */
+
+       /* 6. Enable DPLL in DPLL_ENABLE. */
+       val = I915_READ(CNL_DPLL_ENABLE(id));
+       val |= PLL_ENABLE;
+       I915_WRITE(CNL_DPLL_ENABLE(id), val);
+
+       /* 7. Wait for PLL lock status in DPLL_ENABLE. */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   CNL_DPLL_ENABLE(id),
+                                   PLL_LOCK,
+                                   PLL_LOCK,
+                                   5))
+               DRM_ERROR("PLL %d not locked\n", id);
+
+       /*
+        * 8. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching
+        * Sequence After Frequency Change
+        *
+        * Note: DVFS is actually handled via the cdclk code paths,
+        * hence we do nothing here.
+        */
+
+       /*
+        * 9. turn on the clock for the DDI and map the DPLL to the DDI
+        * Done at intel_ddi_clk_select
+        */
+}
+
+static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       u32 val;
+
+       /*
+        * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
+        * Done at intel_ddi_post_disable
+        */
+
+       /*
+        * 2. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching
+        * Sequence Before Frequency Change
+        *
+        * Note: DVFS is actually handled via the cdclk code paths,
+        * hence we do nothing here.
+        */
+
+       /* 3. Disable DPLL through DPLL_ENABLE. */
+       val = I915_READ(CNL_DPLL_ENABLE(id));
+       val &= ~PLL_ENABLE;
+       I915_WRITE(CNL_DPLL_ENABLE(id), val);
+
+       /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   CNL_DPLL_ENABLE(id),
+                                   PLL_LOCK,
+                                   0,
+                                   5))
+               DRM_ERROR("PLL %d locked\n", id);
+
+       /*
+        * 5. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching
+        * Sequence After Frequency Change
+        *
+        * Note: DVFS is actually handled via the cdclk code paths,
+        * hence we do nothing here.
+        */
+
+       /* 6. Disable DPLL power in DPLL_ENABLE. */
+       val = I915_READ(CNL_DPLL_ENABLE(id));
+       val &= ~PLL_POWER_ENABLE;
+       I915_WRITE(CNL_DPLL_ENABLE(id), val);
+
+       /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   CNL_DPLL_ENABLE(id),
+                                   PLL_POWER_STATE,
+                                   0,
+                                   5))
+               DRM_ERROR("PLL %d Power not disabled\n", id);
+}
+
+static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                    struct intel_shared_dpll *pll,
+                                    struct intel_dpll_hw_state *hw_state)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       u32 val;
+       bool ret;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       ret = false;
+
+       val = I915_READ(CNL_DPLL_ENABLE(id));
+       if (!(val & PLL_ENABLE))
+               goto out;
+
+       val = I915_READ(CNL_DPLL_CFGCR0(id));
+       hw_state->cfgcr0 = val;
+
+       /* avoid reading back stale values if HDMI mode is not enabled */
+       if (val & DPLL_CFGCR0_HDMI_MODE) {
+               hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
+       }
+       ret = true;
+
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+       return ret;
+}
+
+static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
+                                     int *qdiv, int *kdiv)
+{
+       /* even dividers */
+       if (bestdiv % 2 == 0) {
+               if (bestdiv == 2) {
+                       *pdiv = 2;
+                       *qdiv = 1;
+                       *kdiv = 1;
+               } else if (bestdiv % 4 == 0) {
+                       *pdiv = 2;
+                       *qdiv = bestdiv / 4;
+                       *kdiv = 2;
+               } else if (bestdiv % 6 == 0) {
+                       *pdiv = 3;
+                       *qdiv = bestdiv / 6;
+                       *kdiv = 2;
+               } else if (bestdiv % 5 == 0) {
+                       *pdiv = 5;
+                       *qdiv = bestdiv / 10;
+                       *kdiv = 2;
+               } else if (bestdiv % 14 == 0) {
+                       *pdiv = 7;
+                       *qdiv = bestdiv / 14;
+                       *kdiv = 2;
+               }
+       } else {
+               if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
+                       *pdiv = bestdiv;
+                       *qdiv = 1;
+                       *kdiv = 1;
+               } else { /* 9, 15, 21 */
+                       *pdiv = bestdiv / 3;
+                       *qdiv = 1;
+                       *kdiv = 3;
+               }
+       }
+}
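+
+/*
+ * For example, bestdiv = 20 is even and divisible by 4, so pdiv = 2,
+ * qdiv = 5, kdiv = 2 (2 * 5 * 2 = 20); bestdiv = 15 is odd but not 3, 5
+ * or 7, so pdiv = 15 / 3 = 5, qdiv = 1, kdiv = 3.
+ */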
+
+static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
+                                     u32 dco_freq, u32 ref_freq,
+                                     int pdiv, int qdiv, int kdiv)
+{
+       u32 dco;
+
+       switch (kdiv) {
+       case 1:
+               params->kdiv = 1;
+               break;
+       case 2:
+               params->kdiv = 2;
+               break;
+       case 3:
+               params->kdiv = 4;
+               break;
+       default:
+               WARN(1, "Incorrect KDiv\n");
+       }
+
+       switch (pdiv) {
+       case 2:
+               params->pdiv = 1;
+               break;
+       case 3:
+               params->pdiv = 2;
+               break;
+       case 5:
+               params->pdiv = 4;
+               break;
+       case 7:
+               params->pdiv = 8;
+               break;
+       default:
+               WARN(1, "Incorrect PDiv\n");
+       }
+
+       WARN_ON(kdiv != 2 && qdiv != 1);
+
+       params->qdiv_ratio = qdiv;
+       params->qdiv_mode = (qdiv == 1) ? 0 : 1;
+
+       dco = div_u64((u64)dco_freq << 15, ref_freq);
+
+       params->dco_integer = dco >> 15;
+       params->dco_fraction = dco & 0x7fff;
+}
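+
+/*
+ * For example, dco_freq = 8100000 kHz with a 24000 kHz reference gives
+ * dco = 337.5 in 15-bit fixed point, i.e. dco_integer = 0x151 and
+ * dco_fraction = 0x4000, matching the 810000 entry of the ICL 24 MHz
+ * combo PLL table below.
+ */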
+
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
+{
+       int ref_clock = dev_priv->cdclk.hw.ref;
+
+       /*
+        * For ICL+, the spec states: if reference frequency is 38.4,
+        * use 19.2 because the DPLL automatically divides that by 2.
+        */
+       if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
+               ref_clock = 19200;
+
+       return ref_clock;
+}
+
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+                       struct skl_wrpll_params *wrpll_params)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 afe_clock = crtc_state->port_clock * 5;
+       u32 ref_clock;
+       u32 dco_min = 7998000;
+       u32 dco_max = 10000000;
+       u32 dco_mid = (dco_min + dco_max) / 2;
+       static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
+                                        18, 20, 24, 28, 30, 32,  36,  40,
+                                        42, 44, 48, 50, 52, 54,  56,  60,
+                                        64, 66, 68, 70, 72, 76,  78,  80,
+                                        84, 88, 90, 92, 96, 98, 100, 102,
+                                         3,  5,  7,  9, 15, 21 };
+       u32 dco, best_dco = 0, dco_centrality = 0;
+       u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
+       int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
+
+       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+               dco = afe_clock * dividers[d];
+
+               if ((dco <= dco_max) && (dco >= dco_min)) {
+                       dco_centrality = abs(dco - dco_mid);
+
+                       if (dco_centrality < best_dco_centrality) {
+                               best_dco_centrality = dco_centrality;
+                               best_div = dividers[d];
+                               best_dco = dco;
+                       }
+               }
+       }
+
+       if (best_div == 0)
+               return false;
+
+       cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
+
+       ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
+
+       cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+                                 pdiv, qdiv, kdiv);
+
+       return true;
+}
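+
+/*
+ * For example, a 594000 kHz port clock (e.g. 4k60 HDMI) gives
+ * afe_clock = 2970000 kHz; only the divider 3 puts the DCO inside
+ * [7998000, 10000000] (at 8910000 kHz), so pdiv = 3, qdiv = 1, kdiv = 1,
+ * and with a 24 MHz reference dco_integer = 371, dco_fraction = 0x2000.
+ */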
+
+static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+{
+       u32 cfgcr0, cfgcr1;
+       struct skl_wrpll_params wrpll_params = { 0, };
+
+       cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
+
+       if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
+               return false;
+
+       cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
+               wrpll_params.dco_integer;
+
+       cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+               DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
+               DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
+               DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
+               DPLL_CFGCR1_CENTRAL_FREQ;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+       return true;
+}
+
+static bool
+cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+       u32 cfgcr0;
+
+       cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
+
+       switch (crtc_state->port_clock / 2) {
+       case 81000:
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
+               break;
+       case 135000:
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
+               break;
+       case 270000:
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
+               break;
+               /* eDP 1.4 rates */
+       case 162000:
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
+               break;
+       case 108000:
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
+               break;
+       case 216000:
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
+               break;
+       case 324000:
+               /* Some SKUs may require elevated I/O voltage to support this */
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
+               break;
+       case 405000:
+               /* Some SKUs may require elevated I/O voltage to support this */
+               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
+               break;
+       }
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+
+       return true;
+}
+
+static struct intel_shared_dpll *
+cnl_get_dpll(struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_shared_dpll *pll;
+       bool bret;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+                       return NULL;
+               }
+       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+               bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+                       return NULL;
+               }
+       } else {
+               DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
+                             crtc_state->output_types);
+               return NULL;
+       }
+
+       pll = intel_find_shared_dpll(crtc_state,
+                                    DPLL_ID_SKL_DPLL0,
+                                    DPLL_ID_SKL_DPLL2);
+       if (!pll) {
+               DRM_DEBUG_KMS("No PLL selected\n");
+               return NULL;
+       }
+
+       intel_reference_shared_dpll(pll, crtc_state);
+
+       return pll;
+}
+
+static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       DRM_DEBUG_KMS("dpll_hw_state: "
+                     "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
+                     hw_state->cfgcr0,
+                     hw_state->cfgcr1);
+}
+
+static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
+       .enable = cnl_ddi_pll_enable,
+       .disable = cnl_ddi_pll_disable,
+       .get_hw_state = cnl_ddi_pll_get_hw_state,
+};
+
+static const struct dpll_info cnl_plls[] = {
+       { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+       { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+       { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+       { },
+};
+
+static const struct intel_dpll_mgr cnl_pll_mgr = {
+       .dpll_info = cnl_plls,
+       .get_dpll = cnl_get_dpll,
+       .dump_hw_state = cnl_dump_hw_state,
+};
+
+struct icl_combo_pll_params {
+       int clock;
+       struct skl_wrpll_params wrpll;
+};
+
+/*
+ * These values are already adjusted: they're the bits we write to the
+ * registers, not the logical values.
+ */
+static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
+       { 540000,
+         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [0]: 5.4 */
+           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 270000,
+         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [1]: 2.7 */
+           .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 162000,
+         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [2]: 1.62 */
+           .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 324000,
+         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [3]: 3.24 */
+           .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 216000,
+         { .dco_integer = 0x168, .dco_fraction = 0x0000,               /* [4]: 2.16 */
+           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+       { 432000,
+         { .dco_integer = 0x168, .dco_fraction = 0x0000,               /* [5]: 4.32 */
+           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 648000,
+         { .dco_integer = 0x195, .dco_fraction = 0x0000,               /* [6]: 6.48 */
+           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 810000,
+         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [7]: 8.1 */
+           .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+};
+
+/* Also used for 38.4 MHz values. */
+static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
+       { 540000,
+         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [0]: 5.4 */
+           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 270000,
+         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [1]: 2.7 */
+           .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 162000,
+         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [2]: 1.62 */
+           .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 324000,
+         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [3]: 3.24 */
+           .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 216000,
+         { .dco_integer = 0x1C2, .dco_fraction = 0x0000,               /* [4]: 2.16 */
+           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+       { 432000,
+         { .dco_integer = 0x1C2, .dco_fraction = 0x0000,               /* [5]: 4.32 */
+           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 648000,
+         { .dco_integer = 0x1FA, .dco_fraction = 0x2000,               /* [6]: 6.48 */
+           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+       { 810000,
+         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [7]: 8.1 */
+           .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+};
+
+static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
+       .dco_integer = 0x151, .dco_fraction = 0x4000,
+       .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
+       .dco_integer = 0x1A5, .dco_fraction = 0x7000,
+       .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
+                                 struct skl_wrpll_params *pll_params)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       const struct icl_combo_pll_params *params =
+               dev_priv->cdclk.hw.ref == 24000 ?
+               icl_dp_combo_pll_24MHz_values :
+               icl_dp_combo_pll_19_2MHz_values;
+       int clock = crtc_state->port_clock;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
+               if (clock == params[i].clock) {
+                       *pll_params = params[i].wrpll;
+                       return true;
+               }
+       }
+
+       MISSING_CASE(clock);
+       return false;
+}
+
+static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
+                            struct skl_wrpll_params *pll_params)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
+                       icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
+       return true;
+}
+
+static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
+                               struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       u32 cfgcr0, cfgcr1;
+       struct skl_wrpll_params pll_params = { 0 };
+       bool ret;
+
+       if (intel_port_is_tc(dev_priv, encoder->port))
+               ret = icl_calc_tbt_pll(crtc_state, &pll_params);
+       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+                intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+               ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+       else
+               ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
+
+       if (!ret)
+               return false;
+
+       cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
+                pll_params.dco_integer;
+
+       cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
+                DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
+                DPLL_CFGCR1_KDIV(pll_params.kdiv) |
+                DPLL_CFGCR1_PDIV(pll_params.pdiv) |
+                DPLL_CFGCR1_CENTRAL_FREQ_8400;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+
+       return true;
+}
+
+static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
+{
+       return id - DPLL_ID_ICL_MGPLL1;
+}
+
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
+{
+       return tc_port + DPLL_ID_ICL_MGPLL1;
+}
+
+static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
+                                    u32 *target_dco_khz,
+                                    struct intel_dpll_hw_state *state)
+{
+       u32 dco_min_freq, dco_max_freq;
+       int div1_vals[] = {7, 5, 3, 2};
+       unsigned int i;
+       int div2;
+
+       dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
+       dco_max_freq = is_dp ? 8100000 : 10000000;
+
+       for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
+               int div1 = div1_vals[i];
+
+               for (div2 = 10; div2 > 0; div2--) {
+                       int dco = div1 * div2 * clock_khz * 5;
+                       int a_divratio, tlinedrv, inputsel;
+                       u32 hsdiv;
+
+                       if (dco < dco_min_freq || dco > dco_max_freq)
+                               continue;
+
+                       if (div2 >= 2) {
+                               a_divratio = is_dp ? 10 : 5;
+                               tlinedrv = 2;
+                       } else {
+                               a_divratio = 5;
+                               tlinedrv = 0;
+                       }
+                       inputsel = is_dp ? 0 : 1;
+
+                       switch (div1) {
+                       default:
+                               MISSING_CASE(div1);
+                               /* fall through */
+                       case 2:
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
+                               break;
+                       case 3:
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
+                               break;
+                       case 5:
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
+                               break;
+                       case 7:
+                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
+                               break;
+                       }
+
+                       *target_dco_khz = dco;
+
+                       state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+
+                       state->mg_clktop2_coreclkctl1 =
+                               MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
+
+                       state->mg_clktop2_hsclkctl =
+                               MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
+                               MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
+                               hsdiv |
+                               MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
+
+                       return true;
+               }
+       }
+
+       return false;
+}
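+
+/*
+ * For example, a 270000 kHz DP link clock requires the DCO at exactly
+ * 8100000 kHz, so div1 * div2 * 270000 * 5 = 8100000, i.e.
+ * div1 * div2 = 6; the loops settle on div1 = 3, div2 = 2, selecting
+ * HSDIV ratio 3, DSDIV 2, and (for DP) a_divratio = 10, tlinedrv = 2.
+ */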
+
+/*
+ * The specification for this function uses real numbers, so the math had to be
+ * adapted to integer-only calculation; that's why it looks so different.
+ */
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
+       int refclk_khz = dev_priv->cdclk.hw.ref;
+       int clock = crtc_state->port_clock;
+       u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+       u32 iref_ndiv, iref_trim, iref_pulse_w;
+       u32 prop_coeff, int_coeff;
+       u32 tdc_targetcnt, feedfwgain;
+       u64 ssc_stepsize, ssc_steplen, ssc_steplog;
+       u64 tmp;
+       bool use_ssc = false;
+       bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+
+       memset(pll_state, 0, sizeof(*pll_state));
+
+       if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
+                                     pll_state)) {
+               DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+               return false;
+       }
+
+       m1div = 2;
+       m2div_int = dco_khz / (refclk_khz * m1div);
+       if (m2div_int > 255) {
+               m1div = 4;
+               m2div_int = dco_khz / (refclk_khz * m1div);
+               if (m2div_int > 255) {
+                       DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
+                                     clock);
+                       return false;
+               }
+       }
+       m2div_rem = dco_khz % (refclk_khz * m1div);
+
+       tmp = (u64)m2div_rem * (1 << 22);
+       do_div(tmp, refclk_khz * m1div);
+       m2div_frac = tmp;
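+       /*
+        * For example, dco_khz = 8100000 with a 24000 kHz reference gives
+        * m1div = 2, m2div_int = 8100000 / 48000 = 168, m2div_rem = 36000
+        * and m2div_frac = 36000 * 2^22 / 48000 = 0x300000 (0.75).
+        */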
+
+       switch (refclk_khz) {
+       case 19200:
+               iref_ndiv = 1;
+               iref_trim = 28;
+               iref_pulse_w = 1;
+               break;
+       case 24000:
+               iref_ndiv = 1;
+               iref_trim = 25;
+               iref_pulse_w = 2;
+               break;
+       case 38400:
+               iref_ndiv = 2;
+               iref_trim = 28;
+               iref_pulse_w = 1;
+               break;
+       default:
+               MISSING_CASE(refclk_khz);
+               return false;
+       }
+
+       /*
+        * tdc_res = 0.000003
+        * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
+        *
+        * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
+        * was supposed to be a division, but we rearranged the operations of
+        * the formula to avoid early divisions so we don't multiply the
+        * rounding errors.
+        *
+        * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
+        * we also rearrange to work with integers.
+        *
+        * The 0.5 transformed to 5 results in a multiplication by 10 and the
+        * last division by 10.
+        */
+       tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
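+       /*
+        * For example, refclk_khz = 24000 yields tdc_targetcnt = 63 and
+        * 19200 yields 79, matching the real-number formula above.
+        */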
+
+       /*
+        * Here we divide dco_khz by 10 in order to allow the dividend to fit in
+        * 32 bits. That's not a problem since we round the division down
+        * anyway.
+        */
+       feedfwgain = (use_ssc || m2div_rem > 0) ?
+               m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
+
+       if (dco_khz >= 9000000) {
+               prop_coeff = 5;
+               int_coeff = 10;
+       } else {
+               prop_coeff = 4;
+               int_coeff = 8;
+       }
+
+       if (use_ssc) {
+               tmp = mul_u32_u32(dco_khz, 47 * 32);
+               do_div(tmp, refclk_khz * m1div * 10000);
+               ssc_stepsize = tmp;
+
+               tmp = mul_u32_u32(dco_khz, 1000);
+               ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
+       } else {
+               ssc_stepsize = 0;
+               ssc_steplen = 0;
+       }
+       ssc_steplog = 4;
+
+       pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+                                 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+                                 MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+       pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+                                MG_PLL_DIV1_DITHER_DIV_2 |
+                                MG_PLL_DIV1_NDIVRATIO(1) |
+                                MG_PLL_DIV1_FBPREDIV(m1div);
+
+       pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+                              MG_PLL_LF_AFCCNTSEL_512 |
+                              MG_PLL_LF_GAINCTRL(1) |
+                              MG_PLL_LF_INT_COEFF(int_coeff) |
+                              MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+       pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+                                     MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+                                     MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+                                     MG_PLL_FRAC_LOCK_DCODITHEREN |
+                                     MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+       if (use_ssc || m2div_rem > 0)
+               pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+       pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
+                               MG_PLL_SSC_TYPE(2) |
+                               MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+                               MG_PLL_SSC_STEPNUM(ssc_steplog) |
+                               MG_PLL_SSC_FLLEN |
+                               MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+       pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
+                                           MG_PLL_TDC_COLDST_IREFINT_EN |
+                                           MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+                                           MG_PLL_TDC_TDCOVCCORR_EN |
+                                           MG_PLL_TDC_TDCSEL(3);
+
+       pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+                                MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+                                MG_PLL_BIAS_BIAS_BONUS(10) |
+                                MG_PLL_BIAS_BIASCAL_EN |
+                                MG_PLL_BIAS_CTRIM(12) |
+                                MG_PLL_BIAS_VREF_RDAC(4) |
+                                MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+       if (refclk_khz == 38400) {
+               pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+               pll_state->mg_pll_bias_mask = 0;
+       } else {
+               pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+               pll_state->mg_pll_bias_mask = -1U;
+       }
+
+       pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
+       pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+
+       return true;
+}
+
+static struct intel_shared_dpll *
+icl_get_dpll(struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       struct intel_digital_port *intel_dig_port;
+       struct intel_shared_dpll *pll;
+       enum port port = encoder->port;
+       enum intel_dpll_id min, max;
+       bool ret;
+
+       if (intel_port_is_combophy(dev_priv, port)) {
+               min = DPLL_ID_ICL_DPLL0;
+               max = DPLL_ID_ICL_DPLL1;
+               ret = icl_calc_dpll_state(crtc_state, encoder);
+       } else if (intel_port_is_tc(dev_priv, port)) {
+               if (encoder->type == INTEL_OUTPUT_DP_MST) {
+                       struct intel_dp_mst_encoder *mst_encoder;
+
+                       mst_encoder = enc_to_mst(&encoder->base);
+                       intel_dig_port = mst_encoder->primary;
+               } else {
+                       intel_dig_port = enc_to_dig_port(&encoder->base);
+               }
+
+               if (intel_dig_port->tc_type == TC_PORT_TBT) {
+                       min = DPLL_ID_ICL_TBTPLL;
+                       max = min;
+                       ret = icl_calc_dpll_state(crtc_state, encoder);
+               } else {
+                       enum tc_port tc_port;
+
+                       tc_port = intel_port_to_tc(dev_priv, port);
+                       min = icl_tc_port_to_pll_id(tc_port);
+                       max = min;
+                       ret = icl_calc_mg_pll_state(crtc_state);
+               }
+       } else {
+               MISSING_CASE(port);
+               return NULL;
+       }
+
+       if (!ret) {
+               DRM_DEBUG_KMS("Could not calculate PLL state.\n");
+               return NULL;
+       }
+
+       pll = intel_find_shared_dpll(crtc_state, min, max);
+       if (!pll) {
+               DRM_DEBUG_KMS("No PLL selected\n");
+               return NULL;
+       }
+
+       intel_reference_shared_dpll(pll, crtc_state);
+
+       return pll;
+}
+
+static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                               struct intel_shared_dpll *pll,
+                               struct intel_dpll_hw_state *hw_state)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+       intel_wakeref_t wakeref;
+       bool ret = false;
+       u32 val;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(MG_PLL_ENABLE(tc_port));
+       if (!(val & PLL_ENABLE))
+               goto out;
+
+       hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+       hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+       hw_state->mg_clktop2_coreclkctl1 =
+               I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+       hw_state->mg_clktop2_coreclkctl1 &=
+               MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+       hw_state->mg_clktop2_hsclkctl =
+               I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+       hw_state->mg_clktop2_hsclkctl &=
+               MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+               MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+               MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+               MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+       hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+       hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+       hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+       hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+       hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+
+       hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+       hw_state->mg_pll_tdc_coldst_bias =
+               I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+
+       if (dev_priv->cdclk.hw.ref == 38400) {
+               hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+               hw_state->mg_pll_bias_mask = 0;
+       } else {
+               hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+               hw_state->mg_pll_bias_mask = -1U;
+       }
+
+       hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+       hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
+
+       ret = true;
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+       return ret;
+}
+
+static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll,
+                                struct intel_dpll_hw_state *hw_state,
+                                i915_reg_t enable_reg)
+{
+       const enum intel_dpll_id id = pll->info->id;
+       intel_wakeref_t wakeref;
+       bool ret = false;
+       u32 val;
+
+       wakeref = intel_display_power_get_if_enabled(dev_priv,
+                                                    POWER_DOMAIN_DISPLAY_CORE);
+       if (!wakeref)
+               return false;
+
+       val = I915_READ(enable_reg);
+       if (!(val & PLL_ENABLE))
+               goto out;
+
+       hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+       hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+
+       ret = true;
+out:
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+       return ret;
+}
+
+static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                  struct intel_shared_dpll *pll,
+                                  struct intel_dpll_hw_state *hw_state)
+{
+       return icl_pll_get_hw_state(dev_priv, pll, hw_state,
+                                   CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll,
+                                struct intel_dpll_hw_state *hw_state)
+{
+       return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+}
+
+static void icl_dpll_write(struct drm_i915_private *dev_priv,
+                          struct intel_shared_dpll *pll)
+{
+       struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+       const enum intel_dpll_id id = pll->info->id;
+
+       I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
+       I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
+       POSTING_READ(ICL_DPLL_CFGCR1(id));
+}
+
+static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
+                            struct intel_shared_dpll *pll)
+{
+       struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+       enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+       u32 val;
+
+       /*
+        * Some of the following registers have reserved fields, so program
+        * these with RMW based on a mask. The mask can be fixed or generated
+        * during the calc/readout phase if the mask depends on some other HW
+        * state like refclk, see icl_calc_mg_pll_state().
+        */
+       val = I915_READ(MG_REFCLKIN_CTL(tc_port));
+       val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+       val |= hw_state->mg_refclkin_ctl;
+       I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
+
+       val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+       val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+       val |= hw_state->mg_clktop2_coreclkctl1;
+       I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+
+       val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+       val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+                MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+                MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+                MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+       val |= hw_state->mg_clktop2_hsclkctl;
+       I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
+
+       I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+       I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+       I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+       I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
+       I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+
+       val = I915_READ(MG_PLL_BIAS(tc_port));
+       val &= ~hw_state->mg_pll_bias_mask;
+       val |= hw_state->mg_pll_bias;
+       I915_WRITE(MG_PLL_BIAS(tc_port), val);
+
+       val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+       val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
+       val |= hw_state->mg_pll_tdc_coldst_bias;
+       I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+
+       POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+}
+
+static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+                                struct intel_shared_dpll *pll,
+                                i915_reg_t enable_reg)
+{
+       u32 val;
+
+       val = I915_READ(enable_reg);
+       val |= PLL_POWER_ENABLE;
+       I915_WRITE(enable_reg, val);
+
+       /*
+        * The spec says we need to "wait" but it also says it should be
+        * immediate.
+        */
+       if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+                                   PLL_POWER_STATE, PLL_POWER_STATE, 1))
+               DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+}
+
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+                          struct intel_shared_dpll *pll,
+                          i915_reg_t enable_reg)
+{
+       u32 val;
+
+       val = I915_READ(enable_reg);
+       val |= PLL_ENABLE;
+       I915_WRITE(enable_reg, val);
+
+       /* Timeout is actually 600us. */
+       if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+                                   PLL_LOCK, PLL_LOCK, 1))
+               DRM_ERROR("PLL %d not locked\n", pll->info->id);
+}
+
+static void combo_pll_enable(struct drm_i915_private *dev_priv,
+                            struct intel_shared_dpll *pll)
+{
+       i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+       icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+       icl_dpll_write(dev_priv, pll);
+
+       /*
+        * DVFS pre sequence would be here, but in our driver the cdclk code
+        * paths should already be setting the appropriate voltage, hence we do
+        * nothing here.
+        */
+
+       icl_pll_enable(dev_priv, pll, enable_reg);
+
+       /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+                          struct intel_shared_dpll *pll)
+{
+       icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+       icl_dpll_write(dev_priv, pll);
+
+       /*
+        * DVFS pre sequence would be here, but in our driver the cdclk code
+        * paths should already be setting the appropriate voltage, hence we do
+        * nothing here.
+        */
+
+       icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+       /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void mg_pll_enable(struct drm_i915_private *dev_priv,
+                         struct intel_shared_dpll *pll)
+{
+       i915_reg_t enable_reg =
+               MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+       icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+       icl_mg_pll_write(dev_priv, pll);
+
+       /*
+        * DVFS pre sequence would be here, but in our driver the cdclk code
+        * paths should already be setting the appropriate voltage, hence we do
+        * nothing here.
+        */
+
+       icl_pll_enable(dev_priv, pll, enable_reg);
+
+       /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void icl_pll_disable(struct drm_i915_private *dev_priv,
+                           struct intel_shared_dpll *pll,
+                           i915_reg_t enable_reg)
+{
+       u32 val;
+
+       /* The first steps are done by intel_ddi_post_disable(). */
+
+       /*
+        * DVFS pre sequence would be here, but in our driver the cdclk code
+        * paths should already be setting the appropriate voltage, hence we do
+        * nothing here.
+        */
+
+       val = I915_READ(enable_reg);
+       val &= ~PLL_ENABLE;
+       I915_WRITE(enable_reg, val);
+
+       /* Timeout is actually 1us. */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   enable_reg, PLL_LOCK, 0, 1))
+               DRM_ERROR("PLL %d locked\n", pll->info->id);
+
+       /* DVFS post sequence would be here. See the comment above. */
+
+       val = I915_READ(enable_reg);
+       val &= ~PLL_POWER_ENABLE;
+       I915_WRITE(enable_reg, val);
+
+       /*
+        * The spec says we need to "wait" but it also says it should be
+        * immediate.
+        */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   enable_reg, PLL_POWER_STATE, 0, 1))
+               DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+}
+
+static void combo_pll_disable(struct drm_i915_private *dev_priv,
+                             struct intel_shared_dpll *pll)
+{
+       icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+                           struct intel_shared_dpll *pll)
+{
+       icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+}
+
+static void mg_pll_disable(struct drm_i915_private *dev_priv,
+                          struct intel_shared_dpll *pll)
+{
+       i915_reg_t enable_reg =
+               MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+       icl_pll_disable(dev_priv, pll, enable_reg);
+}
+
+static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+                     "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
+                     "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+                     "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
+                     "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+                     "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+                     hw_state->cfgcr0, hw_state->cfgcr1,
+                     hw_state->mg_refclkin_ctl,
+                     hw_state->mg_clktop2_coreclkctl1,
+                     hw_state->mg_clktop2_hsclkctl,
+                     hw_state->mg_pll_div0,
+                     hw_state->mg_pll_div1,
+                     hw_state->mg_pll_lf,
+                     hw_state->mg_pll_frac_lock,
+                     hw_state->mg_pll_ssc,
+                     hw_state->mg_pll_bias,
+                     hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+       .enable = combo_pll_enable,
+       .disable = combo_pll_disable,
+       .get_hw_state = combo_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+       .enable = tbt_pll_enable,
+       .disable = tbt_pll_disable,
+       .get_hw_state = tbt_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+       .enable = mg_pll_enable,
+       .disable = mg_pll_disable,
+       .get_hw_state = mg_pll_get_hw_state,
+};
+
+static const struct dpll_info icl_plls[] = {
+       { "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
+       { "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
+       { "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+       { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+       { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+       { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+       { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+       { },
+};
+
+static const struct intel_dpll_mgr icl_pll_mgr = {
+       .dpll_info = icl_plls,
+       .get_dpll = icl_get_dpll,
+       .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info ehl_plls[] = {
+       { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+       { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+       { },
+};
+
+static const struct intel_dpll_mgr ehl_pll_mgr = {
+       .dpll_info = ehl_plls,
+       .get_dpll = icl_get_dpll,
+       .dump_hw_state = icl_dump_hw_state,
+};
+
+/**
+ * intel_shared_dpll_init - Initialize shared DPLLs
+ * @dev: drm device
+ *
+ * Initialize shared DPLLs for @dev.
+ */
+void intel_shared_dpll_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_dpll_mgr *dpll_mgr = NULL;
+       const struct dpll_info *dpll_info;
+       int i;
+
+       if (IS_ELKHARTLAKE(dev_priv))
+               dpll_mgr = &ehl_pll_mgr;
+       else if (INTEL_GEN(dev_priv) >= 11)
+               dpll_mgr = &icl_pll_mgr;
+       else if (IS_CANNONLAKE(dev_priv))
+               dpll_mgr = &cnl_pll_mgr;
+       else if (IS_GEN9_BC(dev_priv))
+               dpll_mgr = &skl_pll_mgr;
+       else if (IS_GEN9_LP(dev_priv))
+               dpll_mgr = &bxt_pll_mgr;
+       else if (HAS_DDI(dev_priv))
+               dpll_mgr = &hsw_pll_mgr;
+       else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
+               dpll_mgr = &pch_pll_mgr;
+
+       if (!dpll_mgr) {
+               dev_priv->num_shared_dpll = 0;
+               return;
+       }
+
+       dpll_info = dpll_mgr->dpll_info;
+
+       for (i = 0; dpll_info[i].name; i++) {
+               WARN_ON(i != dpll_info[i].id);
+               dev_priv->shared_dplls[i].info = &dpll_info[i];
+       }
+
+       dev_priv->dpll_mgr = dpll_mgr;
+       dev_priv->num_shared_dpll = i;
+       mutex_init(&dev_priv->dpll_lock);
+
+       BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+}
+
+/**
+ * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
+ * @crtc_state: atomic state for the crtc
+ * @encoder: encoder
+ *
+ * Find an appropriate DPLL for the given CRTC and encoder combination. A
+ * reference from the @crtc_state to the returned pll is registered in the
+ * atomic state. That configuration is made effective by calling
+ * intel_shared_dpll_swap_state(). The reference should be released by calling
+ * intel_release_shared_dpll().
+ *
+ * Returns:
+ * A shared DPLL to be used by @crtc_state and @encoder.
+ */
+struct intel_shared_dpll *
+intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
+                     struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+       const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+       if (WARN_ON(!dpll_mgr))
+               return NULL;
+
+       return dpll_mgr->get_dpll(crtc_state, encoder);
+}
+
+/**
+ * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
+ * @dpll: dpll in use by @crtc
+ * @crtc: crtc
+ * @state: atomic state
+ *
+ * This function releases the reference from @crtc to @dpll from the
+ * atomic @state. The new configuration is made effective by calling
+ * intel_shared_dpll_swap_state().
+ */
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+                              struct intel_crtc *crtc,
+                              struct drm_atomic_state *state)
+{
+       struct intel_shared_dpll_state *shared_dpll_state;
+
+       shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
+       shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+/**
+ * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * @dev_priv: i915 drm device
+ * @hw_state: hw state to be written to the log
+ *
+ * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ */
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state)
+{
+       if (dev_priv->dpll_mgr) {
+               dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+       } else {
+               /* fallback for platforms that don't use the shared dpll
+                * infrastructure
+                */
+               DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+                             "fp0: 0x%x, fp1: 0x%x\n",
+                             hw_state->dpll,
+                             hw_state->dpll_md,
+                             hw_state->fp0,
+                             hw_state->fp1);
+       }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
new file mode 100644 (file)
index 0000000..d057041
--- /dev/null
@@ -0,0 +1,351 @@
+/*
+ * Copyright © 2012-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DPLL_MGR_H_
+#define _INTEL_DPLL_MGR_H_
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+/* FIXME: Move this to a more appropriate place. */
+#define abs_diff(a, b) ({                      \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       (void) (&__a == &__b);                  \
+       __a > __b ? (__a - __b) : (__b - __a); })
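+
+/*
+ * Usage sketch (illustrative): abs_diff(a, b) evaluates to |a - b| for two
+ * scalars of the same type, e.g. when comparing a candidate PLL frequency
+ * against a target. The (void)(&__a == &__b) line exists only to make the
+ * compiler warn if a and b have incompatible types.
+ */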
+
+struct drm_atomic_state;
+struct drm_device;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_shared_dpll;
+
+/**
+ * enum intel_dpll_id - possible DPLL ids
+ *
+ * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0.
+ */
+enum intel_dpll_id {
+       /**
+        * @DPLL_ID_PRIVATE: non-shared dpll in use
+        */
+       DPLL_ID_PRIVATE = -1,
+
+       /**
+        * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB
+        */
+       DPLL_ID_PCH_PLL_A = 0,
+       /**
+        * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB
+        */
+       DPLL_ID_PCH_PLL_B = 1,
+
+
+       /**
+        * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1
+        */
+       DPLL_ID_WRPLL1 = 0,
+       /**
+        * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2
+        */
+       DPLL_ID_WRPLL2 = 1,
+       /**
+        * @DPLL_ID_SPLL: HSW and BDW SPLL
+        */
+       DPLL_ID_SPLL = 2,
+       /**
+        * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL
+        */
+       DPLL_ID_LCPLL_810 = 3,
+       /**
+        * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL
+        */
+       DPLL_ID_LCPLL_1350 = 4,
+       /**
+        * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL
+        */
+       DPLL_ID_LCPLL_2700 = 5,
+
+
+       /**
+        * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0
+        */
+       DPLL_ID_SKL_DPLL0 = 0,
+       /**
+        * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1
+        */
+       DPLL_ID_SKL_DPLL1 = 1,
+       /**
+        * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2
+        */
+       DPLL_ID_SKL_DPLL2 = 2,
+       /**
+        * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
+        */
+       DPLL_ID_SKL_DPLL3 = 3,
+
+
+       /**
+        * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
+        */
+       DPLL_ID_ICL_DPLL0 = 0,
+       /**
+        * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
+        */
+       DPLL_ID_ICL_DPLL1 = 1,
+       /**
+        * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+        */
+       DPLL_ID_ICL_TBTPLL = 2,
+       /**
+        * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
+        */
+       DPLL_ID_ICL_MGPLL1 = 3,
+       /**
+        * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 2 port 2 (D)
+        */
+       DPLL_ID_ICL_MGPLL2 = 4,
+       /**
+        * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 3 port 3 (E)
+        */
+       DPLL_ID_ICL_MGPLL3 = 5,
+       /**
+        * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 4 port 4 (F)
+        */
+       DPLL_ID_ICL_MGPLL4 = 6,
+};
+#define I915_NUM_PLLS 7
+
+struct intel_dpll_hw_state {
+       /* i9xx, pch plls */
+       u32 dpll;
+       u32 dpll_md;
+       u32 fp0;
+       u32 fp1;
+
+       /* hsw, bdw */
+       u32 wrpll;
+       u32 spll;
+
+       /* skl */
+       /*
+        * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the lower
+        * part of ctrl1 and they get shifted into position when writing
+        * the register.  This allows us to easily compare the state to share
+        * the DPLL.
+        */
+       u32 ctrl1;
+       /* HDMI only, 0 when used for DP */
+       u32 cfgcr1, cfgcr2;
+
+       /* cnl */
+       u32 cfgcr0;
+       /* CNL also uses cfgcr1 */
+
+       /* bxt */
+       u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
+
+       /*
+        * ICL uses the following, already defined:
+        * u32 cfgcr0, cfgcr1;
+        */
+       u32 mg_refclkin_ctl;
+       u32 mg_clktop2_coreclkctl1;
+       u32 mg_clktop2_hsclkctl;
+       u32 mg_pll_div0;
+       u32 mg_pll_div1;
+       u32 mg_pll_lf;
+       u32 mg_pll_frac_lock;
+       u32 mg_pll_ssc;
+       u32 mg_pll_bias;
+       u32 mg_pll_tdc_coldst_bias;
+       u32 mg_pll_bias_mask;
+       u32 mg_pll_tdc_coldst_bias_mask;
+};
+
+/**
+ * struct intel_shared_dpll_state - hold the DPLL atomic state
+ *
+ * This structure holds an atomic state for the DPLL, that can represent
+ * either its current state (in struct &intel_shared_dpll) or a desired
+ * future state which would be applied by an atomic mode set (stored in
+ * a struct &intel_atomic_state).
+ *
+ * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ */
+struct intel_shared_dpll_state {
+       /**
+        * @crtc_mask: mask of CRTCs using this DPLL, active or not
+        */
+       unsigned crtc_mask;
+
+       /**
+        * @hw_state: hardware configuration for the DPLL stored in
+        * struct &intel_dpll_hw_state.
+        */
+       struct intel_dpll_hw_state hw_state;
+};
+
+/**
+ * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs
+ */
+struct intel_shared_dpll_funcs {
+       /**
+        * @prepare:
+        *
+        * Optional hook to perform operations prior to enabling the PLL.
+        * Called from intel_prepare_shared_dpll() function unless the PLL
+        * is already enabled.
+        */
+       void (*prepare)(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll);
+
+       /**
+        * @enable:
+        *
+        * Hook for enabling the pll, called from intel_enable_shared_dpll()
+        * if the pll is not already enabled.
+        */
+       void (*enable)(struct drm_i915_private *dev_priv,
+                      struct intel_shared_dpll *pll);
+
+       /**
+        * @disable:
+        *
+        * Hook for disabling the pll, called from intel_disable_shared_dpll()
+        * only when it is safe to disable the pll, i.e., there are no more
+        * tracked users for it.
+        */
+       void (*disable)(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll);
+
+       /**
+        * @get_hw_state:
+        *
+        * Hook for reading the values currently programmed to the DPLL
+        * registers. This is used for initial hw state readout and state
+        * verification after a mode set.
+        */
+       bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+                            struct intel_shared_dpll *pll,
+                            struct intel_dpll_hw_state *hw_state);
+};
+
+/**
+ * struct dpll_info - display PLL platform specific info
+ */
+struct dpll_info {
+       /**
+        * @name: DPLL name; used for logging
+        */
+       const char *name;
+
+       /**
+        * @funcs: platform specific hooks
+        */
+       const struct intel_shared_dpll_funcs *funcs;
+
+       /**
+        * @id: unique identifier for this DPLL; should match the index in the
+        * dev_priv->shared_dplls array
+        */
+       enum intel_dpll_id id;
+
+#define INTEL_DPLL_ALWAYS_ON   (1 << 0)
+       /**
+        * @flags:
+        *
+        * INTEL_DPLL_ALWAYS_ON
+        *     Inform the state checker that the DPLL is kept enabled even if
+        *     not in use by any CRTC.
+        */
+       u32 flags;
+};
+
+/**
+ * struct intel_shared_dpll - display PLL with tracked state and users
+ */
+struct intel_shared_dpll {
+       /**
+        * @state:
+        *
+        * Store the state for the pll, including its hw state
+        * and CRTCs using it.
+        */
+       struct intel_shared_dpll_state state;
+
+       /**
+        * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
+        */
+       unsigned active_mask;
+
+       /**
+        * @on: is the PLL actually active? Disabled during modeset
+        */
+       bool on;
+
+       /**
+        * @info: platform specific info
+        */
+       const struct dpll_info *info;
+};
+
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
+/* shared dpll functions */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+                           enum intel_dpll_id id);
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll,
+                       bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
+                                               struct intel_encoder *encoder);
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+                              struct intel_crtc *crtc,
+                              struct drm_atomic_state *state);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
+void intel_shared_dpll_init(struct drm_device *dev);
+
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+                             const struct intel_dpll_hw_state *hw_state);
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
+
+#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
new file mode 100644 (file)
index 0000000..d36cada
--- /dev/null
@@ -0,0 +1,1345 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. They come from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and configuring its
+ * offset in the proper registers. The hardware takes care of all the
+ * compression and decompression. However, there are many known cases where
+ * we have to forcibly disable it to allow proper screen updates.
+ */
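+
+/*
+ * For reference, FBC can typically be forced on or off with the
+ * i915.enable_fbc module parameter, and its current state (including the
+ * reason it is disabled, if any) can usually be read from the
+ * i915_fbc_status debugfs file.
+ */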
+
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+
+static inline bool fbc_supported(struct drm_i915_private *dev_priv)
+{
+       return HAS_FBC(dev_priv);
+}
+
+static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
+{
+       return INTEL_GEN(dev_priv) <= 3;
+}
+
+/*
+ * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
+ * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
+ * origin so the x and y offsets can actually fit the registers. As a
+ * consequence, the fence doesn't really start exactly at the display plane
+ * address we program because it starts at the real start of the buffer, so we
+ * have to take this into consideration here.
+ */
+static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
+{
+       return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
+}
+
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
+                                           int *width, int *height)
+{
+       if (width)
+               *width = cache->plane.src_w;
+       if (height)
+               *height = cache->plane.src_h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+                                       struct intel_fbc_state_cache *cache)
+{
+       int lines;
+
+       intel_fbc_get_plane_source_size(cache, NULL, &lines);
+       if (IS_GEN(dev_priv, 7))
+               lines = min(lines, 2048);
+       else if (INTEL_GEN(dev_priv) >= 8)
+               lines = min(lines, 2560);
+
+       /* Hardware needs the full buffer stride, not just the active area. */
+       return lines * cache->fb.stride;
+}
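+
+/*
+ * Ballpark example (illustrative numbers): a 1920x1080 XRGB8888 plane with a
+ * 7680 byte stride needs 1080 * 7680 bytes (~8 MB) of CFB space at the 1:1
+ * limit; the compression threshold logic further down in this file can halve
+ * or quarter that reservation when stolen memory is tight.
+ */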
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
+{
+       u32 fbc_ctl;
+
+       /* Disable compression */
+       fbc_ctl = I915_READ(FBC_CONTROL);
+       if ((fbc_ctl & FBC_CTL_EN) == 0)
+               return;
+
+       fbc_ctl &= ~FBC_CTL_EN;
+       I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+       /* Wait for compressing bit to clear */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   FBC_STATUS, FBC_STAT_COMPRESSING, 0,
+                                   10)) {
+               DRM_DEBUG_KMS("FBC idle timed out\n");
+               return;
+       }
+}
+
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+       int cfb_pitch;
+       int i;
+       u32 fbc_ctl;
+
+       /* Note: fbc.threshold == 1 for i8xx */
+       cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+       if (params->fb.stride < cfb_pitch)
+               cfb_pitch = params->fb.stride;
+
+       /* FBC_CTL wants 32B or 64B units */
+       if (IS_GEN(dev_priv, 2))
+               cfb_pitch = (cfb_pitch / 32) - 1;
+       else
+               cfb_pitch = (cfb_pitch / 64) - 1;
+
+       /* Clear old tags */
+       for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+               I915_WRITE(FBC_TAG(i), 0);
+
+       if (IS_GEN(dev_priv, 4)) {
+               u32 fbc_ctl2;
+
+               /* Set it up... */
+               fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+               fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
+               I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+               I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
+       }
+
+       /* enable it... */
+       fbc_ctl = I915_READ(FBC_CONTROL);
+       fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+       fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
+       if (IS_I945GM(dev_priv))
+               fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+       fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+       fbc_ctl |= params->vma->fence->id;
+       I915_WRITE(FBC_CONTROL, fbc_ctl);
+}
+
+static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
+{
+       return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+       u32 dpfc_ctl;
+
+       dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
+       if (params->fb.format->cpp[0] == 2)
+               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+       else
+               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+
+       if (params->flags & PLANE_HAS_FENCE) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
+               I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+       } else {
+               I915_WRITE(DPFC_FENCE_YOFF, 0);
+       }
+
+       /* enable it... */
+       I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+}
+
+static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
+{
+       u32 dpfc_ctl;
+
+       /* Disable compression */
+       dpfc_ctl = I915_READ(DPFC_CONTROL);
+       if (dpfc_ctl & DPFC_CTL_EN) {
+               dpfc_ctl &= ~DPFC_CTL_EN;
+               I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+       }
+}
+
+static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
+{
+       return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+/* This function forces a CFB recompression through the nuke operation. */
+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
+       POSTING_READ(MSG_FBC_REND_STATE);
+}
+
+static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+       u32 dpfc_ctl;
+       int threshold = dev_priv->fbc.threshold;
+
+       dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
+       if (params->fb.format->cpp[0] == 2)
+               threshold++;
+
+       switch (threshold) {
+       case 4:
+       case 3:
+               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+               break;
+       case 2:
+               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+               break;
+       case 1:
+               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+               break;
+       }
+
+       if (params->flags & PLANE_HAS_FENCE) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN;
+               if (IS_GEN(dev_priv, 5))
+                       dpfc_ctl |= params->vma->fence->id;
+               if (IS_GEN(dev_priv, 6)) {
+                       I915_WRITE(SNB_DPFC_CTL_SA,
+                                  SNB_CPU_FENCE_ENABLE |
+                                  params->vma->fence->id);
+                       I915_WRITE(DPFC_CPU_FENCE_OFFSET,
+                                  params->crtc.fence_y_offset);
+               }
+       } else {
+               if (IS_GEN(dev_priv, 6)) {
+                       I915_WRITE(SNB_DPFC_CTL_SA, 0);
+                       I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+               }
+       }
+
+       I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+       I915_WRITE(ILK_FBC_RT_BASE,
+                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
+       /* enable it... */
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+       intel_fbc_recompress(dev_priv);
+}
+
+static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
+{
+       u32 dpfc_ctl;
+
+       /* Disable compression */
+       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+       if (dpfc_ctl & DPFC_CTL_EN) {
+               dpfc_ctl &= ~DPFC_CTL_EN;
+               I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+       }
+}
+
+static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
+{
+       return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+       u32 dpfc_ctl;
+       int threshold = dev_priv->fbc.threshold;
+
+       /* Display WA #0529: skl, kbl, bxt. */
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
+               u32 val = I915_READ(CHICKEN_MISC_4);
+
+               val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
+
+               if (i915_gem_object_get_tiling(params->vma->obj) !=
+                   I915_TILING_X)
+                       val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
+
+               I915_WRITE(CHICKEN_MISC_4, val);
+       }
+
+       dpfc_ctl = 0;
+       if (IS_IVYBRIDGE(dev_priv))
+               dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
+
+       if (params->fb.format->cpp[0] == 2)
+               threshold++;
+
+       switch (threshold) {
+       case 4:
+       case 3:
+               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+               break;
+       case 2:
+               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+               break;
+       case 1:
+               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+               break;
+       }
+
+       if (params->flags & PLANE_HAS_FENCE) {
+               dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+               I915_WRITE(SNB_DPFC_CTL_SA,
+                          SNB_CPU_FENCE_ENABLE |
+                          params->vma->fence->id);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
+       } else {
+               I915_WRITE(SNB_DPFC_CTL_SA, 0);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+       }
+
+       if (dev_priv->fbc.false_color)
+               dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+
+       if (IS_IVYBRIDGE(dev_priv)) {
+               /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+               I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                          I915_READ(ILK_DISPLAY_CHICKEN1) |
+                          ILK_FBCQ_DIS);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+               I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
+                          I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
+                          HSW_FBCQ_DIS);
+       }
+
+       if (IS_GEN(dev_priv, 11))
+               /* Wa_1409120013:icl,ehl */
+               I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+       intel_fbc_recompress(dev_priv);
+}
+
+static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) >= 5)
+               return ilk_fbc_is_active(dev_priv);
+       else if (IS_GM45(dev_priv))
+               return g4x_fbc_is_active(dev_priv);
+       else
+               return i8xx_fbc_is_active(dev_priv);
+}
+
+static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       fbc->active = true;
+
+       if (INTEL_GEN(dev_priv) >= 7)
+               gen7_fbc_activate(dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 5)
+               ilk_fbc_activate(dev_priv);
+       else if (IS_GM45(dev_priv))
+               g4x_fbc_activate(dev_priv);
+       else
+               i8xx_fbc_activate(dev_priv);
+}
+
+static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       fbc->active = false;
+
+       if (INTEL_GEN(dev_priv) >= 5)
+               ilk_fbc_deactivate(dev_priv);
+       else if (IS_GM45(dev_priv))
+               g4x_fbc_deactivate(dev_priv);
+       else
+               i8xx_fbc_deactivate(dev_priv);
+}
+
+/**
+ * intel_fbc_is_active - Is FBC active?
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to verify the current state of FBC.
+ *
+ * FIXME: This should be tracked in the plane config eventually
+ * instead of queried at runtime for most callers.
+ */
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
+{
+       return dev_priv->fbc.active;
+}
+
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
+                                const char *reason)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       WARN_ON(!mutex_is_locked(&fbc->lock));
+
+       if (fbc->active)
+               intel_fbc_hw_deactivate(dev_priv);
+
+       fbc->no_fbc_reason = reason;
+}
+
+static bool multiple_pipes_ok(struct intel_crtc *crtc,
+                             struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       enum pipe pipe = crtc->pipe;
+
+       /* Don't even bother tracking anything we don't need. */
+       if (!no_fbc_on_multiple_pipes(dev_priv))
+               return true;
+
+       if (plane_state->base.visible)
+               fbc->visible_pipes_mask |= (1 << pipe);
+       else
+               fbc->visible_pipes_mask &= ~(1 << pipe);
+
+       return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+                                     struct drm_mm_node *node,
+                                     int size,
+                                     int fb_cpp)
+{
+       int compression_threshold = 1;
+       int ret;
+       u64 end;
+
+       /* The FBC hardware for BDW/SKL doesn't have access to the stolen
+        * reserved range size, so it always assumes the maximum (8mb) is used.
+        * If we enable FBC using a CFB on that memory range we'll get FIFO
+        * underruns, even if that range is not reserved by the BIOS. */
+       if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
+               end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
+       else
+               end = U64_MAX;
+
+       /* HACK: This code depends on what we will do in *_enable_fbc. If that
+        * code changes, this code needs to change as well.
+        *
+        * The enable_fbc code will attempt to use one of our 2 compression
+        * thresholds, therefore, in that case, we only have 1 resort.
+        */
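+
+       /*
+        * Roughly: the first attempt below asks for twice the requested size
+        * (over-allocation), then the loop retries with the exact size, half
+        * of it at threshold 2 and a quarter at threshold 4, matching the 1:2
+        * and 1:4 limits the hardware can be programmed with.
+        */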
+
+       /* Try to over-allocate to reduce reallocations and fragmentation. */
+       ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+                                                  4096, 0, end);
+       if (ret == 0)
+               return compression_threshold;
+
+again:
+       /* HW's ability to limit the CFB is 1:4 */
+       if (compression_threshold > 4 ||
+           (fb_cpp == 2 && compression_threshold == 2))
+               return 0;
+
+       ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+                                                  4096, 0, end);
+       if (ret && INTEL_GEN(dev_priv) <= 4) {
+               return 0;
+       } else if (ret) {
+               compression_threshold <<= 1;
+               goto again;
+       } else {
+               return compression_threshold;
+       }
+}
+
+static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct drm_mm_node *uninitialized_var(compressed_llb);
+       int size, fb_cpp, ret;
+
+       WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
+
+       size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
+       fb_cpp = fbc->state_cache.fb.format->cpp[0];
+
+       ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
+                                        size, fb_cpp);
+       if (!ret)
+               goto err_llb;
+       else if (ret > 1) {
+               DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size CFB. Try to increase stolen memory size if available in BIOS.\n");
+       }
+
+       fbc->threshold = ret;
+
+       if (INTEL_GEN(dev_priv) >= 5)
+               I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
+       else if (IS_GM45(dev_priv)) {
+               I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
+       } else {
+               compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+               if (!compressed_llb)
+                       goto err_fb;
+
+               ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+                                                 4096, 4096);
+               if (ret)
+                       goto err_fb;
+
+               fbc->compressed_llb = compressed_llb;
+
+               GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+                                            fbc->compressed_fb.start,
+                                            U32_MAX));
+               GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+                                            fbc->compressed_llb->start,
+                                            U32_MAX));
+               I915_WRITE(FBC_CFB_BASE,
+                          dev_priv->dsm.start + fbc->compressed_fb.start);
+               I915_WRITE(FBC_LL_BASE,
+                          dev_priv->dsm.start + compressed_llb->start);
+       }
+
+       DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+                     fbc->compressed_fb.size, fbc->threshold);
+
+       return 0;
+
+err_fb:
+       kfree(compressed_llb);
+       i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+err_llb:
+       if (drm_mm_initialized(&dev_priv->mm.stolen))
+               pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+       return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (drm_mm_node_allocated(&fbc->compressed_fb))
+               i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+
+       if (fbc->compressed_llb) {
+               i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
+               kfree(fbc->compressed_llb);
+       }
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+       __intel_fbc_cleanup_cfb(dev_priv);
+       mutex_unlock(&fbc->lock);
+}
+
+static bool stride_is_valid(struct drm_i915_private *dev_priv,
+                           unsigned int stride)
+{
+       /* This should have been caught earlier. */
+       if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+               return false;
+
+       /* Below are the additional FBC restrictions. */
+       if (stride < 512)
+               return false;
+
+       if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
+               return stride == 4096 || stride == 8192;
+
+       if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
+               return false;
+
+       if (stride > 16384)
+               return false;
+
+       return true;
+}
+
+static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
+                                 u32 pixel_format)
+{
+       switch (pixel_format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+               return true;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_RGB565:
+               /* 16bpp not supported on gen2 */
+               if (IS_GEN(dev_priv, 2))
+                       return false;
+               /* WaFbcOnly1to1Ratio:ctg */
+               if (IS_G4X(dev_priv))
+                       return false;
+               return true;
+       default:
+               return false;
+       }
+}
+
+/*
+ * For some reason, the hardware tracking starts looking at whatever we
+ * programmed as the display plane base address register. It does not look at
+ * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
+ * variables instead of just looking at the pipe/plane size.
+ */
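+/*
+ * For instance (illustrative numbers): on a gen9 platform the limit below is
+ * 4096x4096, so a 4096x2160 plane whose base address had to be shifted up by
+ * 16 lines is treated as 4096x2176 for this check and still qualifies.
+ */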
+static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       unsigned int effective_w, effective_h, max_w, max_h;
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+               max_w = 5120;
+               max_h = 4096;
+       } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+               max_w = 4096;
+               max_h = 4096;
+       } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+               max_w = 4096;
+               max_h = 2048;
+       } else {
+               max_w = 2048;
+               max_h = 1536;
+       }
+
+       intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
+                                       &effective_h);
+       effective_w += fbc->state_cache.plane.adjusted_x;
+       effective_h += fbc->state_cache.plane.adjusted_y;
+
+       return effective_w <= max_w && effective_h <= max_h;
+}
+
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
+                                        struct intel_crtc_state *crtc_state,
+                                        struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+       struct drm_framebuffer *fb = plane_state->base.fb;
+
+       cache->vma = NULL;
+       cache->flags = 0;
+
+       cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
+
+       cache->plane.rotation = plane_state->base.rotation;
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
+       cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       cache->plane.visible = plane_state->base.visible;
+       cache->plane.adjusted_x = plane_state->color_plane[0].x;
+       cache->plane.adjusted_y = plane_state->color_plane[0].y;
+       cache->plane.y = plane_state->base.src.y1 >> 16;
+
+       cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
+
+       if (!cache->plane.visible)
+               return;
+
+       cache->fb.format = fb->format;
+       cache->fb.stride = fb->pitches[0];
+
+       cache->vma = plane_state->vma;
+       cache->flags = plane_state->flags;
+       if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
+               cache->flags &= ~PLANE_HAS_FENCE;
+}
+
+static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+       /* We don't need to use a state cache here since this information is
+        * global for all CRTCs.
+        */
+       if (fbc->underrun_detected) {
+               fbc->no_fbc_reason = "underrun detected";
+               return false;
+       }
+
+       if (!cache->vma) {
+               fbc->no_fbc_reason = "primary plane not visible";
+               return false;
+       }
+
+       if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
+               fbc->no_fbc_reason = "incompatible mode";
+               return false;
+       }
+
+       if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
+               fbc->no_fbc_reason = "mode too large for compression";
+               return false;
+       }
+
+       /* The use of a CPU fence is mandatory in order to detect writes
+        * by the CPU to the scanout and trigger updates to the FBC.
+        *
+        * Note that it is possible for a tiled surface to be unmappable (and
+        * so have no fence associated with it) due to aperture constraints
+        * at the time of pinning.
+        *
+        * FIXME with 90/270 degree rotation we should use the fence on
+        * the normal GTT view (the rotated view doesn't even have a
+        * fence). Would need changes to the FBC fence Y offset as well.
+        * For now this will effectively disable FBC with 90/270 degree
+        * rotation.
+        */
+       if (!(cache->flags & PLANE_HAS_FENCE)) {
+               fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+               return false;
+       }
+       if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+           cache->plane.rotation != DRM_MODE_ROTATE_0) {
+               fbc->no_fbc_reason = "rotation unsupported";
+               return false;
+       }
+
+       if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+               fbc->no_fbc_reason = "framebuffer stride not supported";
+               return false;
+       }
+
+       if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+               fbc->no_fbc_reason = "pixel format is invalid";
+               return false;
+       }
+
+       if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+           cache->fb.format->has_alpha) {
+               fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+               return false;
+       }
+
+       /* WaFbcExceedCdClockThreshold:hsw,bdw */
+       if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
+           cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
+               fbc->no_fbc_reason = "pixel rate is too big";
+               return false;
+       }
+
+       /* It is possible for the required CFB size to change without a
+        * crtc->disable + crtc->enable since it is possible to change the
+        * stride without triggering a full modeset. Since we try to
+        * over-allocate the CFB, there's a chance we may keep FBC enabled even
+        * if this happens, but if we exceed the current CFB size we'll have to
+        * disable FBC. Notice that it would be possible to disable FBC, wait
+        * for a frame, free the stolen node, then try to reenable FBC in case
+        * we didn't get any invalidate/deactivate calls, but this would require
+        * a lot of tracking just for a specific case. If we conclude it's an
+        * important case, we can implement it later. */
+       if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+           fbc->compressed_fb.size * fbc->threshold) {
+               fbc->no_fbc_reason = "CFB requirements changed";
+               return false;
+       }
+
+       /*
+        * Work around a problem on GEN9+ HW, where enabling FBC on a plane
+        * having a Y offset that isn't divisible by 4 causes FIFO underrun
+        * and screen flicker.
+        */
+       if (IS_GEN_RANGE(dev_priv, 9, 10) &&
+           (fbc->state_cache.plane.adjusted_y & 3)) {
+               fbc->no_fbc_reason = "plane Y offset is misaligned";
+               return false;
+       }
+
+       return true;
+}
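+
+/*
+ * Note for illustration: the no_fbc_reason strings recorded above are meant
+ * to be read back through debugfs; on a typical setup this is the
+ * i915_fbc_status file (e.g. /sys/kernel/debug/dri/0/i915_fbc_status, the
+ * exact path depends on the card number), which prints something like
+ * "FBC disabled: framebuffer not tiled or fenced".
+ */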
+
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (intel_vgpu_active(dev_priv)) {
+               fbc->no_fbc_reason = "VGPU is active";
+               return false;
+       }
+
+       if (!i915_modparams.enable_fbc) {
+               fbc->no_fbc_reason = "disabled per module param or by default";
+               return false;
+       }
+
+       if (fbc->underrun_detected) {
+               fbc->no_fbc_reason = "underrun detected";
+               return false;
+       }
+
+       return true;
+}
+
+static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
+                                    struct intel_fbc_reg_params *params)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+       /* Since all our fields are integer types, use memset here so the
+        * comparison function can rely on memcmp because the padding will be
+        * zero. */
+       memset(params, 0, sizeof(*params));
+
+       params->vma = cache->vma;
+       params->flags = cache->flags;
+
+       params->crtc.pipe = crtc->pipe;
+       params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
+       params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
+
+       params->fb.format = cache->fb.format;
+       params->fb.stride = cache->fb.stride;
+
+       params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
+               params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
+                                               32 * fbc->threshold) * 8;
+}
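+
+/*
+ * For illustration (hypothetical numbers): with src_w = 3840 and
+ * threshold = 1 the gen9 workaround stride above works out to
+ * DIV_ROUND_UP(3840, 32) * 8 = 120 * 8 = 960, and doubling the threshold
+ * halves it to 480.
+ */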
+
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+                         struct intel_crtc_state *crtc_state,
+                         struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       const char *reason = "update pending";
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+
+       if (!multiple_pipes_ok(crtc, plane_state)) {
+               reason = "more than one pipe active";
+               goto deactivate;
+       }
+
+       if (!fbc->enabled || fbc->crtc != crtc)
+               goto unlock;
+
+       intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+       fbc->flip_pending = true;
+
+deactivate:
+       intel_fbc_deactivate(dev_priv, reason);
+unlock:
+       mutex_unlock(&fbc->lock);
+}
+
+/**
+ * __intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This is the low level function that actually disables FBC. Callers should
+ * grab the FBC lock.
+ */
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_crtc *crtc = fbc->crtc;
+
+       WARN_ON(!mutex_is_locked(&fbc->lock));
+       WARN_ON(!fbc->enabled);
+       WARN_ON(fbc->active);
+
+       DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+       __intel_fbc_cleanup_cfb(dev_priv);
+
+       fbc->enabled = false;
+       fbc->crtc = NULL;
+}
+
+static void __intel_fbc_post_update(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       WARN_ON(!mutex_is_locked(&fbc->lock));
+
+       if (!fbc->enabled || fbc->crtc != crtc)
+               return;
+
+       fbc->flip_pending = false;
+       WARN_ON(fbc->active);
+
+       if (!i915_modparams.enable_fbc) {
+               intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
+               __intel_fbc_disable(dev_priv);
+
+               return;
+       }
+
+       intel_fbc_get_reg_params(crtc, &fbc->params);
+
+       if (!intel_fbc_can_activate(crtc))
+               return;
+
+       if (!fbc->busy_bits) {
+               intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+               intel_fbc_hw_activate(dev_priv);
+       } else
+               intel_fbc_deactivate(dev_priv, "frontbuffer write");
+}
+
+void intel_fbc_post_update(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+       __intel_fbc_post_update(crtc);
+       mutex_unlock(&fbc->lock);
+}
+
+static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
+{
+       if (fbc->enabled)
+               return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
+       else
+               return fbc->possible_framebuffer_bits;
+}
+
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+                         unsigned int frontbuffer_bits,
+                         enum fb_op_origin origin)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+               return;
+
+       mutex_lock(&fbc->lock);
+
+       fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
+
+       if (fbc->enabled && fbc->busy_bits)
+               intel_fbc_deactivate(dev_priv, "frontbuffer write");
+
+       mutex_unlock(&fbc->lock);
+}
+
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+                    unsigned int frontbuffer_bits, enum fb_op_origin origin)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+
+       fbc->busy_bits &= ~frontbuffer_bits;
+
+       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+               goto out;
+
+       if (!fbc->busy_bits && fbc->enabled &&
+           (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
+               if (fbc->active)
+                       intel_fbc_recompress(dev_priv);
+               else if (!fbc->flip_pending)
+                       __intel_fbc_post_update(fbc->crtc);
+       }
+
+out:
+       mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_choose_crtc - select a CRTC to enable FBC on
+ * @dev_priv: i915 device instance
+ * @state: the atomic state structure
+ *
+ * This function looks at the proposed state for CRTCs and planes, then chooses
+ * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
+ * true.
+ *
+ * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
+ * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
+ */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+                          struct intel_atomic_state *state)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_plane *plane;
+       struct intel_plane_state *plane_state;
+       bool crtc_chosen = false;
+       int i;
+
+       mutex_lock(&fbc->lock);
+
+       /* Does this atomic commit involve the CRTC currently tied to FBC? */
+       if (fbc->crtc &&
+           !intel_atomic_get_new_crtc_state(state, fbc->crtc))
+               goto out;
+
+       if (!intel_fbc_can_enable(dev_priv))
+               goto out;
+
+       /* Simply choose the first CRTC that is compatible and has a visible
+        * plane. We could go for fancier schemes such as checking the plane
+        * size, but this would just affect the few platforms that don't tie FBC
+        * to pipe or plane A. */
+       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+               struct intel_crtc_state *crtc_state;
+               struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+
+               if (!plane->has_fbc)
+                       continue;
+
+               if (!plane_state->base.visible)
+                       continue;
+
+               crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+               crtc_state->enable_fbc = true;
+               crtc_chosen = true;
+               break;
+       }
+
+       if (!crtc_chosen)
+               fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
+out:
+       mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_enable - tries to enable FBC on the CRTC
+ * @crtc: the CRTC
+ * @crtc_state: corresponding &drm_crtc_state for @crtc
+ * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
+ *
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_enable multiple times for the same pipe without an
+ * intel_fbc_disable in the middle, as long as it is deactivated.
+ */
+void intel_fbc_enable(struct intel_crtc *crtc,
+                     struct intel_crtc_state *crtc_state,
+                     struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+
+       if (fbc->enabled) {
+               WARN_ON(fbc->crtc == NULL);
+               if (fbc->crtc == crtc) {
+                       WARN_ON(!crtc_state->enable_fbc);
+                       WARN_ON(fbc->active);
+               }
+               goto out;
+       }
+
+       if (!crtc_state->enable_fbc)
+               goto out;
+
+       WARN_ON(fbc->active);
+       WARN_ON(fbc->crtc != NULL);
+
+       intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+       if (intel_fbc_alloc_cfb(crtc)) {
+               fbc->no_fbc_reason = "not enough stolen memory";
+               goto out;
+       }
+
+       DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+       fbc->no_fbc_reason = "FBC enabled but not active yet\n";
+
+       fbc->enabled = true;
+       fbc->crtc = crtc;
+out:
+       mutex_unlock(&fbc->lock);
+}
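+
+/*
+ * Rough lifecycle sketch, as suggested by the helpers in this file (the
+ * actual call sites live in the atomic modeset and frontbuffer tracking
+ * code and are not shown here):
+ *
+ *      intel_fbc_choose_crtc()  - pick a CRTC during the atomic check phase
+ *      intel_fbc_enable()       - reserve the CFB when the CRTC is set up
+ *      intel_fbc_pre_update()   - deactivate around a plane update
+ *      intel_fbc_post_update()  - re-activate the hardware if still possible
+ *      intel_fbc_invalidate()/intel_fbc_flush() - track frontbuffer writes
+ *      intel_fbc_disable()      - tear everything down with the CRTC
+ */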
+
+/**
+ * intel_fbc_disable - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+       if (fbc->crtc == crtc)
+               __intel_fbc_disable(dev_priv);
+       mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_global_disable - globally disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC regardless of which CRTC is associated with it.
+ */
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+       if (fbc->enabled) {
+               WARN_ON(fbc->crtc->active);
+               __intel_fbc_disable(dev_priv);
+       }
+       mutex_unlock(&fbc->lock);
+}
+
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private, fbc.underrun_work);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       mutex_lock(&fbc->lock);
+
+       /* Maybe we were scheduled twice. */
+       if (fbc->underrun_detected || !fbc->enabled)
+               goto out;
+
+       DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+       fbc->underrun_detected = true;
+
+       intel_fbc_deactivate(dev_priv, "FIFO underrun");
+out:
+       mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_reset_underrun - reset FBC fifo underrun status.
+ * @dev_priv: i915 device instance
+ *
+ * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
+ * want to re-enable FBC after an underrun to increase test coverage.
+ */
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       cancel_work_sync(&dev_priv->fbc.underrun_work);
+
+       ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
+       if (ret)
+               return ret;
+
+       if (dev_priv->fbc.underrun_detected) {
+               DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+               dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+       }
+
+       dev_priv->fbc.underrun_detected = false;
+       mutex_unlock(&dev_priv->fbc.lock);
+
+       return 0;
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So in order for us to be on the safe side, completely disable FBC
+ * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       /* There's no guarantee that underrun_detected won't be set to true
+        * right after this check and before the work is scheduled, but that's
+        * not a problem since we'll check it again under the work function
+        * while FBC is locked. This check here is just to prevent us from
+        * unnecessarily scheduling the work, and it relies on the fact that we
+        * never switch underrun_detected back to false after it's true. */
+       if (READ_ONCE(fbc->underrun_detected))
+               return;
+
+       schedule_work(&fbc->underrun_work);
+}
+
+/**
+ * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
+ * @dev_priv: i915 device instance
+ *
+ * The FBC code needs to track CRTC visibility since the older platforms can't
+ * have FBC enabled while multiple pipes are used. This function does the
+ * initial setup at driver load to make sure FBC is matching the real hardware.
+ */
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
+
+       /* Don't even bother tracking anything if we don't need to. */
+       if (!no_fbc_on_multiple_pipes(dev_priv))
+               return;
+
+       for_each_intel_crtc(&dev_priv->drm, crtc)
+               if (intel_crtc_active(crtc) &&
+                   crtc->base.primary->state->visible)
+                       dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
+}
+
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default value into either
+ * 0 or 1 in order to allow it to know what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+{
+       if (i915_modparams.enable_fbc >= 0)
+               return !!i915_modparams.enable_fbc;
+
+       if (!HAS_FBC(dev_priv))
+               return 0;
+
+       /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+       if (IS_GEMINILAKE(dev_priv))
+               return 0;
+
+       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
+               return 1;
+
+       return 0;
+}
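+
+/*
+ * For illustration: with the default i915.enable_fbc=-1 the logic above
+ * resolves to 1 on Broadwell and gen9+ (except Geminilake, see the bug
+ * reference), and to 0 everywhere else, including Haswell, which has FBC
+ * hardware but is left disabled by default here.
+ */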
+
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+       /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+       if (intel_vtd_active() &&
+           (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+               DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @dev_priv: the i915 device
+ *
+ * This function might be called during PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
+       mutex_init(&fbc->lock);
+       fbc->enabled = false;
+       fbc->active = false;
+
+       if (need_fbc_vtd_wa(dev_priv))
+               mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
+       i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+       DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
+                     i915_modparams.enable_fbc);
+
+       if (!HAS_FBC(dev_priv)) {
+               fbc->no_fbc_reason = "unsupported by this chipset";
+               return;
+       }
+
+       /* This value was pulled out of someone's hat */
+       if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
+               I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+
+       /* We still don't have any sort of hardware state readout for FBC, so
+        * deactivate it in case the BIOS activated it to make sure software
+        * matches the hardware state. */
+       if (intel_fbc_hw_is_active(dev_priv))
+               intel_fbc_hw_deactivate(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
new file mode 100644 (file)
index 0000000..50272ed
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBC_H__
+#define __INTEL_FBC_H__
+
+#include <linux/types.h>
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+                          struct intel_atomic_state *state);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+                         struct intel_crtc_state *crtc_state,
+                         struct intel_plane_state *plane_state);
+void intel_fbc_post_update(struct intel_crtc *crtc);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc,
+                     struct intel_crtc_state *crtc_state,
+                     struct intel_plane_state *plane_state);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+                         unsigned int frontbuffer_bits,
+                         enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+                    unsigned int frontbuffer_bits, enum fb_op_origin origin);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
new file mode 100644 (file)
index 0000000..1edd44e
--- /dev/null
@@ -0,0 +1,640 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <linux/async.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbdev.h"
+#include "intel_frontbuffer.h"
+
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+       struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
+       unsigned int origin =
+               ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
+
+       intel_fb_obj_invalidate(obj, origin);
+}
+
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct intel_fbdev *ifbdev =
+               container_of(fb_helper, struct intel_fbdev, helper);
+       int ret;
+
+       ret = drm_fb_helper_set_par(info);
+       if (ret == 0)
+               intel_fbdev_invalidate(ifbdev);
+
+       return ret;
+}
+
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct intel_fbdev *ifbdev =
+               container_of(fb_helper, struct intel_fbdev, helper);
+       int ret;
+
+       ret = drm_fb_helper_blank(blank, info);
+       if (ret == 0)
+               intel_fbdev_invalidate(ifbdev);
+
+       return ret;
+}
+
+static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
+                                  struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct intel_fbdev *ifbdev =
+               container_of(fb_helper, struct intel_fbdev, helper);
+       int ret;
+
+       ret = drm_fb_helper_pan_display(var, info);
+       if (ret == 0)
+               intel_fbdev_invalidate(ifbdev);
+
+       return ret;
+}
+
+static struct fb_ops intelfb_ops = {
+       .owner = THIS_MODULE,
+       DRM_FB_HELPER_DEFAULT_OPS,
+       .fb_set_par = intel_fbdev_set_par,
+       .fb_fillrect = drm_fb_helper_cfb_fillrect,
+       .fb_copyarea = drm_fb_helper_cfb_copyarea,
+       .fb_imageblit = drm_fb_helper_cfb_imageblit,
+       .fb_pan_display = intel_fbdev_pan_display,
+       .fb_blank = intel_fbdev_blank,
+};
+
+static int intelfb_alloc(struct drm_fb_helper *helper,
+                        struct drm_fb_helper_surface_size *sizes)
+{
+       struct intel_fbdev *ifbdev =
+               container_of(helper, struct intel_fbdev, helper);
+       struct drm_framebuffer *fb;
+       struct drm_device *dev = helper->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_mode_fb_cmd2 mode_cmd = {};
+       struct drm_i915_gem_object *obj;
+       int size, ret;
+
+       /* we don't do packed 24bpp */
+       if (sizes->surface_bpp == 24)
+               sizes->surface_bpp = 32;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+
+       mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+                                   DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
+
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+       size = PAGE_ALIGN(size);
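+       /*
+        * For illustration (hypothetical numbers): a 1920x1080 request at
+        * 32bpp gives pitches[0] = ALIGN(1920 * 4, 64) = 7680 and
+        * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (~7.9 MiB) of
+        * stolen or shmem backing for the fbdev framebuffer.
+        */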
+
+       /* If the FB is too big, just don't use it since fbdev is not very
+        * important and we should probably use that space with FBC or other
+        * features. */
+       obj = NULL;
+       if (size * 2 < dev_priv->stolen_usable_size)
+               obj = i915_gem_object_create_stolen(dev_priv, size);
+       if (obj == NULL)
+               obj = i915_gem_object_create_shmem(dev_priv, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("failed to allocate framebuffer\n");
+               ret = PTR_ERR(obj);
+               goto err;
+       }
+
+       fb = intel_framebuffer_create(obj, &mode_cmd);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
+               goto err_obj;
+       }
+
+       ifbdev->fb = to_intel_framebuffer(fb);
+
+       return 0;
+
+err_obj:
+       i915_gem_object_put(obj);
+err:
+       return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+                         struct drm_fb_helper_surface_size *sizes)
+{
+       struct intel_fbdev *ifbdev =
+               container_of(helper, struct intel_fbdev, helper);
+       struct intel_framebuffer *intel_fb = ifbdev->fb;
+       struct drm_device *dev = helper->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       const struct i915_ggtt_view view = {
+               .type = I915_GGTT_VIEW_NORMAL,
+       };
+       struct drm_framebuffer *fb;
+       intel_wakeref_t wakeref;
+       struct fb_info *info;
+       struct i915_vma *vma;
+       unsigned long flags = 0;
+       bool prealloc = false;
+       void __iomem *vaddr;
+       int ret;
+
+       if (intel_fb &&
+           (sizes->fb_width > intel_fb->base.width ||
+            sizes->fb_height > intel_fb->base.height)) {
+               DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+                             " releasing it\n",
+                             intel_fb->base.width, intel_fb->base.height,
+                             sizes->fb_width, sizes->fb_height);
+               drm_framebuffer_put(&intel_fb->base);
+               intel_fb = ifbdev->fb = NULL;
+       }
+       if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
+               DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+               ret = intelfb_alloc(helper, sizes);
+               if (ret)
+                       return ret;
+               intel_fb = ifbdev->fb;
+       } else {
+               DRM_DEBUG_KMS("re-using BIOS fb\n");
+               prealloc = true;
+               sizes->fb_width = intel_fb->base.width;
+               sizes->fb_height = intel_fb->base.height;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+       /* Pin the GGTT vma for our access via info->screen_base.
+        * This also validates that any existing fb inherited from the
+        * BIOS is suitable for our own access.
+        */
+       vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
+                                        &view, false, &flags);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto out_unlock;
+       }
+
+       fb = &ifbdev->fb->base;
+       intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+
+       info = drm_fb_helper_alloc_fbi(helper);
+       if (IS_ERR(info)) {
+               DRM_ERROR("Failed to allocate fb_info\n");
+               ret = PTR_ERR(info);
+               goto out_unpin;
+       }
+
+       ifbdev->helper.fb = fb;
+
+       info->fbops = &intelfb_ops;
+
+       /* setup aperture base/size for vesafb takeover */
+       info->apertures->ranges[0].base = dev->mode_config.fb_base;
+       info->apertures->ranges[0].size = ggtt->mappable_end;
+
+       info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
+       info->fix.smem_len = vma->node.size;
+
+       vaddr = i915_vma_pin_iomap(vma);
+       if (IS_ERR(vaddr)) {
+               DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
+               ret = PTR_ERR(vaddr);
+               goto out_unpin;
+       }
+       info->screen_base = vaddr;
+       info->screen_size = vma->node.size;
+
+       drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
+
+       /* If the object is shmemfs backed, it will have given us zeroed pages.
+        * If the object is stolen however, it will be full of whatever
+        * garbage was left in there.
+        */
+       if (intel_fb_obj(fb)->stolen && !prealloc)
+               memset_io(info->screen_base, 0, info->screen_size);
+
+       /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
+                     fb->width, fb->height, i915_ggtt_offset(vma));
+       ifbdev->vma = vma;
+       ifbdev->vma_flags = flags;
+
+       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+       mutex_unlock(&dev->struct_mutex);
+       vga_switcheroo_client_fb_set(pdev, info);
+       return 0;
+
+out_unpin:
+       intel_unpin_fb_vma(vma, flags);
+out_unlock:
+       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+       .fb_probe = intelfb_create,
+};
+
+static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
+{
+       /* We rely on the object-free to release the VMA pinning for
+        * the info->screen_base mmapping. Leaking the VMA is simpler than
+        * trying to rectify all the possible error paths leading here.
+        */
+
+       drm_fb_helper_fini(&ifbdev->helper);
+
+       if (ifbdev->vma) {
+               mutex_lock(&ifbdev->helper.dev->struct_mutex);
+               intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
+               mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+       }
+
+       if (ifbdev->fb)
+               drm_framebuffer_remove(&ifbdev->fb->base);
+
+       kfree(ifbdev);
+}
+
+/*
+ * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
+ * The core display code will have read out the current plane configuration,
+ * so we use that to figure out if there's an object for us to use as the
+ * fb, and if so, we re-use it for the fbdev configuration.
+ *
+ * Note we only support a single fb shared across pipes for boot (mostly for
+ * fbcon), so we just find the biggest and use that.
+ */
+static bool intel_fbdev_init_bios(struct drm_device *dev,
+                                struct intel_fbdev *ifbdev)
+{
+       struct intel_framebuffer *fb = NULL;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       unsigned int max_size = 0;
+
+       /* Find the largest fb */
+       for_each_crtc(dev, crtc) {
+               struct drm_i915_gem_object *obj =
+                       intel_fb_obj(crtc->primary->state->fb);
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!crtc->state->active || !obj) {
+                       DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
+                                     pipe_name(intel_crtc->pipe));
+                       continue;
+               }
+
+               if (obj->base.size > max_size) {
+                       DRM_DEBUG_KMS("found possible fb from plane %c\n",
+                                     pipe_name(intel_crtc->pipe));
+                       fb = to_intel_framebuffer(crtc->primary->state->fb);
+                       max_size = obj->base.size;
+               }
+       }
+
+       if (!fb) {
+               DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+               goto out;
+       }
+
+       /* Now make sure all the pipes will fit into it */
+       for_each_crtc(dev, crtc) {
+               unsigned int cur_size;
+
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!crtc->state->active) {
+                       DRM_DEBUG_KMS("pipe %c not active, skipping\n",
+                                     pipe_name(intel_crtc->pipe));
+                       continue;
+               }
+
+               DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
+                             pipe_name(intel_crtc->pipe));
+
+               /*
+                * See if the plane fb we found above will fit on this
+                * pipe.  Note we need to use the selected fb's pitch and bpp
+                * rather than the current pipe's, since they differ.
+                */
+               cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
+               cur_size = cur_size * fb->base.format->cpp[0];
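+               /*
+                * For illustration (hypothetical sizes): a BIOS fb taken from
+                * a 1920-wide plane at 32bpp has pitches[0] = 7680, so a
+                * second active pipe that is 2560 pixels wide needs
+                * 2560 * 4 = 10240 bytes per line and the BIOS fb is rejected
+                * by the check below.
+                */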
+               if (fb->base.pitches[0] < cur_size) {
+                       DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
+                                     pipe_name(intel_crtc->pipe),
+                                     cur_size, fb->base.pitches[0]);
+                       fb = NULL;
+                       break;
+               }
+
+               cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
+               cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
+               cur_size *= fb->base.pitches[0];
+               DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
+                             pipe_name(intel_crtc->pipe),
+                             crtc->state->adjusted_mode.crtc_hdisplay,
+                             crtc->state->adjusted_mode.crtc_vdisplay,
+                             fb->base.format->cpp[0] * 8,
+                             cur_size);
+
+               if (cur_size > max_size) {
+                       DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
+                                     pipe_name(intel_crtc->pipe),
+                                     cur_size, max_size);
+                       fb = NULL;
+                       break;
+               }
+
+               DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
+                             pipe_name(intel_crtc->pipe),
+                             max_size, cur_size);
+       }
+
+       if (!fb) {
+               DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+               goto out;
+       }
+
+       ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
+       ifbdev->fb = fb;
+
+       drm_framebuffer_get(&ifbdev->fb->base);
+
+       /* Final pass to check if any active pipes don't have fbs */
+       for_each_crtc(dev, crtc) {
+               intel_crtc = to_intel_crtc(crtc);
+
+               if (!crtc->state->active)
+                       continue;
+
+               WARN(!crtc->primary->state->fb,
+                    "re-used BIOS config but lost an fb on crtc %d\n",
+                    crtc->base.id);
+       }
+
+       DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+       return true;
+
+out:
+
+       return false;
+}
+
+static void intel_fbdev_suspend_worker(struct work_struct *work)
+{
+       intel_fbdev_set_suspend(&container_of(work,
+                                             struct drm_i915_private,
+                                             fbdev_suspend_work)->drm,
+                               FBINFO_STATE_RUNNING,
+                               true);
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_fbdev *ifbdev;
+       int ret;
+
+       if (WARN_ON(!HAS_DISPLAY(dev_priv)))
+               return -ENODEV;
+
+       ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+       if (ifbdev == NULL)
+               return -ENOMEM;
+
+       mutex_init(&ifbdev->hpd_lock);
+       drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
+       if (!intel_fbdev_init_bios(dev, ifbdev))
+               ifbdev->preferred_bpp = 32;
+
+       ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
+       if (ret) {
+               kfree(ifbdev);
+               return ret;
+       }
+
+       dev_priv->fbdev = ifbdev;
+       INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+
+       drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+
+       return 0;
+}
+
+static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
+{
+       struct intel_fbdev *ifbdev = data;
+
+       /* Due to peculiar init order wrt hpd handling this is separate. */
+       if (drm_fb_helper_initial_config(&ifbdev->helper,
+                                        ifbdev->preferred_bpp))
+               intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
+}
+
+void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       if (!ifbdev)
+               return;
+
+       ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
+}
+
+static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
+{
+       if (!ifbdev->cookie)
+               return;
+
+       /* Only serialises with all preceding async calls, hence +1 */
+       async_synchronize_cookie(ifbdev->cookie + 1);
+       ifbdev->cookie = 0;
+}
+
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+       if (!ifbdev)
+               return;
+
+       cancel_work_sync(&dev_priv->fbdev_suspend_work);
+       if (!current_is_async())
+               intel_fbdev_sync(ifbdev);
+
+       drm_fb_helper_unregister_fbi(&ifbdev->helper);
+}
+
+void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
+
+       if (!ifbdev)
+               return;
+
+       intel_fbdev_destroy(ifbdev);
+}
+
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+       bool send_hpd = false;
+
+       mutex_lock(&ifbdev->hpd_lock);
+       ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+       send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+       ifbdev->hpd_waiting = false;
+       mutex_unlock(&ifbdev->hpd_lock);
+
+       if (send_hpd) {
+               DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
+       }
+}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct fb_info *info;
+
+       if (!ifbdev || !ifbdev->vma)
+               return;
+
+       info = ifbdev->helper.fbdev;
+
+       if (synchronous) {
+               /* Flush any pending work to turn the console on, and then
+                * wait to turn it off. It must be synchronous as we are
+                * about to suspend or unload the driver.
+                *
+                * Note that from within the work-handler, we cannot flush
+                * ourselves, so only flush outstanding work upon suspend!
+                */
+               if (state != FBINFO_STATE_RUNNING)
+                       flush_work(&dev_priv->fbdev_suspend_work);
+
+               console_lock();
+       } else {
+               /*
+                * The console lock can be pretty contended on resume due
+                * to all the printk activity.  Try to keep it out of the hot
+                * path of resume if possible.
+                */
+               WARN_ON(state != FBINFO_STATE_RUNNING);
+               if (!console_trylock()) {
+                       /* Don't block our own workqueue as this can
+                        * be run in parallel with other i915.ko tasks.
+                        */
+                       schedule_work(&dev_priv->fbdev_suspend_work);
+                       return;
+               }
+       }
+
+       /* On resume from hibernation: If the object is shmemfs backed, it has
+        * been restored from swap. If the object is stolen however, it will be
+        * full of whatever garbage was left in there.
+        */
+       if (state == FBINFO_STATE_RUNNING &&
+           intel_fb_obj(&ifbdev->fb->base)->stolen)
+               memset_io(info->screen_base, 0, info->screen_size);
+
+       drm_fb_helper_set_suspend(&ifbdev->helper, state);
+       console_unlock();
+
+       intel_fbdev_hpd_set_suspend(ifbdev, state);
+}
+
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+       bool send_hpd;
+
+       if (!ifbdev)
+               return;
+
+       intel_fbdev_sync(ifbdev);
+
+       mutex_lock(&ifbdev->hpd_lock);
+       send_hpd = !ifbdev->hpd_suspended;
+       ifbdev->hpd_waiting = true;
+       mutex_unlock(&ifbdev->hpd_lock);
+
+       if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
+}
+
+void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       if (!ifbdev)
+               return;
+
+       intel_fbdev_sync(ifbdev);
+       if (!ifbdev->vma)
+               return;
+
+       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
+               intel_fbdev_invalidate(ifbdev);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
new file mode 100644 (file)
index 0000000..de7c842
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_H__
+#define __INTEL_FBDEV_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_i915_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_init(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
+void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
+void intel_fbdev_output_poll_changed(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
new file mode 100644 (file)
index 0000000..8545ad3
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_fifo_underrun.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful to
+ * debug display issues, especially watermark settings.
+ *
+ * If an underrun is detected, this is logged into dmesg. To avoid flooding the
+ * logs and occupying the cpu, underrun interrupts are disabled after the first
+ * occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no interrupt (even though the signalling bit is in the PIPESTAT pipe
+ * interrupt register). Also on some other platforms underrun interrupts are
+ * shared, which means that if we detect an underrun we need to disable underrun
+ * reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
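+
+/*
+ * Minimal usage sketch (illustrative only): expected underruns around a pipe
+ * enable/disable are typically masked by bracketing the sequence with the
+ * helpers defined below, e.g.:
+ *
+ *      intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ *      ... enable or disable the pipe ...
+ *      intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ */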
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc;
+       enum pipe pipe;
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       for_each_pipe(dev_priv, pipe) {
+               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+               if (crtc->cpu_fifo_underrun_disabled)
+                       return false;
+       }
+
+       return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       enum pipe pipe;
+       struct intel_crtc *crtc;
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       for_each_pipe(dev_priv, pipe) {
+               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+               if (crtc->pch_fifo_underrun_disabled)
+                       return false;
+       }
+
+       return true;
+}
+
+static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       i915_reg_t reg = PIPESTAT(crtc->pipe);
+       u32 enable_mask;
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+               return;
+
+       enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
+       I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+       POSTING_READ(reg);
+
+       trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
+       DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                            enum pipe pipe,
+                                            bool enable, bool old)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       i915_reg_t reg = PIPESTAT(pipe);
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       if (enable) {
+               u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+               I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+               POSTING_READ(reg);
+       } else {
+               if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
+                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+       }
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 bit = (pipe == PIPE_A) ?
+               DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
+
+       if (enable)
+               ilk_enable_display_irq(dev_priv, bit);
+       else
+               ilk_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+       u32 err_int = I915_READ(GEN7_ERR_INT);
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
+               return;
+
+       I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+       POSTING_READ(GEN7_ERR_INT);
+
+       trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+       DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                 enum pipe pipe,
+                                                 bool enable, bool old)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       if (enable) {
+               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+               if (!ivb_can_enable_err_int(dev))
+                       return;
+
+               ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+       } else {
+               ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+               if (old &&
+                   I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+                       DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+                                 pipe_name(pipe));
+               }
+       }
+}
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                 enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       if (enable)
+               bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+       else
+               bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum pipe pch_transcoder,
+                                           bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 bit = (pch_transcoder == PIPE_A) ?
+               SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+       if (enable)
+               ibx_enable_display_interrupt(dev_priv, bit);
+       else
+               ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pch_transcoder = crtc->pipe;
+       u32 serr_int = I915_READ(SERR_INT);
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
+               return;
+
+       I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+       POSTING_READ(SERR_INT);
+
+       trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+       DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
+                 pipe_name(pch_transcoder));
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum pipe pch_transcoder,
+                                           bool enable, bool old)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       if (enable) {
+               I915_WRITE(SERR_INT,
+                          SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+               if (!cpt_can_enable_serr_int(dev))
+                       return;
+
+               ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+       } else {
+               ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+               if (old && I915_READ(SERR_INT) &
+                   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+                       DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+                                 pipe_name(pch_transcoder));
+               }
+       }
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                                   enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+       bool old;
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       old = !crtc->cpu_fifo_underrun_disabled;
+       crtc->cpu_fifo_underrun_disabled = !enable;
+
+       if (HAS_GMCH(dev_priv))
+               i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+       else if (IS_GEN_RANGE(dev_priv, 5, 6))
+               ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+       else if (IS_GEN(dev_priv, 7))
+               ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+       else if (INTEL_GEN(dev_priv) >= 8)
+               broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+       return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables for all due to shared interrupts. Actual reporting is still per-pipe
+ * though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+                                          enum pipe pipe, bool enable)
+{
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
+                                                     enable);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+       return ret;
+}
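The kernel-doc above describes the typical caller: modeset code suppresses reporting around a pipe transition and re-arms it afterwards. A minimal sketch of that pattern follows; the helper name and the elided pipe programming are hypothetical and only illustrate the calling sequence, they are not part of i915:

static void example_modeset_enable_pipe(struct drm_i915_private *dev_priv,
                                        enum pipe pipe)
{
        /* Underruns are expected while the pipe is reconfigured: mute them. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /* ... program and enable the pipe here ... */

        /* Re-arm reporting and catch any underrun that already happened. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_check_cpu_fifo_underruns(dev_priv);
}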
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, due to the fact that there's just
+ * one interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+                                          enum pipe pch_transcoder,
+                                          bool enable)
+{
+       struct intel_crtc *crtc =
+               intel_get_crtc_for_pipe(dev_priv, pch_transcoder);
+       unsigned long flags;
+       bool old;
+
+       /*
+        * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+        * has only one pch transcoder A that all pipes can use. To avoid racy
+        * pch transcoder -> pipe lookups from interrupt code simply store the
+        * underrun statistics in crtc A. Since we never expose this anywhere
+        * nor use it outside of the fifo underrun code here, using the "wrong"
+        * crtc on LPT won't cause issues.
+        */
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+       old = !crtc->pch_fifo_underrun_disabled;
+       crtc->pch_fifo_underrun_disabled = !enable;
+
+       if (HAS_PCH_IBX(dev_priv))
+               ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+                                               pch_transcoder,
+                                               enable);
+       else
+               cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+                                               pch_transcoder,
+                                               enable, old);
+
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pipe)
+{
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+       /* We may be called too early in init, thanks BIOS! */
+       if (crtc == NULL)
+               return;
+
+       /* GMCH can't disable fifo underruns, filter them. */
+       if (HAS_GMCH(dev_priv) &&
+           crtc->cpu_fifo_underrun_disabled)
+               return;
+
+       if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
+               trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+               DRM_ERROR("CPU pipe %c FIFO underrun\n",
+                         pipe_name(pipe));
+       }
+
+       intel_fbc_handle_fifo_underrun_irq(dev_priv);
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pch_transcoder)
+{
+       if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+                                                 false)) {
+               trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+               DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+                         pipe_name(pch_transcoder));
+       }
+}
+
+/**
+ * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
+ * @dev_priv: i915 device instance
+ *
+ * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
+ * error interrupt may have been disabled, and so CPU fifo underruns won't
+ * necessarily raise an interrupt, and on GMCH platforms where underruns never
+ * raise an interrupt.
+ */
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               if (crtc->cpu_fifo_underrun_disabled)
+                       continue;
+
+               if (HAS_GMCH(dev_priv))
+                       i9xx_check_fifo_underruns(crtc);
+               else if (IS_GEN(dev_priv, 7))
+                       ivybridge_check_fifo_underruns(crtc);
+       }
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+/**
+ * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
+ * @dev_priv: i915 device instance
+ *
+ * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
+ * error interrupt may have been disabled, and so PCH fifo underruns won't
+ * necessarily raise an interrupt.
+ */
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               if (crtc->pch_fifo_underrun_disabled)
+                       continue;
+
+               if (HAS_PCH_CPT(dev_priv))
+                       cpt_check_pch_fifo_underruns(crtc);
+       }
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
new file mode 100644 (file)
index 0000000..e04f22a
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FIFO_UNDERRUN_H__
+#define __INTEL_FIFO_UNDERRUN_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+                                          enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+                                          enum pipe pch_transcoder,
+                                          bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pch_transcoder);
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FIFO_UNDERRUN_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
new file mode 100644 (file)
index 0000000..44273c1
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file
+ * are then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes and
+ * when the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their
+ * callbacks into the relevant places and filter for the frontbuffer slots
+ * that they are interested in.
+ *
+ * At a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and is interested in when it should
+ * stop caching and when caching can be restarted. This is done by placing
+ * callbacks into the invalidate and flush functions: at invalidate time the
+ * caching must be stopped and at flush time it can be restarted. Such features
+ * may also need to know when the frontbuffer changes (e.g. when the hw doesn't
+ * initiate an invalidate and flush on its own), which can be achieved by
+ * placing callbacks into the flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness; instead, delayed work
+ * should be started from the flush and flip functions and cancelled as soon as
+ * busyness is detected.
+ */
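To make the second, busyness-based pattern concrete, here is a minimal sketch of such a consumer. The structure, callbacks and 1 second idle timeout are hypothetical and only show where delayed work would be scheduled and cancelled; a real consumer (e.g. DRRS) lives elsewhere in the driver:

#include <linux/workqueue.h>

/* Hypothetical busyness-tracking feature, illustrative only. */
struct example_busyness_tracker {
        struct delayed_work idle_work;  /* downclocks when the screen is idle */
};

/* Called from the invalidate/flush/flip hooks: activity was seen. */
static void example_tracker_busy(struct example_busyness_tracker *t)
{
        cancel_delayed_work(&t->idle_work);
        /* ... switch to the "busy" (high refresh rate) state ... */
}

/* Flush and flip additionally (re)start the idle timer. */
static void example_tracker_flush(struct example_busyness_tracker *t)
{
        example_tracker_busy(t);
        schedule_delayed_work(&t->idle_work, msecs_to_jiffies(1000));
}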
+
+#include "display/intel_dp.h"
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+#include "intel_psr.h"
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                              enum fb_op_origin origin,
+                              unsigned int frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+
+       if (origin == ORIGIN_CS) {
+               spin_lock(&dev_priv->fb_tracking.lock);
+               dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+               dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+               spin_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       might_sleep();
+       intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
+       intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
+       intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
+                                   unsigned frontbuffer_bits,
+                                   enum fb_op_origin origin)
+{
+       /* Delay flushing when rings are still busy. */
+       spin_lock(&dev_priv->fb_tracking.lock);
+       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+       spin_unlock(&dev_priv->fb_tracking.lock);
+
+       if (!frontbuffer_bits)
+               return;
+
+       might_sleep();
+       intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
+       intel_psr_flush(dev_priv, frontbuffer_bits, origin);
+       intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
+}
+
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                         enum fb_op_origin origin,
+                         unsigned int frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+
+       if (origin == ORIGIN_CS) {
+               spin_lock(&dev_priv->fb_tracking.lock);
+               /* Filter out new bits since rendering started. */
+               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+               spin_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       if (frontbuffer_bits)
+               intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the frontbuffer planes
+ * given in @frontbuffer_bits. The actual frontbuffer flushing will be delayed
+ * until completion is signalled with intel_frontbuffer_flip_complete. If an
+ * invalidate happens in between, this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+                                   unsigned frontbuffer_bits)
+{
+       spin_lock(&dev_priv->fb_tracking.lock);
+       dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+       /* Remove stale busy bits due to the old buffer. */
+       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       spin_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+                                    unsigned frontbuffer_bits)
+{
+       spin_lock(&dev_priv->fb_tracking.lock);
+       /* Mask any cancelled flips. */
+       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+       spin_unlock(&dev_priv->fb_tracking.lock);
+
+       if (frontbuffer_bits)
+               intel_frontbuffer_flush(dev_priv,
+                                       frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the frontbuffer planes
+ * given in @frontbuffer_bits. This is for synchronous plane updates which will
+ * happen on the next vblank and which will not get delayed by pending gpu
+ * rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+                           unsigned frontbuffer_bits)
+{
+       spin_lock(&dev_priv->fb_tracking.lock);
+       /* Remove stale busy bits due to the old buffer. */
+       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       spin_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+}
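The flip helpers above bracket an asynchronous page flip: prepare defers the flush, complete executes it unless an invalidate cancelled it in the meantime. A minimal sketch of the calling sequence, with the hardware submission elided and the function name hypothetical:

static void example_async_flip(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits)
{
        /* Before queueing the flip: the flush is deferred until completion. */
        intel_frontbuffer_flip_prepare(dev_priv, frontbuffer_bits);

        /*
         * ... submit the flip to the hardware; once the flip-done interrupt
         * reports that the new buffer has been latched, signal completion so
         * the deferred ORIGIN_FLIP flush runs:
         */
        intel_frontbuffer_flip_complete(dev_priv, frontbuffer_bits);
}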
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
new file mode 100644 (file)
index 0000000..5727320
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_FRONTBUFFER_H__
+#define __INTEL_FRONTBUFFER_H__
+
+#include "gem/i915_gem_object.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+enum fb_op_origin {
+       ORIGIN_GTT,
+       ORIGIN_CPU,
+       ORIGIN_CS,
+       ORIGIN_FLIP,
+       ORIGIN_DIRTYFB,
+};
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+                                   unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+                                    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+                           unsigned frontbuffer_bits);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                              enum fb_op_origin origin,
+                              unsigned int frontbuffer_bits);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                         enum fb_op_origin origin,
+                         unsigned int frontbuffer_bits);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                                          enum fb_op_origin origin)
+{
+       unsigned int frontbuffer_bits;
+
+       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (!frontbuffer_bits)
+               return false;
+
+       __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+       return true;
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                                     enum fb_op_origin origin)
+{
+       unsigned int frontbuffer_bits;
+
+       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (!frontbuffer_bits)
+               return;
+
+       __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
+}
+
+#endif /* __INTEL_FRONTBUFFER_H__ */
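The two inline wrappers above are meant to bracket any rendering that may target a frontbuffer object. A minimal sketch of a CPU drawing path using them, with the actual pixel writes elided and the function name hypothetical:

static void example_cpu_draw(struct drm_i915_gem_object *obj)
{
        /* Tell PSR/FBC/DRRS that the frontbuffer is about to change. */
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);

        /* ... write pixels through a CPU mapping of the object ... */

        /* Rendering finished: allow frontbuffer caching to resume. */
        intel_fb_obj_flush(obj, ORIGIN_CPU);
}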
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
new file mode 100644 (file)
index 0000000..bc3a94d
--- /dev/null
@@ -0,0 +1,1977 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <linux/component.h>
+#include <linux/i2c.h>
+#include <linux/random.h>
+
+#include <drm/drm_hdcp.h>
+#include <drm/i915_component.h>
+
+#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_sideband.h"
+
+#define KEY_LOAD_TRIES 5
+#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS       50
+#define HDCP2_LC_RETRY_CNT                     3
+
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+       int i, ones = 0;
+       /* KSV has 20 1's and 20 0's */
+       for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+               ones += hweight8(ksv[i]);
+       if (ones != 20)
+               return false;
+
+       return true;
+}
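As a worked example of the 20-ones rule, with purely illustrative KSV values: 0xA5 has four set bits, so a KSV of five 0xA5 bytes carries exactly 20 ones and would pass intel_hdcp_is_ksv_valid(), while five 0xFF bytes carry 40 ones and would be rejected:

/* Illustrative values only, not real KSVs. */
static const u8 example_good_ksv[DRM_HDCP_KSV_LEN] = {
        0xA5, 0xA5, 0xA5, 0xA5, 0xA5    /* 5 * 4 = 20 set bits -> valid */
};
static const u8 example_bad_ksv[DRM_HDCP_KSV_LEN] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF    /* 40 set bits -> rejected */
};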
+
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+                              const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+       int ret, i, tries = 2;
+
+       /* HDCP spec states that we must retry the bksv if it is invalid */
+       for (i = 0; i < tries; i++) {
+               ret = shim->read_bksv(intel_dig_port, bksv);
+               if (ret)
+                       return ret;
+               if (intel_hdcp_is_ksv_valid(bksv))
+                       break;
+       }
+       if (i == tries) {
+               DRM_DEBUG_KMS("Bksv is invalid\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Is HDCP1.4 capable on Platform and Sink */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+       bool capable = false;
+       u8 bksv[5];
+
+       if (!shim)
+               return capable;
+
+       if (shim->hdcp_capable) {
+               shim->hdcp_capable(intel_dig_port, &capable);
+       } else {
+               if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+                       capable = true;
+       }
+
+       return capable;
+}
+
+/* Is HDCP2.2 capable on Platform and Sink */
+bool intel_hdcp2_capable(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       bool capable = false;
+
+       /* I915 support for HDCP2.2 */
+       if (!hdcp->hdcp2_supported)
+               return false;
+
+       /* MEI interface is solid */
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return false;
+       }
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       /* Sink's capability for HDCP2.2 */
+       hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+
+       return capable;
+}
+
+static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       enum port port = connector->encoder->port;
+       u32 reg;
+
+       reg = I915_READ(PORT_HDCP_STATUS(port));
+       return reg & HDCP_STATUS_ENC;
+}
+
+static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       enum port port = connector->encoder->port;
+       u32 reg;
+
+       reg = I915_READ(HDCP2_STATUS_DDI(port));
+       return reg & LINK_ENCRYPTION_STATUS;
+}
+
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+                                   const struct intel_hdcp_shim *shim)
+{
+       int ret, read_ret;
+       bool ksv_ready;
+
+       /* Poll for ksv list ready (spec says max time allowed is 5s) */
+       ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+                                                        &ksv_ready),
+                        read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
+                        100 * 1000);
+       if (ret)
+               return ret;
+       if (read_ret)
+               return read_ret;
+       if (!ksv_ready)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       enum i915_power_well_id id;
+       bool enabled = false;
+
+       /*
+        * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
+        * On all BXT+, SW can load the keys only when the PW#1 is turned on.
+        */
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               id = HSW_DISP_PW_GLOBAL;
+       else
+               id = SKL_DISP_PW_1;
+
+       mutex_lock(&power_domains->lock);
+
+       /* PG1 (power well #1) needs to be enabled */
+       for_each_power_well(dev_priv, power_well) {
+               if (power_well->desc->id == id) {
+                       enabled = power_well->desc->ops->is_enabled(dev_priv,
+                                                                   power_well);
+                       break;
+               }
+       }
+       mutex_unlock(&power_domains->lock);
+
+       /*
+        * Another requirement for HDCP key loadability is that the PLL for
+        * cdclk is enabled. Without an active crtc we won't land here, so we
+        * assume that cdclk is already on.
+        */
+
+       return enabled;
+}
+
+static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+       I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
+                  HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+}
+
+static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
+{
+       int ret;
+       u32 val;
+
+       val = I915_READ(HDCP_KEY_STATUS);
+       if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
+               return 0;
+
+       /*
+        * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
+        * out of reset. So if the Key is not already loaded, it's an error state.
+        */
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+                       return -ENXIO;
+
+       /*
+        * Initiate loading the HDCP key from fuses.
+        *
+        * On BXT+ platforms the HDCP key needs to be loaded by SW. Gen 9
+        * platforms other than BXT and GLK (i.e. GEN9_BC) differ in the key
+        * load trigger process from other platforms and use the GT Driver
+        * Mailbox i/f.
+        */
+       if (IS_GEN9_BC(dev_priv)) {
+               ret = sandybridge_pcode_write(dev_priv,
+                                             SKL_PCODE_LOAD_HDCP_KEYS, 1);
+               if (ret) {
+                       DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
+                                 ret);
+                       return ret;
+               }
+       } else {
+               I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+       }
+
+       /* Wait for the keys to load (500us) */
+       ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
+                                       HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+                                       10, 1, &val);
+       if (ret)
+               return ret;
+       else if (!(val & HDCP_KEY_LOAD_STATUS))
+               return -ENXIO;
+
+       /* Send Aksv over to PCH display for use in authentication */
+       I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+
+       return 0;
+}
+
+/* Returns updated SHA-1 index */
+static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
+{
+       I915_WRITE(HDCP_SHA_TEXT, sha_text);
+       if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
+                                   HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
+               DRM_ERROR("Timed out waiting for SHA1 ready\n");
+               return -ETIMEDOUT;
+       }
+       return 0;
+}
+
+static
+u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+{
+       enum port port = intel_dig_port->base.port;
+
+       switch (port) {
+       case PORT_A:
+               return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
+       case PORT_B:
+               return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
+       case PORT_C:
+               return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
+       case PORT_D:
+               return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
+       case PORT_E:
+               return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
+       default:
+               break;
+       }
+       DRM_ERROR("Unknown port %d\n", port);
+       return -EINVAL;
+}
+
+static
+int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+                               const struct intel_hdcp_shim *shim,
+                               u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
+{
+       struct drm_i915_private *dev_priv;
+       u32 vprime, sha_text, sha_leftovers, rep_ctl;
+       int ret, i, j, sha_idx;
+
+       dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+       /* Process V' values from the receiver */
+       for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
+               ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+               if (ret)
+                       return ret;
+               I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+       }
+
+       /*
+        * We need to write the concatenation of all device KSVs, BINFO (DP) ||
+        * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
+        * stream is written via the HDCP_SHA_TEXT register in 32-bit
+        * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
+        * index will keep track of our progress through the 64 bytes as well as
+        * helping us work the 40-bit KSVs through our 32-bit register.
+        *
+        * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
+        */
+       sha_idx = 0;
+       sha_text = 0;
+       sha_leftovers = 0;
+       rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+       for (i = 0; i < num_downstream; i++) {
+               unsigned int sha_empty;
+               u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
+
+               /* Fill up the empty slots in sha_text and write it out */
+               sha_empty = sizeof(sha_text) - sha_leftovers;
+               for (j = 0; j < sha_empty; j++)
+                       sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+
+               ret = intel_write_sha_text(dev_priv, sha_text);
+               if (ret < 0)
+                       return ret;
+
+               /* Programming guide writes this every 64 bytes */
+               sha_idx += sizeof(sha_text);
+               if (!(sha_idx % 64))
+                       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+
+               /* Store the leftover bytes from the ksv in sha_text */
+               sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
+               sha_text = 0;
+               for (j = 0; j < sha_leftovers; j++)
+                       sha_text |= ksv[sha_empty + j] <<
+                                       ((sizeof(sha_text) - j - 1) * 8);
+
+               /*
+                * If we still have room in sha_text for more data, continue.
+                * Otherwise, write it out immediately.
+                */
+               if (sizeof(sha_text) > sha_leftovers)
+                       continue;
+
+               ret = intel_write_sha_text(dev_priv, sha_text);
+               if (ret < 0)
+                       return ret;
+               sha_leftovers = 0;
+               sha_text = 0;
+               sha_idx += sizeof(sha_text);
+       }
+
+       /*
+        * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
+        * bytes are leftover from the last ksv, we might be able to fit them
+        * all in sha_text (first 2 cases), or we might need to split them up
+        * into 2 writes (last 2 cases).
+        */
+       if (sha_leftovers == 0) {
+               /* Write 16 bits of text, 16 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+               ret = intel_write_sha_text(dev_priv,
+                                          bstatus[0] << 8 | bstatus[1]);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 32 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 16 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+       } else if (sha_leftovers == 1) {
+               /* Write 24 bits of text, 8 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+               sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
+               /* Only 24-bits of data, must be in the LSB */
+               sha_text = (sha_text & 0xffffff00) >> 8;
+               ret = intel_write_sha_text(dev_priv, sha_text);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 32 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 24 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+       } else if (sha_leftovers == 2) {
+               /* Write 32 bits of text */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+               sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+               ret = intel_write_sha_text(dev_priv, sha_text);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 64 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+               for (i = 0; i < 2; i++) {
+                       ret = intel_write_sha_text(dev_priv, 0);
+                       if (ret < 0)
+                               return ret;
+                       sha_idx += sizeof(sha_text);
+               }
+       } else if (sha_leftovers == 3) {
+               /* Write 32 bits of text */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+               sha_text |= bstatus[0] << 24;
+               ret = intel_write_sha_text(dev_priv, sha_text);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 8 bits of text, 24 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+               ret = intel_write_sha_text(dev_priv, bstatus[1]);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 32 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+
+               /* Write 8 bits of M0 */
+               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+       } else {
+               DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
+                             sha_leftovers);
+               return -EINVAL;
+       }
+
+       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+       /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
+       while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
+               ret = intel_write_sha_text(dev_priv, 0);
+               if (ret < 0)
+                       return ret;
+               sha_idx += sizeof(sha_text);
+       }
+
+       /*
+        * Last write gets the length of the concatenation in bits. That is:
+        *  - 5 bytes per device
+        *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
+        */
+       sha_text = (num_downstream * 5 + 10) * 8;
+       ret = intel_write_sha_text(dev_priv, sha_text);
+       if (ret < 0)
+               return ret;
+
+       /* Tell the HW we're done with the hash and wait for it to ACK */
+       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+       if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
+                                   HDCP_SHA1_COMPLETE,
+                                   HDCP_SHA1_COMPLETE, 1)) {
+               DRM_ERROR("Timed out waiting for SHA1 complete\n");
+               return -ETIMEDOUT;
+       }
+       if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+               DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+               return -ENXIO;
+       }
+
+       return 0;
+}
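As a sanity check on the final length write: with, say, two downstream devices the hashed stream is 2 * 5 KSV bytes + 2 bytes of BINFO/BSTATUS + 8 bytes of M0 = 20 bytes, so the value written is 20 * 8 = 160 bits. A small helper that mirrors the expression above (hypothetical, for illustration only):

static u32 example_sha1_stream_bits(u8 num_downstream)
{
        /* 5 bytes per device KSV + 10 bytes for BINFO/BSTATUS(2) and M0(8) */
        return (num_downstream * DRM_HDCP_KSV_LEN + 10) * 8;
}
/* example_sha1_stream_bits(2) == 160 */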
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+       struct drm_device *dev = connector->base.dev;
+       u8 bstatus[2], num_downstream, *ksv_fifo;
+       int ret, i, tries = 3;
+
+       ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+       if (ret) {
+               DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+               return ret;
+       }
+
+       ret = shim->read_bstatus(intel_dig_port, bstatus);
+       if (ret)
+               return ret;
+
+       if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+           DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
+               DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+               return -EPERM;
+       }
+
+       /*
+        * When the repeater reports a device count of 0, the HDCP1.4 spec
+        * allows disabling HDCP encryption, which implies that the repeater
+        * can't have its own display. As there is no consumption of encrypted
+        * content in a repeater with 0 downstream devices, we fail the
+        * authentication.
+        */
+       num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+       if (num_downstream == 0)
+               return -EINVAL;
+
+       ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
+       if (!ksv_fifo)
+               return -ENOMEM;
+
+       ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+       if (ret)
+               goto err;
+
+       if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
+               DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+               /* Don't leak the ksv_fifo allocated above. */
+               ret = -EPERM;
+               goto err;
+       }
+
+       /*
+        * When V prime mismatches, the DP spec mandates re-reading
+        * V prime at least twice.
+        */
+       for (i = 0; i < tries; i++) {
+               ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+                                                 ksv_fifo, num_downstream,
+                                                 bstatus);
+               if (!ret)
+                       break;
+       }
+
+       if (i == tries) {
+               DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+               goto err;
+       }
+
+       DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
+                     num_downstream);
+       ret = 0;
+err:
+       kfree(ksv_fifo);
+       return ret;
+}
+
+/* Implements Part 1 of the HDCP authorization procedure */
+static int intel_hdcp_auth(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_device *dev = connector->base.dev;
+       const struct intel_hdcp_shim *shim = hdcp->shim;
+       struct drm_i915_private *dev_priv;
+       enum port port;
+       unsigned long r0_prime_gen_start;
+       int ret, i, tries = 2;
+       union {
+               u32 reg[2];
+               u8 shim[DRM_HDCP_AN_LEN];
+       } an;
+       union {
+               u32 reg[2];
+               u8 shim[DRM_HDCP_KSV_LEN];
+       } bksv;
+       union {
+               u32 reg;
+               u8 shim[DRM_HDCP_RI_LEN];
+       } ri;
+       bool repeater_present, hdcp_capable;
+
+       dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+       port = intel_dig_port->base.port;
+
+       /*
+        * Detects whether the display is HDCP capable. Although we check for
+        * valid Bksv below, the HDCP over DP spec requires that we check
+        * whether the display supports HDCP before we write An. For HDMI
+        * displays, this is not necessary.
+        */
+       if (shim->hdcp_capable) {
+               ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+               if (ret)
+                       return ret;
+               if (!hdcp_capable) {
+                       DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* Initialize An with 2 random values and acquire it */
+       for (i = 0; i < 2; i++)
+               I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
+       I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+
+       /* Wait for An to be acquired */
+       if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
+                                   HDCP_STATUS_AN_READY,
+                                   HDCP_STATUS_AN_READY, 1)) {
+               DRM_ERROR("Timed out waiting for An\n");
+               return -ETIMEDOUT;
+       }
+
+       an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
+       an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+       ret = shim->write_an_aksv(intel_dig_port, an.shim);
+       if (ret)
+               return ret;
+
+       r0_prime_gen_start = jiffies;
+
+       memset(&bksv, 0, sizeof(bksv));
+
+       ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+       if (ret < 0)
+               return ret;
+
+       if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
+               DRM_ERROR("BKSV is revoked\n");
+               return -EPERM;
+       }
+
+       I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
+       I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+
+       ret = shim->repeater_present(intel_dig_port, &repeater_present);
+       if (ret)
+               return ret;
+       if (repeater_present)
+               I915_WRITE(HDCP_REP_CTL,
+                          intel_hdcp_get_repeater_ctl(intel_dig_port));
+
+       ret = shim->toggle_signalling(intel_dig_port, true);
+       if (ret)
+               return ret;
+
+       I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+
+       /* Wait for R0 ready */
+       if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+                    (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+               DRM_ERROR("Timed out waiting for R0 ready\n");
+               return -ETIMEDOUT;
+       }
+
+       /*
+        * Wait for R0' to become available. The spec says 100ms from Aksv, but
+        * some monitors can take longer than this. We'll set the timeout at
+        * 300ms just to be sure.
+        *
+        * On DP, there's an R0_READY bit available but no such bit
+        * exists on HDMI. Since the upper-bound is the same, we'll just do
+        * the stupid thing instead of polling on one and not the other.
+        */
+       wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
+
+       tries = 3;
+
+       /*
+        * The DP HDCP spec mandates two more attempts to read R0 in case
+        * of an R0 mismatch.
+        */
+       for (i = 0; i < tries; i++) {
+               ri.reg = 0;
+               ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+               if (ret)
+                       return ret;
+               I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+               /* Wait for Ri prime match */
+               if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+                   (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+                       break;
+       }
+
+       if (i == tries) {
+               DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
+                             I915_READ(PORT_HDCP_STATUS(port)));
+               return -ETIMEDOUT;
+       }
+
+       /* Wait for encryption confirmation */
+       if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
+                                   HDCP_STATUS_ENC, HDCP_STATUS_ENC,
+                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+               DRM_ERROR("Timed out waiting for encryption\n");
+               return -ETIMEDOUT;
+       }
+
+       /*
+        * XXX: If we have MST-connected devices, we need to enable encryption
+        * on those as well.
+        */
+
+       if (repeater_present)
+               return intel_hdcp_auth_downstream(connector);
+
+       DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+       return 0;
+}
+
+static int _intel_hdcp_disable(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       enum port port = intel_dig_port->base.port;
+       int ret;
+
+       DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
+                     connector->base.name, connector->base.base.id);
+
+       hdcp->hdcp_encrypted = false;
+       I915_WRITE(PORT_HDCP_CONF(port), 0);
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   PORT_HDCP_STATUS(port), ~0, 0,
+                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+               DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+               return -ETIMEDOUT;
+       }
+
+       ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+       if (ret) {
+               DRM_ERROR("Failed to disable HDCP signalling\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("HDCP is disabled\n");
+       return 0;
+}
+
+static int _intel_hdcp_enable(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+       int i, ret, tries = 3;
+
+       DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
+                     connector->base.name, connector->base.base.id);
+
+       if (!hdcp_key_loadable(dev_priv)) {
+               DRM_ERROR("HDCP key Load is not possible\n");
+               return -ENXIO;
+       }
+
+       for (i = 0; i < KEY_LOAD_TRIES; i++) {
+               ret = intel_hdcp_load_keys(dev_priv);
+               if (!ret)
+                       break;
+               intel_hdcp_clear_keys(dev_priv);
+       }
+       if (ret) {
+               DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+               return ret;
+       }
+
+       /* In case of authentication failures, the HDCP spec expects reauth. */
+       for (i = 0; i < tries; i++) {
+               ret = intel_hdcp_auth(connector);
+               if (!ret) {
+                       hdcp->hdcp_encrypted = true;
+                       return 0;
+               }
+
+               DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+
+               /* Ensure HDCP encryption and signalling are stopped. */
+               _intel_hdcp_disable(connector);
+       }
+
+       DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+       return ret;
+}
+
+static inline
+struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+       return container_of(hdcp, struct intel_connector, hdcp);
+}
+
+/* Implements Part 3 of the HDCP authorization procedure */
+static int intel_hdcp_check_link(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       enum port port = intel_dig_port->base.port;
+       int ret = 0;
+
+       mutex_lock(&hdcp->mutex);
+
+       /* Check_link valid only when HDCP1.4 is enabled */
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+           !hdcp->hdcp_encrypted) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (WARN_ON(!intel_hdcp_in_use(connector))) {
+               DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
+                         connector->base.name, connector->base.base.id,
+                         I915_READ(PORT_HDCP_STATUS(port)));
+               ret = -ENXIO;
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               goto out;
+       }
+
+       if (hdcp->shim->check_link(intel_dig_port)) {
+               if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+                       schedule_work(&hdcp->prop_work);
+               }
+               goto out;
+       }
+
+       DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+                     connector->base.name, connector->base.base.id);
+
+       ret = _intel_hdcp_disable(connector);
+       if (ret) {
+               DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               goto out;
+       }
+
+       ret = _intel_hdcp_enable(connector);
+       if (ret) {
+               DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               goto out;
+       }
+
+out:
+       mutex_unlock(&hdcp->mutex);
+       return ret;
+}
+
+static void intel_hdcp_prop_work(struct work_struct *work)
+{
+       struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+                                              prop_work);
+       struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+       struct drm_device *dev = connector->base.dev;
+       struct drm_connector_state *state;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       mutex_lock(&hdcp->mutex);
+
+       /*
+        * This worker is only used to flip between ENABLED/DESIRED. Transitions
+        * from either of those to UNDESIRED are handled by the core. If
+        * value == UNDESIRED, we're running just after hdcp has been disabled,
+        * so just exit.
+        */
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               state = connector->base.state;
+               state->content_protection = hdcp->value;
+       }
+
+       mutex_unlock(&hdcp->mutex);
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
+{
+       /* PORT E doesn't have HDCP, and PORT F is disabled */
+       return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
+}
+
+static int
+hdcp2_prepare_ake_init(struct intel_connector *connector,
+                      struct hdcp2_ake_init *ake_data)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+       if (ret)
+               DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int
+hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
+                               struct hdcp2_ake_send_cert *rx_cert,
+                               bool *paired,
+                               struct hdcp2_ake_no_stored_km *ek_pub_km,
+                               size_t *msg_sz)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+                                                        rx_cert, paired,
+                                                        ek_pub_km, msg_sz);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int hdcp2_verify_hprime(struct intel_connector *connector,
+                              struct hdcp2_ake_send_hprime *rx_hprime)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int
+hdcp2_store_pairing_info(struct intel_connector *connector,
+                        struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int
+hdcp2_prepare_lc_init(struct intel_connector *connector,
+                     struct hdcp2_lc_init *lc_init)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int
+hdcp2_verify_lprime(struct intel_connector *connector,
+                   struct hdcp2_lc_send_lprime *rx_lprime)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int hdcp2_prepare_skey(struct intel_connector *connector,
+                             struct hdcp2_ske_send_eks *ske_data)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int
+hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
+                                     struct hdcp2_rep_send_receiverid_list
+                                                               *rep_topology,
+                                     struct hdcp2_rep_send_ack *rep_send_ack)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
+                                                        rep_topology,
+                                                        rep_send_ack);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int
+hdcp2_verify_mprime(struct intel_connector *connector,
+                   struct hdcp2_rep_stream_ready *stream_ready)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int hdcp2_authenticate_port(struct intel_connector *connector)
+{
+       struct hdcp_port_data *data = &connector->hdcp.port_data;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+       if (ret < 0)
+               DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int hdcp2_close_mei_session(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct i915_hdcp_comp_master *comp;
+       int ret;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       comp = dev_priv->hdcp_master;
+
+       if (!comp || !comp->ops) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return -EINVAL;
+       }
+
+       ret = comp->ops->close_hdcp_session(comp->mei_dev,
+                                            &connector->hdcp.port_data);
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return ret;
+}
+
+static int hdcp2_deauthenticate_port(struct intel_connector *connector)
+{
+       return hdcp2_close_mei_session(connector);
+}
+
+/* Authentication flow starts from here */
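+/*
+ * AKE: send AKE_Init, validate the receiver certificate via the MEI
+ * component, send the (no_)stored_km message, verify H' and, if the
+ * receiver is not yet paired, read and store the pairing info.
+ */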
+static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_device *dev = connector->base.dev;
+       union {
+               struct hdcp2_ake_init ake_init;
+               struct hdcp2_ake_send_cert send_cert;
+               struct hdcp2_ake_no_stored_km no_stored_km;
+               struct hdcp2_ake_send_hprime send_hprime;
+               struct hdcp2_ake_send_pairing_info pairing_info;
+       } msgs;
+       const struct intel_hdcp_shim *shim = hdcp->shim;
+       size_t size;
+       int ret;
+
+       /* Init for seq_num */
+       hdcp->seq_num_v = 0;
+       hdcp->seq_num_m = 0;
+
+       ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
+       if (ret < 0)
+               return ret;
+
+       ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+                                 sizeof(msgs.ake_init));
+       if (ret < 0)
+               return ret;
+
+       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+                                &msgs.send_cert, sizeof(msgs.send_cert));
+       if (ret < 0)
+               return ret;
+
+       if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+               return -EINVAL;
+
+       hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+
+       if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+                                       1)) {
+               DRM_ERROR("Receiver ID is revoked\n");
+               return -EPERM;
+       }
+
+       /*
+        * msgs.no_stored_km is also used to hold the message sent when a km
+        * is already stored; the size returned below tells which one the MEI
+        * component prepared.
+        */
+       ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
+                                             &hdcp->is_paired,
+                                             &msgs.no_stored_km, &size);
+       if (ret < 0)
+               return ret;
+
+       ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+       if (ret < 0)
+               return ret;
+
+       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+                                &msgs.send_hprime, sizeof(msgs.send_hprime));
+       if (ret < 0)
+               return ret;
+
+       ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
+       if (ret < 0)
+               return ret;
+
+       if (!hdcp->is_paired) {
+               /* Pairing is required */
+               ret = shim->read_2_2_msg(intel_dig_port,
+                                        HDCP_2_2_AKE_SEND_PAIRING_INFO,
+                                        &msgs.pairing_info,
+                                        sizeof(msgs.pairing_info));
+               if (ret < 0)
+                       return ret;
+
+               ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
+               if (ret < 0)
+                       return ret;
+               hdcp->is_paired = true;
+       }
+
+       return 0;
+}
+
+static int hdcp2_locality_check(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       union {
+               struct hdcp2_lc_init lc_init;
+               struct hdcp2_lc_send_lprime send_lprime;
+       } msgs;
+       const struct intel_hdcp_shim *shim = hdcp->shim;
+       int tries = HDCP2_LC_RETRY_CNT, ret, i;
+
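+       /* The locality check may be retried up to HDCP2_LC_RETRY_CNT times. */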
+       for (i = 0; i < tries; i++) {
+               ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
+               if (ret < 0)
+                       continue;
+
+               ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+                                     sizeof(msgs.lc_init));
+               if (ret < 0)
+                       continue;
+
+               ret = shim->read_2_2_msg(intel_dig_port,
+                                        HDCP_2_2_LC_SEND_LPRIME,
+                                        &msgs.send_lprime,
+                                        sizeof(msgs.send_lprime));
+               if (ret < 0)
+                       continue;
+
+               ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
+               if (!ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int hdcp2_session_key_exchange(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct hdcp2_ske_send_eks send_eks;
+       int ret;
+
+       ret = hdcp2_prepare_skey(connector, &send_eks);
+       if (ret < 0)
+               return ret;
+
+       ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+                                       sizeof(send_eks));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static
+int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       union {
+               struct hdcp2_rep_stream_manage stream_manage;
+               struct hdcp2_rep_stream_ready stream_ready;
+       } msgs;
+       const struct intel_hdcp_shim *shim = hdcp->shim;
+       int ret;
+
+       /* Prepare RepeaterAuth_Stream_Manage msg */
+       msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
+       drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+
+       /* K, the number of streams, is fixed at 1 and stored as big-endian. */
+       msgs.stream_manage.k = cpu_to_be16(1);
+
+       /* The stream ID is forced to 0x0 for both HDMI and DP SST. */
+       msgs.stream_manage.streams[0].stream_id = 0;
+       msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+
+       /* Send it to Repeater */
+       ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+                                 sizeof(msgs.stream_manage));
+       if (ret < 0)
+               return ret;
+
+       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+                                &msgs.stream_ready, sizeof(msgs.stream_ready));
+       if (ret < 0)
+               return ret;
+
+       hdcp->port_data.seq_num_m = hdcp->seq_num_m;
+       hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+
+       ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
+       if (ret < 0)
+               return ret;
+
+       hdcp->seq_num_m++;
+
+       if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
+               DRM_DEBUG_KMS("seq_num_m roll over.\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static
+int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_device *dev = connector->base.dev;
+       union {
+               struct hdcp2_rep_send_receiverid_list recvid_list;
+               struct hdcp2_rep_send_ack rep_ack;
+       } msgs;
+       const struct intel_hdcp_shim *shim = hdcp->shim;
+       u32 seq_num_v, device_cnt;
+       u8 *rx_info;
+       int ret;
+
+       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+                                &msgs.recvid_list, sizeof(msgs.recvid_list));
+       if (ret < 0)
+               return ret;
+
+       rx_info = msgs.recvid_list.rx_info;
+
+       if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
+           HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
+               DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+               return -EINVAL;
+       }
+
+       /* Convert seq_num_v to CPU byte order and keep it in a local u32. */
+       seq_num_v =
+               drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+
+       if (seq_num_v < hdcp->seq_num_v) {
+               /* Roll over of the seq_num_v from repeater. Reauthenticate. */
+               DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+               return -EINVAL;
+       }
+
+       device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+                     HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+       if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+                                       device_cnt)) {
+               DRM_ERROR("Revoked receiver ID(s) is in list\n");
+               return -EPERM;
+       }
+
+       ret = hdcp2_verify_rep_topology_prepare_ack(connector,
+                                                   &msgs.recvid_list,
+                                                   &msgs.rep_ack);
+       if (ret < 0)
+               return ret;
+
+       hdcp->seq_num_v = seq_num_v;
+       ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+                                 sizeof(msgs.rep_ack));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int hdcp2_authenticate_repeater(struct intel_connector *connector)
+{
+       int ret;
+
+       ret = hdcp2_authenticate_repeater_topology(connector);
+       if (ret < 0)
+               return ret;
+
+       return hdcp2_propagate_stream_management_info(connector);
+}
+
+static int hdcp2_authenticate_sink(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       const struct intel_hdcp_shim *shim = hdcp->shim;
+       int ret;
+
+       ret = hdcp2_authentication_key_exchange(connector);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+               return ret;
+       }
+
+       ret = hdcp2_locality_check(connector);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+               return ret;
+       }
+
+       ret = hdcp2_session_key_exchange(connector);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+               return ret;
+       }
+
+       if (shim->config_stream_type) {
+               ret = shim->config_stream_type(intel_dig_port,
+                                              hdcp->is_repeater,
+                                              hdcp->content_type);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (hdcp->is_repeater) {
+               ret = hdcp2_authenticate_repeater(connector);
+               if (ret < 0) {
+                       DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+       ret = hdcp2_authenticate_port(connector);
+       if (ret < 0)
+               return ret;
+
+       return ret;
+}
+
+static int hdcp2_enable_encryption(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       enum port port = connector->encoder->port;
+       int ret;
+
+       WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
+
+       if (hdcp->shim->toggle_signalling) {
+               ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+               if (ret) {
+                       DRM_ERROR("Failed to enable HDCP signalling. %d\n",
+                                 ret);
+                       return ret;
+               }
+       }
+
+       if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+               /* Link is Authenticated. Now set for Encryption */
+               I915_WRITE(HDCP2_CTL_DDI(port),
+                          I915_READ(HDCP2_CTL_DDI(port)) |
+                          CTL_LINK_ENCRYPTION_REQ);
+       }
+
+       ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+                                     LINK_ENCRYPTION_STATUS,
+                                     LINK_ENCRYPTION_STATUS,
+                                     ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+
+       return ret;
+}
+
+static int hdcp2_disable_encryption(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       enum port port = connector->encoder->port;
+       int ret;
+
+       WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+
+       I915_WRITE(HDCP2_CTL_DDI(port),
+                  I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+
+       ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+                                     LINK_ENCRYPTION_STATUS, 0x0,
+                                     ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+       if (ret == -ETIMEDOUT)
+               DRM_DEBUG_KMS("Disable Encryption Timedout");
+
+       if (hdcp->shim->toggle_signalling) {
+               ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+               if (ret) {
+                       DRM_ERROR("Failed to disable HDCP signalling. %d\n",
+                                 ret);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+{
+       int ret, i, tries = 3;
+
+       for (i = 0; i < tries; i++) {
+               ret = hdcp2_authenticate_sink(connector);
+               if (!ret)
+                       break;
+
+               /* Clear the MEI HDCP session before retrying */
+               DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
+                             i + 1, tries, ret);
+               if (hdcp2_deauthenticate_port(connector) < 0)
+                       DRM_DEBUG_KMS("Port deauth failed.\n");
+       }
+
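+       /* i != tries means one of the authentication attempts succeeded. */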
+       if (i != tries) {
+               /*
+                * Ensure the required 200 ms minimum interval between
+                * Session Key Exchange and enabling encryption.
+                */
+               msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
+               ret = hdcp2_enable_encryption(connector);
+               if (ret < 0) {
+                       DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+                       if (hdcp2_deauthenticate_port(connector) < 0)
+                               DRM_DEBUG_KMS("Port deauth failed.\n");
+               }
+       }
+
+       return ret;
+}
+
+static int _intel_hdcp2_enable(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       int ret;
+
+       DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+                     connector->base.name, connector->base.base.id,
+                     hdcp->content_type);
+
+       ret = hdcp2_authenticate_and_encrypt(connector);
+       if (ret) {
+               DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
+                             hdcp->content_type, ret);
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
+                     connector->base.name, connector->base.base.id,
+                     hdcp->content_type);
+
+       hdcp->hdcp2_encrypted = true;
+       return 0;
+}
+
+static int _intel_hdcp2_disable(struct intel_connector *connector)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
+                     connector->base.name, connector->base.base.id);
+
+       ret = hdcp2_disable_encryption(connector);
+
+       if (hdcp2_deauthenticate_port(connector) < 0)
+               DRM_DEBUG_KMS("Port deauth failed.\n");
+
+       connector->hdcp.hdcp2_encrypted = false;
+
+       return ret;
+}
+
+/* Implements the Link Integrity Check for HDCP2.2 */
+static int intel_hdcp2_check_link(struct intel_connector *connector)
+{
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       enum port port = connector->encoder->port;
+       int ret = 0;
+
+       mutex_lock(&hdcp->mutex);
+
+       /* intel_hdcp2_check_link() is expected only when HDCP 2.2 is enabled */
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+           !hdcp->hdcp2_encrypted) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+               DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
+                         I915_READ(HDCP2_STATUS_DDI(port)));
+               ret = -ENXIO;
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               goto out;
+       }
+
+       ret = hdcp->shim->check_2_2_link(intel_dig_port);
+       if (ret == HDCP_LINK_PROTECTED) {
+               if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+                       schedule_work(&hdcp->prop_work);
+               }
+               goto out;
+       }
+
+       if (ret == HDCP_TOPOLOGY_CHANGE) {
+               if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+                       goto out;
+
+               DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+               ret = hdcp2_authenticate_repeater_topology(connector);
+               if (!ret) {
+                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+                       schedule_work(&hdcp->prop_work);
+                       goto out;
+               }
+               DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
+                             connector->base.name, connector->base.base.id,
+                             ret);
+       } else {
+               DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
+                             connector->base.name, connector->base.base.id);
+       }
+
+       ret = _intel_hdcp2_disable(connector);
+       if (ret) {
+               DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+                         connector->base.name, connector->base.base.id, ret);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               goto out;
+       }
+
+       ret = _intel_hdcp2_enable(connector);
+       if (ret) {
+               DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+                             connector->base.name, connector->base.base.id,
+                             ret);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               goto out;
+       }
+
+out:
+       mutex_unlock(&hdcp->mutex);
+       return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+       struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+                                              struct intel_hdcp,
+                                              check_work);
+       struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
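+       /*
+        * Try the HDCP 2.2 link check first and fall back to the HDCP 1.4
+        * check; reschedule with the check period of whichever succeeded.
+        */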
+       if (!intel_hdcp2_check_link(connector))
+               schedule_delayed_work(&hdcp->check_work,
+                                     DRM_HDCP2_CHECK_PERIOD_MS);
+       else if (!intel_hdcp_check_link(connector))
+               schedule_delayed_work(&hdcp->check_work,
+                                     DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static int i915_hdcp_component_bind(struct device *i915_kdev,
+                                   struct device *mei_kdev, void *data)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+       DRM_DEBUG("I915 HDCP comp bind\n");
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
+       dev_priv->hdcp_master->mei_dev = mei_kdev;
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       return 0;
+}
+
+static void i915_hdcp_component_unbind(struct device *i915_kdev,
+                                      struct device *mei_kdev, void *data)
+{
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+       DRM_DEBUG("I915 HDCP comp unbind\n");
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       dev_priv->hdcp_master = NULL;
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+}
+
+static const struct component_ops i915_hdcp_component_ops = {
+       .bind   = i915_hdcp_component_bind,
+       .unbind = i915_hdcp_component_unbind,
+};
+
+static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct hdcp_port_data *data = &hdcp->port_data;
+
+       data->port = connector->encoder->port;
+       data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
+       data->protocol = (u8)hdcp->shim->protocol;
+
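+       /* Only a single stream is supported for now. */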
+       data->k = 1;
+       if (!data->streams)
+               data->streams = kcalloc(data->k,
+                                       sizeof(struct hdcp2_streamid_type),
+                                       GFP_KERNEL);
+       if (!data->streams) {
+               DRM_ERROR("Out of Memory\n");
+               return -ENOMEM;
+       }
+
+       data->streams[0].stream_id = 0;
+       data->streams[0].stream_type = hdcp->content_type;
+
+       return 0;
+}
+
+static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+{
+       if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
+               return false;
+
+       return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+               IS_KABYLAKE(dev_priv));
+}
+
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       if (!is_hdcp2_supported(dev_priv))
+               return;
+
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       WARN_ON(dev_priv->hdcp_comp_added);
+
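+       /*
+        * Note: the bind callback may already run from within
+        * component_add_typed() below, once all components are available.
+        */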
+       dev_priv->hdcp_comp_added = true;
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+       ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+                                 I915_COMPONENT_HDCP);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+               mutex_lock(&dev_priv->hdcp_comp_mutex);
+               dev_priv->hdcp_comp_added = false;
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return;
+       }
+}
+
+static void intel_hdcp2_init(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       int ret;
+
+       ret = initialize_hdcp_port_data(connector);
+       if (ret) {
+               DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+               return;
+       }
+
+       hdcp->hdcp2_supported = true;
+}
+
+int intel_hdcp_init(struct intel_connector *connector,
+                   const struct intel_hdcp_shim *shim)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       int ret;
+
+       if (!shim)
+               return -EINVAL;
+
+       ret = drm_connector_attach_content_protection_property(&connector->base);
+       if (ret)
+               return ret;
+
+       hdcp->shim = shim;
+       mutex_init(&hdcp->mutex);
+       INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+       INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
+
+       if (is_hdcp2_supported(dev_priv))
+               intel_hdcp2_init(connector);
+       init_waitqueue_head(&hdcp->cp_irq_queue);
+
+       return 0;
+}
+
+int intel_hdcp_enable(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
+       int ret = -EINVAL;
+
+       if (!hdcp->shim)
+               return -ENOENT;
+
+       mutex_lock(&hdcp->mutex);
+       WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+
+       /*
+        * Since HDCP 2.2 is more secure than HDCP 1.4, prefer HDCP 2.2 when
+        * the setup is capable of it.
+        */
+       if (intel_hdcp2_capable(connector)) {
+               ret = _intel_hdcp2_enable(connector);
+               if (!ret)
+                       check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
+       }
+
+       /* When HDCP2.2 fails, HDCP1.4 will be attempted */
+       if (ret && intel_hdcp_capable(connector)) {
+               ret = _intel_hdcp_enable(connector);
+       }
+
+       if (!ret) {
+               schedule_delayed_work(&hdcp->check_work, check_link_interval);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+               schedule_work(&hdcp->prop_work);
+       }
+
+       mutex_unlock(&hdcp->mutex);
+       return ret;
+}
+
+int intel_hdcp_disable(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       int ret = 0;
+
+       if (!hdcp->shim)
+               return -ENOENT;
+
+       mutex_lock(&hdcp->mutex);
+
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+               if (hdcp->hdcp2_encrypted)
+                       ret = _intel_hdcp2_disable(connector);
+               else if (hdcp->hdcp_encrypted)
+                       ret = _intel_hdcp_disable(connector);
+       }
+
+       mutex_unlock(&hdcp->mutex);
+       cancel_delayed_work_sync(&hdcp->check_work);
+       return ret;
+}
+
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+{
+       mutex_lock(&dev_priv->hdcp_comp_mutex);
+       if (!dev_priv->hdcp_comp_added) {
+               mutex_unlock(&dev_priv->hdcp_comp_mutex);
+               return;
+       }
+
+       dev_priv->hdcp_comp_added = false;
+       mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+       component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+}
+
+void intel_hdcp_cleanup(struct intel_connector *connector)
+{
+       if (!connector->hdcp.shim)
+               return;
+
+       mutex_lock(&connector->hdcp.mutex);
+       kfree(connector->hdcp.port_data.streams);
+       mutex_unlock(&connector->hdcp.mutex);
+}
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+                            struct drm_connector_state *old_state,
+                            struct drm_connector_state *new_state)
+{
+       u64 old_cp = old_state->content_protection;
+       u64 new_cp = new_state->content_protection;
+       struct drm_crtc_state *crtc_state;
+
+       if (!new_state->crtc) {
+               /*
+                * If the connector is being disabled with CP enabled, mark it
+                * desired so it's re-enabled when the connector is brought back
+                */
+               if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+                       new_state->content_protection =
+                               DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               return;
+       }
+
+       /*
+        * Nothing to do if the state didn't change, or HDCP was activated since
+        * the last commit
+        */
+       if (old_cp == new_cp ||
+           (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+            new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
+               return;
+
+       crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+                                                  new_state->crtc);
+       crtc_state->mode_changed = true;
+}
+
+/* Handles the CP_IRQ raised from the DP HDCP sink */
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+
+       if (!hdcp->shim)
+               return;
+
+       atomic_inc(&connector->hdcp.cp_irq_count);
+       wake_up_all(&connector->hdcp.cp_irq_queue);
+
+       schedule_delayed_work(&hdcp->check_work, 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
new file mode 100644 (file)
index 0000000..be8da85
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_H__
+#define __INTEL_HDCP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_hdcp_shim;
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+                            struct drm_connector_state *old_state,
+                            struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+                   const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_disable(struct intel_connector *connector);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
+bool intel_hdcp2_capable(struct intel_connector *connector);
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_cleanup(struct intel_connector *connector);
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
+
+#endif /* __INTEL_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
new file mode 100644 (file)
index 0000000..ea3de4a
--- /dev/null
@@ -0,0 +1,687 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_hotplug.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled in many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, the userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen when a display port sink is connected. Hence, on platforms whose DP
+ * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
+ * performed (it was never expected to be disabled in the first place ;) ).
+ * This is specific to DP sinks handled by that routine; any other display such
+ * as HDMI or DVI enabled on the same port will use i915_hotplug_work_func(),
+ * where this logic is handled.
+
+/**
+ * intel_hpd_pin_default - return the default pin associated with a certain port.
+ * @dev_priv: private driver data pointer
+ * @port: the hpd port to get the associated pin for
+ *
+ * It is only valid for and used by digital port encoders.
+ *
+ * Return the pin that is associated with @port, or HPD_NONE if no pin is
+ * hard associated with that @port.
+ */
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+                                  enum port port)
+{
+       switch (port) {
+       case PORT_A:
+               return HPD_PORT_A;
+       case PORT_B:
+               return HPD_PORT_B;
+       case PORT_C:
+               return HPD_PORT_C;
+       case PORT_D:
+               return HPD_PORT_D;
+       case PORT_E:
+               return HPD_PORT_E;
+       case PORT_F:
+               if (IS_CNL_WITH_PORT_F(dev_priv))
+                       return HPD_PORT_E;
+               return HPD_PORT_F;
+       default:
+               MISSING_CASE(port);
+               return HPD_NONE;
+       }
+}
+
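+/*
+ * Both in ms: the window over which HPD IRQs are counted, and the delay
+ * before a storm-disabled pin is re-enabled.
+ */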
+#define HPD_STORM_DETECT_PERIOD                1000
+#define HPD_STORM_REENABLE_DELAY       (2 * 60 * 1000)
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
+ *
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must also track these. Because short IRQ
+ * storms are naturally caused by sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
+ *
+ * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
+ * and should only be adjusted for automated hotplug testing.
+ *
+ * Return true if an IRQ storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+                                      enum hpd_pin pin, bool long_hpd)
+{
+       struct i915_hotplug *hpd = &dev_priv->hotplug;
+       unsigned long start = hpd->stats[pin].last_jiffies;
+       unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+       const int increment = long_hpd ? 10 : 1;
+       const int threshold = hpd->hpd_storm_threshold;
+       bool storm = false;
+
+       if (!threshold ||
+           (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+               return false;
+
+       if (!time_in_range(jiffies, start, end)) {
+               hpd->stats[pin].last_jiffies = jiffies;
+               hpd->stats[pin].count = 0;
+       }
+
+       hpd->stats[pin].count += increment;
+       if (hpd->stats[pin].count > threshold) {
+               hpd->stats[pin].state = HPD_MARK_DISABLED;
+               DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+               storm = true;
+       } else {
+               DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+                             hpd->stats[pin].count);
+       }
+
+       return storm;
+}
+
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct intel_connector *intel_connector;
+       struct intel_encoder *intel_encoder;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       enum hpd_pin pin;
+       bool hpd_disabled = false;
+
+       lockdep_assert_held(&dev_priv->irq_lock);
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+                       continue;
+
+               intel_connector = to_intel_connector(connector);
+               intel_encoder = intel_connector->encoder;
+               if (!intel_encoder)
+                       continue;
+
+               pin = intel_encoder->hpd_pin;
+               if (pin == HPD_NONE ||
+                   dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+                       continue;
+
+               DRM_INFO("HPD interrupt storm detected on connector %s: "
+                        "switching from hotplug detection to polling\n",
+                        connector->name);
+
+               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT
+                       | DRM_CONNECTOR_POLL_DISCONNECT;
+               hpd_disabled = true;
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       /* Enable polling and queue hotplug re-enabling. */
+       if (hpd_disabled) {
+               drm_kms_helper_poll_enable(dev);
+               mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+                                msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+       }
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv),
+                            hotplug.reenable_work.work);
+       struct drm_device *dev = &dev_priv->drm;
+       intel_wakeref_t wakeref;
+       enum hpd_pin pin;
+
+       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       for_each_hpd_pin(pin) {
+               struct drm_connector *connector;
+               struct drm_connector_list_iter conn_iter;
+
+               if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+                       continue;
+
+               dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+
+               drm_connector_list_iter_begin(dev, &conn_iter);
+               drm_for_each_connector_iter(connector, &conn_iter) {
+                       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+                       /* Don't check MST ports, they don't have pins */
+                       if (!intel_connector->mst_port &&
+                           intel_connector->encoder->hpd_pin == pin) {
+                               if (connector->polled != intel_connector->polled)
+                                       DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+                                                        connector->name);
+                               connector->polled = intel_connector->polled;
+                               if (!connector->polled)
+                                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+                       }
+               }
+               drm_connector_list_iter_end(&conn_iter);
+       }
+       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+}
+
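+/*
+ * Re-probe the connector and report whether its status changed since the
+ * last detection.
+ */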
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+                          struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       enum drm_connector_status old_status;
+
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+       old_status = connector->base.status;
+
+       connector->base.status =
+               drm_helper_probe_detect(&connector->base, NULL, false);
+
+       if (old_status == connector->base.status)
+               return false;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+                     connector->base.base.id,
+                     connector->base.name,
+                     drm_get_connector_status_name(old_status),
+                     drm_get_connector_status_name(connector->base.status));
+
+       return true;
+}
+
+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+       return intel_encoder_is_dig_port(encoder) &&
+               enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+}
+
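+/*
+ * Handle the DP short/long pulses that the irq handler collected into
+ * hotplug.{long,short}_port_mask, outside of interrupt context.
+ */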
+static void i915_digport_work_func(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+       u32 long_port_mask, short_port_mask;
+       struct intel_encoder *encoder;
+       u32 old_bits = 0;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       long_port_mask = dev_priv->hotplug.long_port_mask;
+       dev_priv->hotplug.long_port_mask = 0;
+       short_port_mask = dev_priv->hotplug.short_port_mask;
+       dev_priv->hotplug.short_port_mask = 0;
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_digital_port *dig_port;
+               enum port port = encoder->port;
+               bool long_hpd, short_hpd;
+               enum irqreturn ret;
+
+               if (!intel_encoder_has_hpd_pulse(encoder))
+                       continue;
+
+               long_hpd = long_port_mask & BIT(port);
+               short_hpd = short_port_mask & BIT(port);
+
+               if (!long_hpd && !short_hpd)
+                       continue;
+
+               dig_port = enc_to_dig_port(&encoder->base);
+
+               ret = dig_port->hpd_pulse(dig_port, long_hpd);
+               if (ret == IRQ_NONE) {
+                       /* fall back to old school hpd */
+                       old_bits |= BIT(encoder->hpd_pin);
+               }
+       }
+
+       if (old_bits) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               dev_priv->hotplug.event_bits |= old_bits;
+               spin_unlock_irq(&dev_priv->irq_lock);
+               schedule_work(&dev_priv->hotplug.hotplug_work);
+       }
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+       struct drm_device *dev = &dev_priv->drm;
+       struct intel_connector *intel_connector;
+       struct intel_encoder *intel_encoder;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       bool changed = false;
+       u32 hpd_event_bits;
+
+       mutex_lock(&dev->mode_config.mutex);
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       hpd_event_bits = dev_priv->hotplug.event_bits;
+       dev_priv->hotplug.event_bits = 0;
+
+       /* Enable polling for connectors which had HPD IRQ storms */
+       intel_hpd_irq_storm_switch_to_polling(dev_priv);
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               intel_connector = to_intel_connector(connector);
+               if (!intel_connector->encoder)
+                       continue;
+               intel_encoder = intel_connector->encoder;
+               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+                       DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+                                     connector->name, intel_encoder->hpd_pin);
+
+                       changed |= intel_encoder->hotplug(intel_encoder,
+                                                         intel_connector);
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+       mutex_unlock(&dev->mode_config.mutex);
+
+       if (changed)
+               drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev_priv: drm_i915_private
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                          u32 pin_mask, u32 long_mask)
+{
+       struct intel_encoder *encoder;
+       bool storm_detected = false;
+       bool queue_dig = false, queue_hp = false;
+       u32 long_hpd_pulse_mask = 0;
+       u32 short_hpd_pulse_mask = 0;
+       enum hpd_pin pin;
+
+       if (!pin_mask)
+               return;
+
+       spin_lock(&dev_priv->irq_lock);
+
+       /*
+        * Determine whether ->hpd_pulse() exists for each pin, and
+        * whether we have a short or a long pulse. This is needed
+        * as each pin may have up to two encoders (HDMI and DP) and
+        * only one of them (DP) will have ->hpd_pulse().
+        */
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+               enum port port = encoder->port;
+               bool long_hpd;
+
+               pin = encoder->hpd_pin;
+               if (!(BIT(pin) & pin_mask))
+                       continue;
+
+               if (!has_hpd_pulse)
+                       continue;
+
+               long_hpd = long_mask & BIT(pin);
+
+               DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+                                long_hpd ? "long" : "short");
+               queue_dig = true;
+
+               if (long_hpd) {
+                       long_hpd_pulse_mask |= BIT(pin);
+                       dev_priv->hotplug.long_port_mask |= BIT(port);
+               } else {
+                       short_hpd_pulse_mask |= BIT(pin);
+                       dev_priv->hotplug.short_port_mask |= BIT(port);
+               }
+       }
+
+       /* Now process each pin just once */
+       for_each_hpd_pin(pin) {
+               bool long_hpd;
+
+               if (!(BIT(pin) & pin_mask))
+                       continue;
+
+               if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
+                       /*
+                        * On GMCH platforms the interrupt mask bits only
+                        * prevent irq generation, not the setting of the
+                        * hotplug bits themselves. So only WARN about unexpected
+                        * interrupts on saner platforms.
+                        */
+                       WARN_ONCE(!HAS_GMCH(dev_priv),
+                                 "Received HPD interrupt on pin %d although disabled\n", pin);
+                       continue;
+               }
+
+               if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
+                       continue;
+
+               /*
+                * Delegate to ->hpd_pulse() if one of the encoders for this
+                * pin has it, otherwise let the hotplug_work deal with this
+                * pin directly.
+                */
+               if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+                       long_hpd = long_hpd_pulse_mask & BIT(pin);
+               } else {
+                       dev_priv->hotplug.event_bits |= BIT(pin);
+                       long_hpd = true;
+                       queue_hp = true;
+               }
+
+               if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
+                       dev_priv->hotplug.event_bits &= ~BIT(pin);
+                       storm_detected = true;
+                       queue_hp = true;
+               }
+       }
+
+       /*
+        * Disable any IRQs that storms were detected on. Polling enablement
+        * happens later in our hotplug work.
+        */
+       if (storm_detected && dev_priv->display_irqs_enabled)
+               dev_priv->display.hpd_irq_setup(dev_priv);
+       spin_unlock(&dev_priv->irq_lock);
+
+       /*
+        * Our hotplug handler can grab modeset locks (by calling down into the
+        * fb helpers). Hence it must not be run on our own dev_priv->wq work
+        * queue, since otherwise the flush_work in the pageflip code will
+        * deadlock.
+        */
+       if (queue_dig)
+               queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+       if (queue_hp)
+               schedule_work(&dev_priv->hotplug.hotplug_work);
+}
+
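+/*
+ * Illustrative sketch (editor's addition, not part of the original sources):
+ * a platform specific hotplug irq handler is expected to decode its hotplug
+ * registers into the pin and long-pulse masks and hand them to
+ * intel_hpd_irq_handler(). The handler name and register bits below are
+ * assumptions, used only to show the calling convention:
+ *
+ *	static void example_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ *					    u32 hotplug_trigger)
+ *	{
+ *		u32 pin_mask = 0, long_mask = 0;
+ *
+ *		if (hotplug_trigger & EXAMPLE_PORTB_HOTPLUG) {
+ *			pin_mask |= BIT(HPD_PORT_B);
+ *			if (hotplug_trigger & EXAMPLE_PORTB_LONG_PULSE)
+ *				long_mask |= BIT(HPD_PORT_B);
+ *		}
+ *
+ *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ *	}
+ */
+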
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+       int i;
+
+       for_each_hpd_pin(i) {
+               dev_priv->hotplug.stats[i].count = 0;
+               dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+       }
+
+       WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+       schedule_work(&dev_priv->hotplug.poll_init_work);
+
+       /*
+        * Interrupt setup is already guaranteed to be single-threaded; this is
+        * just to make the assert_spin_locked checks happy.
+        */
+       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               if (dev_priv->display_irqs_enabled)
+                       dev_priv->display.hpd_irq_setup(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
+       }
+}
+
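+/*
+ * Illustrative sketch (editor's addition): per the comment above, interrupts
+ * must already be enabled before hotplug support is initialized, so a
+ * hypothetical load/resume path would order the calls as:
+ *
+ *	intel_irq_init_hw(dev_priv);	(interrupts enabled first)
+ *	intel_hpd_init(dev_priv);	(then hpd stats, polling and irq setup)
+ */
+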
+static void i915_hpd_poll_init_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            hotplug.poll_init_work);
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       bool enabled;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               struct intel_connector *intel_connector =
+                       to_intel_connector(connector);
+               connector->polled = intel_connector->polled;
+
+               /* MST has a dynamic intel_connector->encoder and its reprobing
+                * is all handled by the MST helpers. */
+               if (intel_connector->mst_port)
+                       continue;
+
+               if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
+                   intel_connector->encoder->hpd_pin > HPD_NONE) {
+                       connector->polled = enabled ?
+                               DRM_CONNECTOR_POLL_CONNECT |
+                               DRM_CONNECTOR_POLL_DISCONNECT :
+                               DRM_CONNECTOR_POLL_HPD;
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       if (enabled)
+               drm_kms_helper_poll_enable(dev);
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       /*
+        * We might have missed hotplugs that happened while we were
+        * in the middle of disabling polling.
+        */
+       if (!enabled)
+               drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enables/disables polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+       WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+       /*
+        * We might already be holding dev->mode_config.mutex, so do this in a
+        * separate worker.
+        * Also, there's no issue if we race here since we always reschedule
+        * this worker anyway.
+        */
+       schedule_work(&dev_priv->hotplug.poll_init_work);
+}
+
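+/*
+ * Illustrative sketch (editor's addition): a caller that is about to lose
+ * functional HPD (e.g. a hypothetical runtime suspend path) switches to
+ * polling here and back to hpd handling on resume:
+ *
+ *	suspend:	intel_hpd_poll_init(dev_priv);	(polling takes over)
+ *	resume:		intel_hpd_init(dev_priv);	(hpd interrupts again)
+ */
+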
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+       INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+       INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+       INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
+       INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+                         intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       dev_priv->hotplug.long_port_mask = 0;
+       dev_priv->hotplug.short_port_mask = 0;
+       dev_priv->hotplug.event_bits = 0;
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+       cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+       cancel_work_sync(&dev_priv->hotplug.poll_init_work);
+       cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+       bool ret = false;
+
+       if (pin == HPD_NONE)
+               return false;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
+               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+               ret = true;
+       }
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+       if (pin == HPD_NONE)
+               return;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
new file mode 100644 (file)
index 0000000..805f897
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HOTPLUG_H__
+#define __INTEL_HOTPLUG_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_encoder;
+
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+                          struct intel_connector *connector);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                          u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+                                  enum port port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+
+#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
new file mode 100644 (file)
index 0000000..b19800b
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+ *    Jerome Anand <jerome.anand@intel.com>
+ *    based on VED patches
+ *
+ */
+
+/**
+ * DOC: LPE Audio integration for HDMI or DP playback
+ *
+ * Motivation:
+ * Atom platforms (e.g. Valleyview and Cherry Trail) integrate a DMA-based
+ * interface as an alternative to the traditional HDaudio path. While this
+ * mode is unrelated to the LPE aka SST audio engine, the documentation refers
+ * to this mode as LPE so we keep this notation for the sake of consistency.
+ *
+ * The interface is handled by a separate standalone driver maintained in the
+ * ALSA subsystem for simplicity. To minimize the interaction between the two
+ * subsystems, a bridge is set up between hdmi-lpe-audio and i915:
+ * 1. Create a platform device to share MMIO/IRQ resources.
+ * 2. Make the platform device a child of the i915 device for runtime PM.
+ * 3. Create an IRQ chip to forward the LPE audio irqs.
+ * The hdmi-lpe-audio driver probes the LPE audio device and creates a new
+ * sound card.
+ *
+ * Threats:
+ * Due to a restriction in the Linux platform device model, the user needs to
+ * manually uninstall the hdmi-lpe-audio driver before uninstalling the i915
+ * module; otherwise we might run into use-after-free issues after i915
+ * removes the platform device: even though the hdmi-lpe-audio driver is
+ * released, the module is still in "installed" status.
+ *
+ * Implementation:
+ * The MMIO/REG platform resources are created according to the register
+ * specification.
+ * When forwarding LPE audio irqs, the flow control handler selection depends
+ * on the platform; for example, on Valleyview handle_simple_irq is enough.
+ *
+ */
+
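+/*
+ * Illustrative sketch (editor's addition, not part of this file): on the
+ * ALSA side the hdmi-lpe-audio driver would consume the bridge roughly as
+ * below. The probe function name is hypothetical; the resource indices
+ * correspond to the rsc[] array filled in lpe_audio_platdev_create():
+ *
+ *	static int example_hdmi_lpe_audio_probe(struct platform_device *pdev)
+ *	{
+ *		struct intel_hdmi_lpe_audio_pdata *pdata =
+ *			dev_get_platdata(&pdev->dev);
+ *		struct resource *mmio =
+ *			platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *		int irq = platform_get_irq(pdev, 0);
+ *
+ *		return (!pdata || !mmio || irq < 0) ? -ENODEV : 0;
+ *	}
+ */
+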
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/intel_lpe_audio.h>
+
+#include "i915_drv.h"
+#include "intel_lpe_audio.h"
+
+#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
+
+static struct platform_device *
+lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct platform_device_info pinfo = {};
+       struct resource *rsc;
+       struct platform_device *platdev;
+       struct intel_hdmi_lpe_audio_pdata *pdata;
+
+       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       rsc = kcalloc(2, sizeof(*rsc), GFP_KERNEL);
+       if (!rsc) {
+               kfree(pdata);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       rsc[0].start    = rsc[0].end = dev_priv->lpe_audio.irq;
+       rsc[0].flags    = IORESOURCE_IRQ;
+       rsc[0].name     = "hdmi-lpe-audio-irq";
+
+       rsc[1].start    = pci_resource_start(dev->pdev, 0) +
+               I915_HDMI_LPE_AUDIO_BASE;
+       rsc[1].end      = pci_resource_start(dev->pdev, 0) +
+               I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
+       rsc[1].flags    = IORESOURCE_MEM;
+       rsc[1].name     = "hdmi-lpe-audio-mmio";
+
+       pinfo.parent = dev->dev;
+       pinfo.name = "hdmi-lpe-audio";
+       pinfo.id = -1;
+       pinfo.res = rsc;
+       pinfo.num_res = 2;
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+       pinfo.dma_mask = DMA_BIT_MASK(32);
+
+       pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
+       pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
+       pdata->port[0].pipe = -1;
+       pdata->port[1].pipe = -1;
+       pdata->port[2].pipe = -1;
+       spin_lock_init(&pdata->lpe_audio_slock);
+
+       platdev = platform_device_register_full(&pinfo);
+       kfree(rsc);
+       kfree(pdata);
+
+       if (IS_ERR(platdev)) {
+               DRM_ERROR("Failed to allocate LPE audio platform device\n");
+               return platdev;
+       }
+
+       pm_runtime_no_callbacks(&platdev->dev);
+
+       return platdev;
+}
+
+static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
+{
+       /* XXX Note that platform_device_register_full() allocates a dma_mask
+        * and never frees it. We can't free it here as we cannot guarantee
+        * this is the last reference (i.e. that the dma_mask will not be
+        * used after our unregister). So we choose to leak the sizeof(u64)
+        * allocation here - it should be fixed in the platform_device code
+        * rather than have us fiddle with its internals.
+        */
+
+       platform_device_unregister(dev_priv->lpe_audio.platdev);
+}
+
+static void lpe_audio_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lpe_audio_irq_mask(struct irq_data *d)
+{
+}
+
+static struct irq_chip lpe_audio_irqchip = {
+       .name = "hdmi_lpe_audio_irqchip",
+       .irq_mask = lpe_audio_irq_mask,
+       .irq_unmask = lpe_audio_irq_unmask,
+};
+
+static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
+{
+       int irq = dev_priv->lpe_audio.irq;
+
+       WARN_ON(!intel_irqs_enabled(dev_priv));
+       irq_set_chip_and_handler_name(irq,
+                               &lpe_audio_irqchip,
+                               handle_simple_irq,
+                               "hdmi_lpe_audio_irq_handler");
+
+       return irq_set_chip_data(irq, dev_priv);
+}
+
+static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
+{
+       int lpe_present = false;
+
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               static const struct pci_device_id atom_hdaudio_ids[] = {
+                       /* Baytrail */
+                       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
+                       /* Braswell */
+                       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2284)},
+                       {}
+               };
+
+               if (!pci_dev_present(atom_hdaudio_ids)) {
+                       DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+                       lpe_present = true;
+               }
+       }
+       return lpe_present;
+}
+
+static int lpe_audio_setup(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       dev_priv->lpe_audio.irq = irq_alloc_desc(0);
+       if (dev_priv->lpe_audio.irq < 0) {
+               DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+                       dev_priv->lpe_audio.irq);
+               ret = dev_priv->lpe_audio.irq;
+               goto err;
+       }
+
+       DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+
+       ret = lpe_audio_irq_init(dev_priv);
+
+       if (ret) {
+               DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+                       ret);
+               goto err_free_irq;
+       }
+
+       dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv);
+
+       if (IS_ERR(dev_priv->lpe_audio.platdev)) {
+               ret = PTR_ERR(dev_priv->lpe_audio.platdev);
+               DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+                       ret);
+               goto err_free_irq;
+       }
+
+       /* enable chicken bit; at least this is required for Dell Wyse 3040
+        * with DP outputs (but only sometimes, for some reason!)
+        */
+       I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE);
+
+       return 0;
+err_free_irq:
+       irq_free_desc(dev_priv->lpe_audio.irq);
+err:
+       dev_priv->lpe_audio.irq = -1;
+       dev_priv->lpe_audio.platdev = NULL;
+       return ret;
+}
+
+/**
+ * intel_lpe_audio_irq_handler() - forwards the LPE audio irq
+ * @dev_priv: the i915 drm device private data
+ *
+ * The LPE audio irq is forwarded to the irq handler registered by the LPE
+ * audio driver.
+ */
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       if (!HAS_LPE_AUDIO(dev_priv))
+               return;
+
+       ret = generic_handle_irq(dev_priv->lpe_audio.irq);
+       if (ret)
+               DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
+                               ret);
+}
+
+/**
+ * intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio
+ * driver and i915
+ * @dev_priv: the i915 drm device private data
+ *
+ * Return: 0 if successful, non-zero if detection or
+ * allocation/initialization fails.
+ */
+int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+{
+       int ret = -ENODEV;
+
+       if (lpe_audio_detect(dev_priv)) {
+               ret = lpe_audio_setup(dev_priv);
+               if (ret < 0)
+                       DRM_ERROR("failed to setup LPE Audio bridge\n");
+       }
+       return ret;
+}
+
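+/*
+ * Illustrative sketch (editor's addition): intel_lpe_audio_init() pairs with
+ * intel_lpe_audio_teardown() below; a hypothetical load/unload sequence:
+ *
+ *	load:	ret = intel_lpe_audio_init(dev_priv);	(-ENODEV when no LPE audio)
+ *	unload:	intel_lpe_audio_teardown(dev_priv);	(early-returns without the bridge)
+ */
+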
+/**
+ * intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE
+ * audio driver and i915
+ * @dev_priv: the i915 drm device private data
+ *
+ * Release all the resources of the LPE audio <-> i915 bridge.
+ */
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+{
+       struct irq_desc *desc;
+
+       if (!HAS_LPE_AUDIO(dev_priv))
+               return;
+
+       desc = irq_to_desc(dev_priv->lpe_audio.irq);
+
+       lpe_audio_platdev_destroy(dev_priv);
+
+       irq_free_desc(dev_priv->lpe_audio.irq);
+
+       dev_priv->lpe_audio.irq = -1;
+       dev_priv->lpe_audio.platdev = NULL;
+}
+
+/**
+ * intel_lpe_audio_notify() - notify the LPE audio driver of an audio event
+ * @dev_priv: the i915 drm device private data
+ * @pipe: pipe
+ * @port: port
+ * @eld: ELD data
+ * @ls_clock: Link symbol clock in kHz
+ * @dp_output: Driving a DP output?
+ *
+ * Notify the LPE audio driver of an ELD change.
+ */
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+                           enum pipe pipe, enum port port,
+                           const void *eld, int ls_clock, bool dp_output)
+{
+       unsigned long irqflags;
+       struct intel_hdmi_lpe_audio_pdata *pdata;
+       struct intel_hdmi_lpe_audio_port_pdata *ppdata;
+       u32 audio_enable;
+
+       if (!HAS_LPE_AUDIO(dev_priv))
+               return;
+
+       pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev);
+       ppdata = &pdata->port[port - PORT_B];
+
+       spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
+
+       audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
+
+       if (eld != NULL) {
+               memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
+               ppdata->pipe = pipe;
+               ppdata->ls_clock = ls_clock;
+               ppdata->dp_output = dp_output;
+
+               /* Unmute the amp for both DP and HDMI */
+               I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
+                          audio_enable & ~VLV_AMP_MUTE);
+       } else {
+               memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
+               ppdata->pipe = -1;
+               ppdata->ls_clock = 0;
+               ppdata->dp_output = false;
+
+               /* Mute the amp for both DP and HDMI */
+               I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
+                          audio_enable | VLV_AMP_MUTE);
+       }
+
+       if (pdata->notify_audio_lpe)
+               pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B);
+
+       spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
+}
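+
+/*
+ * Illustrative sketch (editor's addition): audio enable passes an ELD, audio
+ * disable passes NULL so the amp is muted and the port data cleared. The pipe
+ * and port values below are placeholders:
+ *
+ *	enable:  intel_lpe_audio_notify(dev_priv, PIPE_A, PORT_B, eld,
+ *					ls_clock, dp_output);
+ *	disable: intel_lpe_audio_notify(dev_priv, PIPE_A, PORT_B, NULL, 0, false);
+ */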
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
new file mode 100644 (file)
index 0000000..f848c50
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LPE_AUDIO_H__
+#define __INTEL_LPE_AUDIO_H__
+
+#include <linux/types.h>
+
+enum pipe;
+enum port;
+struct drm_i915_private;
+
+int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+                           enum pipe pipe, enum port port,
+                           const void *eld, int ls_clock, bool dp_output);
+
+#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
new file mode 100644 (file)
index 0000000..8248812
--- /dev/null
@@ -0,0 +1,1176 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/firmware.h>
+#include <acpi/video.h>
+
+#include <drm/i915_drm.h>
+
+#include "display/intel_panel.h"
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_opregion.h"
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define   ACPI_CLID 0x01ac /* current lid state indicator */
+#define   ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x400
+#define OPREGION_ASLE_EXT_OFFSET       0x1C00
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+#define MBOX_ASLE_EXT  (1<<4)
+
+struct opregion_header {
+       u8 signature[16];
+       u32 size;
+       struct {
+               u8 rsvd;
+               u8 revision;
+               u8 minor;
+               u8 major;
+       }  __packed over;
+       u8 bios_ver[32];
+       u8 vbios_ver[16];
+       u8 driver_ver[16];
+       u32 mboxes;
+       u32 driver_model;
+       u32 pcon;
+       u8 dver[32];
+       u8 rsvd[124];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+       u32 drdy;       /* driver readiness */
+       u32 csts;       /* notification status */
+       u32 cevt;       /* current event */
+       u8 rsvd1[20];
+       u32 didl[8];    /* supported display devices ID list */
+       u32 cpdl[8];    /* currently presented display list */
+       u32 cadl[8];    /* currently active display list */
+       u32 nadl[8];    /* next active devices list */
+       u32 aslp;       /* ASL sleep time-out */
+       u32 tidx;       /* toggle table index */
+       u32 chpd;       /* current hotplug enable indicator */
+       u32 clid;       /* current lid state*/
+       u32 cdck;       /* current docking state */
+       u32 sxsw;       /* Sx state resume */
+       u32 evts;       /* ASL supported events */
+       u32 cnot;       /* current OS notification */
+       u32 nrdy;       /* driver status */
+       u32 did2[7];    /* extended supported display devices ID list */
+       u32 cpd2[7];    /* extended attached display devices list */
+       u8 rsvd2[4];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+       u32 scic;       /* SWSCI command|status|data */
+       u32 parm;       /* command parameters */
+       u32 dslp;       /* driver sleep time-out */
+       u8 rsvd[244];
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+       u32 ardy;       /* driver readiness */
+       u32 aslc;       /* ASLE interrupt command */
+       u32 tche;       /* technology enabled indicator */
+       u32 alsi;       /* current ALS illuminance reading */
+       u32 bclp;       /* backlight brightness to set */
+       u32 pfit;       /* panel fitting state */
+       u32 cblv;       /* current brightness level */
+       u16 bclm[20];   /* backlight level duty cycle mapping table */
+       u32 cpfm;       /* current panel fitting mode */
+       u32 epfm;       /* enabled panel fitting modes */
+       u8 plut[74];    /* panel LUT and identifier */
+       u32 pfmb;       /* PWM freq and min brightness */
+       u32 cddv;       /* color correction default values */
+       u32 pcft;       /* power conservation features */
+       u32 srot;       /* supported rotation angles */
+       u32 iuer;       /* IUER events */
+       u64 fdss;
+       u32 fdsp;
+       u32 stat;
+       u64 rvda;       /* Physical (2.0) or relative from opregion (2.1+)
+                        * address of raw VBT data. */
+       u32 rvds;       /* Size of raw vbt data */
+       u8 rsvd[58];
+} __packed;
+
+/* OpRegion mailbox #5: ASLE ext */
+struct opregion_asle_ext {
+       u32 phed;       /* Panel Header */
+       u8 bddc[256];   /* Panel EDID */
+       u8 rsvd[764];
+} __packed;
+
+/* Driver readiness indicator */
+#define ASLE_ARDY_READY                (1 << 0)
+#define ASLE_ARDY_NOT_READY    (0 << 0)
+
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM             (1 << 0)
+#define ASLC_SET_BACKLIGHT             (1 << 1)
+#define ASLC_SET_PFIT                  (1 << 2)
+#define ASLC_SET_PWM_FREQ              (1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
+#define ASLC_BUTTON_ARRAY              (1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR     (1 << 6)
+#define ASLC_DOCKING_INDICATOR         (1 << 7)
+#define ASLC_ISCT_STATE_CHANGE         (1 << 8)
+#define ASLC_REQ_MSK                   0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED          (1 << 10)
+#define ASLC_BACKLIGHT_FAILED          (1 << 12)
+#define ASLC_PFIT_FAILED               (1 << 14)
+#define ASLC_PWM_FREQ_FAILED           (1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED    (1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED       (1 << 20)
+#define ASLC_CONVERTIBLE_FAILED                (1 << 22)
+#define ASLC_DOCKING_FAILED            (1 << 24)
+#define ASLC_ISCT_STATE_FAILED         (1 << 26)
+
+/* Technology enabled indicator */
+#define ASLE_TCHE_ALS_EN       (1 << 0)
+#define ASLE_TCHE_BLC_EN       (1 << 1)
+#define ASLE_TCHE_PFIT_EN      (1 << 2)
+#define ASLE_TCHE_PFMB_EN      (1 << 3)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+/* IUER */
+#define ASLE_IUER_DOCKING              (1 << 7)
+#define ASLE_IUER_CONVERTIBLE          (1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN    (1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN      (1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN                (1 << 2)
+#define ASLE_IUER_WINDOWS_BTN          (1 << 1)
+#define ASLE_IUER_POWER_BTN            (1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR           (1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK  (0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT  8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK   (0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT        8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT   5
+#define SWSCI_SCIC_EXIT_STATUS_MASK    (7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+       ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+        (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
+
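+/*
+ * Worked example (editor's addition): SWSCI_SBCB_DISPLAY_POWER_STATE below is
+ * SWSCI_FUNCTION_CODE(6, 8) = (6 << 1) | (8 << 8) = 0x80c; swsci() later ORs
+ * in SWSCI_SCIC_INDICATOR before writing the value to the scic field.
+ */
+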
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA                     4
+#define SWSCI_GBDA_SUPPORTED_CALLS     SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF   SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS       SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD         SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS   SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM     SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB                     6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION     SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE  SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH      SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT       SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY    SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS   SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX    SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME      SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM         SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT               0
+#define ACPI_DISPLAY_INDEX_MASK                        (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT     4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK      (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT                        8
+#define ACPI_DISPLAY_TYPE_MASK                 (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER                        (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA                  (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV                   (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL     (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL     (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT             12
+#define ACPI_VENDOR_SPECIFIC_MASK              (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT                   (1 << 16)
+#define ACPI_DEPENDS_ON_VGA                    (1 << 17)
+#define ACPI_PIPE_ID_SHIFT                     18
+#define ACPI_PIPE_ID_MASK                      (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME                  (1 << 31)
+
+#define MAX_DSLP       1500
+
+static int swsci(struct drm_i915_private *dev_priv,
+                u32 function, u32 parm, u32 *parm_out)
+{
+       struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u32 main_function, sub_function, scic;
+       u16 swsci_val;
+       u32 dslp;
+
+       if (!swsci)
+               return -ENODEV;
+
+       main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+               SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+       sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+               SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+       /* Check if we can call the function. See swsci_setup for details. */
+       if (main_function == SWSCI_SBCB) {
+               if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+                    (1 << sub_function)) == 0)
+                       return -EINVAL;
+       } else if (main_function == SWSCI_GBDA) {
+               if ((dev_priv->opregion.swsci_gbda_sub_functions &
+                    (1 << sub_function)) == 0)
+                       return -EINVAL;
+       }
+
+       /* Driver sleep timeout in ms. */
+       dslp = swsci->dslp;
+       if (!dslp) {
+               /* The spec says 2ms should be the default, but it's too small
+                * for some machines. */
+               dslp = 50;
+       } else if (dslp > MAX_DSLP) {
+               /* Hey bios, trust must be earned. */
+               DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+                             "using %u ms instead\n", dslp, MAX_DSLP);
+               dslp = MAX_DSLP;
+       }
+
+       /* The spec tells us to do this, but we are the only user... */
+       scic = swsci->scic;
+       if (scic & SWSCI_SCIC_INDICATOR) {
+               DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+               return -EBUSY;
+       }
+
+       scic = function | SWSCI_SCIC_INDICATOR;
+
+       swsci->parm = parm;
+       swsci->scic = scic;
+
+       /* Ensure SCI event is selected and event trigger is cleared. */
+       pci_read_config_word(pdev, SWSCI, &swsci_val);
+       if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
+               swsci_val |= SWSCI_SCISEL;
+               swsci_val &= ~SWSCI_GSSCIE;
+               pci_write_config_word(pdev, SWSCI, swsci_val);
+       }
+
+       /* Use event trigger to tell bios to check the mail. */
+       swsci_val |= SWSCI_GSSCIE;
+       pci_write_config_word(pdev, SWSCI, swsci_val);
+
+       /* Poll for the result. */
+#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
+       if (wait_for(C, dslp)) {
+               DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+               return -ETIMEDOUT;
+       }
+
+       scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+               SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+       /* Note: scic == 0 is an error! */
+       if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+               DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+               return -EIO;
+       }
+
+       if (parm_out)
+               *parm_out = swsci->parm;
+
+       return 0;
+
+#undef C
+}
+
+#define DISPLAY_TYPE_CRT                       0
+#define DISPLAY_TYPE_TV                                1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL       2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL       3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+                                 bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+       u32 parm = 0;
+       u32 type = 0;
+       u32 port;
+
+       /* don't care about old stuff for now */
+       if (!HAS_DDI(dev_priv))
+               return 0;
+
+       if (intel_encoder->type == INTEL_OUTPUT_DSI)
+               port = 0;
+       else
+               port = intel_encoder->port;
+
+       if (port == PORT_E)  {
+               port = 0;
+       } else {
+               parm |= 1 << port;
+               port++;
+       }
+
+       if (!enable)
+               parm |= 4 << 8;
+
+       switch (intel_encoder->type) {
+       case INTEL_OUTPUT_ANALOG:
+               type = DISPLAY_TYPE_CRT;
+               break;
+       case INTEL_OUTPUT_DDI:
+       case INTEL_OUTPUT_DP:
+       case INTEL_OUTPUT_HDMI:
+       case INTEL_OUTPUT_DP_MST:
+               type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+               break;
+       case INTEL_OUTPUT_EDP:
+       case INTEL_OUTPUT_DSI:
+               type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+               break;
+       default:
+               WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+                         intel_encoder->type);
+               return -EINVAL;
+       }
+
+       parm |= type << (16 + port * 3);
+
+       return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+       pci_power_t pci_power_state;
+       u32 parm;
+} power_state_map[] = {
+       { PCI_D0,       0x00 },
+       { PCI_D1,       0x01 },
+       { PCI_D2,       0x02 },
+       { PCI_D3hot,    0x04 },
+       { PCI_D3cold,   0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+                                 pci_power_t state)
+{
+       int i;
+
+       if (!HAS_DDI(dev_priv))
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+               if (state == power_state_map[i].pci_power_state)
+                       return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
+                                    power_state_map[i].parm, NULL);
+       }
+
+       return -EINVAL;
+}
+
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
+{
+       struct intel_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       struct drm_device *dev = &dev_priv->drm;
+
+       DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
+       if (acpi_video_get_backlight_type() == acpi_backlight_native) {
+               DRM_DEBUG_KMS("opregion backlight request ignored\n");
+               return 0;
+       }
+
+       if (!(bclp & ASLE_BCLP_VALID))
+               return ASLC_BACKLIGHT_FAILED;
+
+       bclp &= ASLE_BCLP_MSK;
+       if (bclp > 255)
+               return ASLC_BACKLIGHT_FAILED;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+       /*
+        * Update backlight on all connectors that support backlight (usually
+        * only one).
+        */
+       DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter)
+               intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
+       drm_connector_list_iter_end(&conn_iter);
+       asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
+
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
+{
+       /* alsi is the current ALS reading in lux. 0 indicates below sensor
+          range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+       DRM_DEBUG_DRIVER("Illum is not supported\n");
+       return ASLC_ALS_ILLUM_FAILED;
+}
+
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
+{
+       DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+       return ASLC_PWM_FREQ_FAILED;
+}
+
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
+{
+       /* Panel fitting is currently controlled by the X code, so this is a
+          noop until modesetting support works fully */
+       DRM_DEBUG_DRIVER("Pfit is not supported\n");
+       return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
+{
+       DRM_DEBUG_DRIVER("SROT is not supported\n");
+       return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
+{
+       if (!iuer)
+               DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+       if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+       if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+       if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+       if (iuer & ASLE_IUER_WINDOWS_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+       if (iuer & ASLE_IUER_POWER_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+       return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
+{
+       if (iuer & ASLE_IUER_CONVERTIBLE)
+               DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+       else
+               DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+       return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
+{
+       if (iuer & ASLE_IUER_DOCKING)
+               DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+       else
+               DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+       return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
+{
+       DRM_DEBUG_DRIVER("ISCT is not supported\n");
+       return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+       struct intel_opregion *opregion =
+               container_of(work, struct intel_opregion, asle_work);
+       struct drm_i915_private *dev_priv =
+               container_of(opregion, struct drm_i915_private, opregion);
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       u32 aslc_stat = 0;
+       u32 aslc_req;
+
+       if (!asle)
+               return;
+
+       aslc_req = asle->aslc;
+
+       if (!(aslc_req & ASLC_REQ_MSK)) {
+               DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+                                aslc_req);
+               return;
+       }
+
+       if (aslc_req & ASLC_SET_ALS_ILLUM)
+               aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
+
+       if (aslc_req & ASLC_SET_BACKLIGHT)
+               aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
+
+       if (aslc_req & ASLC_SET_PFIT)
+               aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
+
+       if (aslc_req & ASLC_SET_PWM_FREQ)
+               aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
+
+       if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+               aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
+                                                       asle->srot);
+
+       if (aslc_req & ASLC_BUTTON_ARRAY)
+               aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
+
+       if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+               aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
+
+       if (aslc_req & ASLC_DOCKING_INDICATOR)
+               aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
+
+       if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+               aslc_stat |= asle_isct_state(dev_priv);
+
+       asle->aslc = aslc_stat;
+}
+
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+       if (dev_priv->opregion.asle)
+               schedule_work(&dev_priv->opregion.asle_work);
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+/*
+ * The only video events relevant to opregion are 0x80. These indicate either a
+ * docking event, lid switch or display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
+static int intel_opregion_video_event(struct notifier_block *nb,
+                                     unsigned long val, void *data)
+{
+       struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+                                                      acpi_notifier);
+       struct acpi_bus_event *event = data;
+       struct opregion_acpi *acpi;
+       int ret = NOTIFY_OK;
+
+       if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+               return NOTIFY_DONE;
+
+       acpi = opregion->acpi;
+
+       if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
+               ret = NOTIFY_BAD;
+
+       acpi->csts = 0;
+
+       return ret;
+}
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3).
+ */
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+       if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+               opregion->acpi->didl[i] = val;
+       } else {
+               i -= ARRAY_SIZE(opregion->acpi->didl);
+
+               if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+                       return;
+
+               opregion->acpi->did2[i] = val;
+       }
+}
+
+static u32 acpi_display_type(struct intel_connector *connector)
+{
+       u32 display_type;
+
+       switch (connector->base.connector_type) {
+       case DRM_MODE_CONNECTOR_VGA:
+       case DRM_MODE_CONNECTOR_DVIA:
+               display_type = ACPI_DISPLAY_TYPE_VGA;
+               break;
+       case DRM_MODE_CONNECTOR_Composite:
+       case DRM_MODE_CONNECTOR_SVIDEO:
+       case DRM_MODE_CONNECTOR_Component:
+       case DRM_MODE_CONNECTOR_9PinDIN:
+       case DRM_MODE_CONNECTOR_TV:
+               display_type = ACPI_DISPLAY_TYPE_TV;
+               break;
+       case DRM_MODE_CONNECTOR_DVII:
+       case DRM_MODE_CONNECTOR_DVID:
+       case DRM_MODE_CONNECTOR_DisplayPort:
+       case DRM_MODE_CONNECTOR_HDMIA:
+       case DRM_MODE_CONNECTOR_HDMIB:
+               display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+       case DRM_MODE_CONNECTOR_eDP:
+       case DRM_MODE_CONNECTOR_DSI:
+               display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+               break;
+       case DRM_MODE_CONNECTOR_Unknown:
+       case DRM_MODE_CONNECTOR_VIRTUAL:
+               display_type = ACPI_DISPLAY_TYPE_OTHER;
+               break;
+       default:
+               MISSING_CASE(connector->base.connector_type);
+               display_type = ACPI_DISPLAY_TYPE_OTHER;
+               break;
+       }
+
+       return display_type;
+}
+
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
+{
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       struct intel_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       int i = 0, max_outputs;
+       int display_index[16] = {};
+
+       /*
+        * In theory, did2, the extended didl, gets added at opregion version
+        * 3.0. In practice, however, we're supposed to set it for earlier
+        * versions as well, since a BIOS that doesn't understand did2 should
+        * not look at it anyway. Use a variable so we can tweak this if a need
+        * arises later.
+        */
+       max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+               ARRAY_SIZE(opregion->acpi->did2);
+
+       drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               u32 device_id, type;
+
+               device_id = acpi_display_type(connector);
+
+               /* Use display type specific display index. */
+               type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+                       >> ACPI_DISPLAY_TYPE_SHIFT;
+               device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+               connector->acpi_device_id = device_id;
+               if (i < max_outputs)
+                       set_did(opregion, i, device_id);
+               i++;
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+       if (i > max_outputs)
+               DRM_ERROR("More than %d outputs in connector list\n",
+                         max_outputs);
+
+       /* If fewer than max outputs, the list must be null terminated */
+       if (i < max_outputs)
+               set_did(opregion, i, 0);
+}
+
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
+{
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       struct intel_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       int i = 0;
+
+       /*
+        * Initialize the CADL field from the connector device ids. This is
+        * essentially the same as copying from the DIDL. Technically, this is
+        * not always correct, as display outputs may exist but not be active. This
+        * initialization is necessary for some Clevo laptops that check this
+        * field before processing the brightness and display switching hotkeys.
+        *
+        * Note that internal panels should be at the front of the connector
+        * list already, ensuring they're not left out.
+        */
+       drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+                       break;
+               opregion->acpi->cadl[i++] = connector->acpi_device_id;
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       /* If fewer than 8 active devices, the list must be null terminated */
+       if (i < ARRAY_SIZE(opregion->acpi->cadl))
+               opregion->acpi->cadl[i] = 0;
+}
+
+static void swsci_setup(struct drm_i915_private *dev_priv)
+{
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       bool requested_callbacks = false;
+       u32 tmp;
+
+       /* Sub-function code 0 is okay, let's allow them. */
+       opregion->swsci_gbda_sub_functions = 1;
+       opregion->swsci_sbcb_sub_functions = 1;
+
+       /* We use GBDA to ask for supported GBDA calls. */
+       if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+               /* make the bits match the sub-function codes */
+               tmp <<= 1;
+               opregion->swsci_gbda_sub_functions |= tmp;
+       }
+
+       /*
+        * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+        * must not call interfaces that are not specifically requested by the
+        * bios.
+        */
+       if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+               /* here, the bits already match sub-function codes */
+               opregion->swsci_sbcb_sub_functions |= tmp;
+               requested_callbacks = true;
+       }
+
+       /*
+        * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
+        * the callback is _requested_. But we still can't call interfaces that
+        * are not requested.
+        */
+       if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+               /* make the bits match the sub-function codes */
+               u32 low = tmp & 0x7ff;
+               u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+               tmp = (high << 4) | (low << 1) | 1;
+
+               /* best guess what to do with supported wrt requested */
+               if (requested_callbacks) {
+                       u32 req = opregion->swsci_sbcb_sub_functions;
+                       if ((req & tmp) != req)
+                               DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+                       /* XXX: for now, trust the requested callbacks */
+                       /* opregion->swsci_sbcb_sub_functions &= tmp; */
+               } else {
+                       opregion->swsci_sbcb_sub_functions |= tmp;
+               }
+       }
+
+       DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+                        opregion->swsci_gbda_sub_functions,
+                        opregion->swsci_sbcb_sub_functions);
+}
+
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+       DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+                     "VBIOS ROM for %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+       {
+               .callback = intel_no_opregion_vbt_callback,
+               .ident = "ThinkCentre A57",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+               },
+       },
+       { }
+};
+
+static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
+{
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       const struct firmware *fw = NULL;
+       const char *name = i915_modparams.vbt_firmware;
+       int ret;
+
+       if (!name || !*name)
+               return -ENOENT;
+
+       ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
+       if (ret) {
+               DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
+                         name, ret);
+               return ret;
+       }
+
+       if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
+               opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               if (opregion->vbt_firmware) {
+                       DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
+                       opregion->vbt = opregion->vbt_firmware;
+                       opregion->vbt_size = fw->size;
+                       ret = 0;
+               } else {
+                       ret = -ENOMEM;
+               }
+       } else {
+               DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
+               ret = -EINVAL;
+       }
+
+       release_firmware(fw);
+
+       return ret;
+}
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u32 asls, mboxes;
+       char buf[sizeof(OPREGION_SIGNATURE)];
+       int err = 0;
+       void *base;
+       const void *vbt;
+       u32 vbt_size;
+
+       BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+       BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
+
+       pci_read_config_dword(pdev, ASLS, &asls);
+       DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+       if (asls == 0) {
+               DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+               return -ENOTSUPP;
+       }
+
+       INIT_WORK(&opregion->asle_work, asle_work);
+
+       base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
+       if (!base)
+               return -ENOMEM;
+
+       memcpy(buf, base, sizeof(buf));
+
+       if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
+               DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+               err = -EINVAL;
+               goto err_out;
+       }
+       opregion->header = base;
+       opregion->lid_state = base + ACPI_CLID;
+
+       DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
+                        opregion->header->over.major,
+                        opregion->header->over.minor,
+                        opregion->header->over.revision);
+
+       mboxes = opregion->header->mboxes;
+       if (mboxes & MBOX_ACPI) {
+               DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+               opregion->acpi = base + OPREGION_ACPI_OFFSET;
+       }
+
+       if (mboxes & MBOX_SWSCI) {
+               DRM_DEBUG_DRIVER("SWSCI supported\n");
+               opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+               swsci_setup(dev_priv);
+       }
+
+       if (mboxes & MBOX_ASLE) {
+               DRM_DEBUG_DRIVER("ASLE supported\n");
+               opregion->asle = base + OPREGION_ASLE_OFFSET;
+
+               opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+       }
+
+       if (mboxes & MBOX_ASLE_EXT)
+               DRM_DEBUG_DRIVER("ASLE extension supported\n");
+
+       if (intel_load_vbt_firmware(dev_priv) == 0)
+               goto out;
+
+       if (dmi_check_system(intel_no_opregion_vbt))
+               goto out;
+
+       if (opregion->header->over.major >= 2 && opregion->asle &&
+           opregion->asle->rvda && opregion->asle->rvds) {
+               resource_size_t rvda = opregion->asle->rvda;
+
+               /*
+                * opregion 2.0: rvda is the physical VBT address.
+                *
+                * opregion 2.1+: rvda is unsigned, relative offset from
+                * opregion base, and should never point within opregion.
+                */
+               if (opregion->header->over.major > 2 ||
+                   opregion->header->over.minor >= 1) {
+                       WARN_ON(rvda < OPREGION_SIZE);
+
+                       rvda += asls;
+               }
+
+               opregion->rvda = memremap(rvda, opregion->asle->rvds,
+                                         MEMREMAP_WB);
+
+               vbt = opregion->rvda;
+               vbt_size = opregion->asle->rvds;
+               if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+                       DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
+                       opregion->vbt = vbt;
+                       opregion->vbt_size = vbt_size;
+                       goto out;
+               } else {
+                       DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+                       memunmap(opregion->rvda);
+                       opregion->rvda = NULL;
+               }
+       }
+
+       vbt = base + OPREGION_VBT_OFFSET;
+       /*
+        * The VBT specification says that if the ASLE ext mailbox is not used
+        * its area is reserved, but on some CHT boards the VBT extends into the
+        * ASLE ext area. Allow this even though it is against the spec, so we
+        * do not end up rejecting the VBT on those boards (and end up not
+        * finding the LCD panel because of this).
+        */
+       vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+               OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+       vbt_size -= OPREGION_VBT_OFFSET;
+       if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+               DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+               opregion->vbt = vbt;
+               opregion->vbt_size = vbt_size;
+       } else {
+               DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+       }
+
+out:
+       return 0;
+
+err_out:
+       memunmap(base);
+       return err;
+}
+
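+/*
+ * Rough numbers for the Mailbox #4 fallback above, assuming the mailbox
+ * offsets defined earlier in this file (VBT mailbox at 0x400, ASLE ext
+ * mailbox at 0x1c00, OPREGION_SIZE of 8 KiB):
+ *   with MBOX_ASLE_EXT:    vbt_size = 0x1c00 - 0x400 = 6 KiB
+ *   without MBOX_ASLE_EXT: vbt_size = 0x2000 - 0x400 = 7 KiB
+ * i.e. when the ASLE ext mailbox is unused the VBT may spill into it.
+ */
+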
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+       DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+       {
+               .callback = intel_use_opregion_panel_type_callback,
+               .ident = "Conrac GmbH IX45GM2",
+               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+                           DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+               },
+       },
+       { }
+};
+
+int
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
+{
+       u32 panel_details;
+       int ret;
+
+       ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
+                             ret);
+               return ret;
+       }
+
+       ret = (panel_details >> 8) & 0xff;
+       if (ret > 0x10) {
+               DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+               return -EINVAL;
+       }
+
+       /* fall back to VBT panel type? */
+       if (ret == 0x0) {
+               DRM_DEBUG_KMS("No panel type in OpRegion\n");
+               return -ENODEV;
+       }
+
+       /*
+        * So far we know that some machines must use it, others must not use it.
+        * There doesn't seem to be any way to determine which way to go, except
+        * via a quirk list :(
+        */
+       if (!dmi_check_system(intel_use_opregion_panel_type)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
+       return ret - 1;
+}
+
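+/*
+ * Decoding example for the GBDA panel details call above (illustrative
+ * response value): panel_details = 0x00000300 has bits 15:8 == 0x03, i.e.
+ * OpRegion panel type 3, which is returned as the zero-based index 2, and
+ * even then only on the systems quirked in intel_use_opregion_panel_type[].
+ */
+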
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi) {
+               opregion->acpi_notifier.notifier_call =
+                       intel_opregion_video_event;
+               register_acpi_notifier(&opregion->acpi_notifier);
+       }
+
+       intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi) {
+               intel_didl_outputs(i915);
+               intel_setup_cadls(i915);
+
+               /*
+                * Notify BIOS we are ready to handle ACPI video ext notifs.
+                * Right now, all the events are handled by the ACPI video
+                * module. We don't actually need to do anything with them.
+                */
+               opregion->acpi->csts = 0;
+               opregion->acpi->drdy = 1;
+       }
+
+       if (opregion->asle) {
+               opregion->asle->tche = ASLE_TCHE_BLC_EN;
+               opregion->asle->ardy = ASLE_ARDY_READY;
+       }
+
+       intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       if (!opregion->header)
+               return;
+
+       intel_opregion_notify_adapter(i915, state);
+
+       if (opregion->asle)
+               opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+       cancel_work_sync(&i915->opregion.asle_work);
+
+       if (opregion->acpi)
+               opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+       struct intel_opregion *opregion = &i915->opregion;
+
+       intel_opregion_suspend(i915, PCI_D1);
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi_notifier.notifier_call) {
+               unregister_acpi_notifier(&opregion->acpi_notifier);
+               opregion->acpi_notifier.notifier_call = NULL;
+       }
+
+       /* just clear all opregion memory pointers now */
+       memunmap(opregion->header);
+       if (opregion->rvda) {
+               memunmap(opregion->rvda);
+               opregion->rvda = NULL;
+       }
+       if (opregion->vbt_firmware) {
+               kfree(opregion->vbt_firmware);
+               opregion->vbt_firmware = NULL;
+       }
+       opregion->header = NULL;
+       opregion->acpi = NULL;
+       opregion->swsci = NULL;
+       opregion->asle = NULL;
+       opregion->vbt = NULL;
+       opregion->lid_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
new file mode 100644 (file)
index 0000000..4aa68ff
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+       struct opregion_header *header;
+       struct opregion_acpi *acpi;
+       struct opregion_swsci *swsci;
+       u32 swsci_gbda_sub_functions;
+       u32 swsci_sbcb_sub_functions;
+       struct opregion_asle *asle;
+       void *rvda;
+       void *vbt_firmware;
+       const void *vbt;
+       u32 vbt_size;
+       u32 *lid_state;
+       struct work_struct asle_work;
+       struct notifier_block acpi_notifier;
+};
+
+#define OPREGION_SIZE            (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+                           pci_power_t state);
+
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+                                 bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+                                 pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+
+#else /* CONFIG_ACPI */
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+                                         pci_power_t state)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+       return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+       return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+       return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
new file mode 100644 (file)
index 0000000..21339b7
--- /dev/null
@@ -0,0 +1,1497 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
+
+#include "gem/i915_gem_pm.h"
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_overlay.h"
+
+/* Limits for overlay size. According to the intel docs, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But xorg assumes 2048 for height and width. Use
+ * the minimum of both.  */
+#define IMAGE_MAX_WIDTH                2048
+#define IMAGE_MAX_HEIGHT       2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY        1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE     (0x1<<19)
+#define OCMD_MIRROR_MASK       (0x3<<17)
+#define OCMD_MIRROR_MODE       (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL   (0x2<<17)
+#define OCMD_MIRROR_BOTH       (0x3<<17)
+#define OCMD_BYTEORDER_MASK    (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP           (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP            (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP     (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888           (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555           (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565           (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED    (0x8<<10)
+#define OCMD_YUV_411_PACKED    (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR    (0xc<<10)
+#define OCMD_YUV_422_PLANAR    (0xd<<10)
+#define OCMD_YUV_410_PLANAR    (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK     (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME    (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD    (0x1<<5)
+#define OCMD_TEST_MODE         (0x1<<4)
+#define OCMD_BUFFER_SELECT     (0x3<<2)
+#define OCMD_BUFFER0           (0x0<<2)
+#define OCMD_BUFFER1           (0x1<<2)
+#define OCMD_FIELD_SELECT      (0x1<<2)
+#define OCMD_FIELD0            (0x0<<1)
+#define OCMD_FIELD1            (0x1<<1)
+#define OCMD_ENABLE            (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK                (0x1<<18)
+#define OCONF_PIPE_A           (0x0<<18)
+#define OCONF_PIPE_B           (0x1<<18)
+#define OCONF_GAMMA2_ENABLE    (0x1<<16)
+#define OCONF_CSC_MODE_BT601   (0x0<<5)
+#define OCONF_CSC_MODE_BT709   (0x1<<5)
+#define OCONF_CSC_BYPASS       (0x1<<4)
+#define OCONF_CC_OUT_8BIT      (0x1<<3)
+#define OCONF_TEST_MODE                (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER        (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER  (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE         (0x1<<31)
+#define CLK_RGB24_MASK         0x0
+#define CLK_RGB16_MASK         0x070307
+#define CLK_RGB15_MASK         0x070707
+#define CLK_RGB8I_MASK         0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+       (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+       (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
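+/*
+ * Expansion example for the colorkey macros above: an RGB565 key of 0xf81f
+ * (magenta) gives RGB16_TO_COLORKEY(0xf81f) = 0x00f800f8, i.e. each 5/6-bit
+ * component is shifted to the top of its 8-bit lane before being written to
+ * DCLRKV in update_colorkey().
+ */
+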
+/* overlay flip addr flag */
+#define OFC_UPDATE             0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS          5
+#define N_VERT_Y_TAPS           3
+#define N_HORIZ_UV_TAPS         3
+#define N_VERT_UV_TAPS          3
+#define N_PHASES                17
+#define MAX_TAPS                5
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+       u32 OBUF_0Y;
+       u32 OBUF_1Y;
+       u32 OBUF_0U;
+       u32 OBUF_0V;
+       u32 OBUF_1U;
+       u32 OBUF_1V;
+       u32 OSTRIDE;
+       u32 YRGB_VPH;
+       u32 UV_VPH;
+       u32 HORZ_PH;
+       u32 INIT_PHS;
+       u32 DWINPOS;
+       u32 DWINSZ;
+       u32 SWIDTH;
+       u32 SWIDTHSW;
+       u32 SHEIGHT;
+       u32 YRGBSCALE;
+       u32 UVSCALE;
+       u32 OCLRC0;
+       u32 OCLRC1;
+       u32 DCLRKV;
+       u32 DCLRKM;
+       u32 SCLRKVH;
+       u32 SCLRKVL;
+       u32 SCLRKEN;
+       u32 OCONFIG;
+       u32 OCMD;
+       u32 RESERVED1; /* 0x6C */
+       u32 OSTART_0Y;
+       u32 OSTART_1Y;
+       u32 OSTART_0U;
+       u32 OSTART_0V;
+       u32 OSTART_1U;
+       u32 OSTART_1V;
+       u32 OTILEOFF_0Y;
+       u32 OTILEOFF_1Y;
+       u32 OTILEOFF_0U;
+       u32 OTILEOFF_0V;
+       u32 OTILEOFF_1U;
+       u32 OTILEOFF_1V;
+       u32 FASTHSCALE; /* 0xA0 */
+       u32 UVSCALEV; /* 0xA4 */
+       u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+       u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+       u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+       u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+       u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+       u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+       u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+       u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+       u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+struct intel_overlay {
+       struct drm_i915_private *i915;
+       struct intel_crtc *crtc;
+       struct i915_vma *vma;
+       struct i915_vma *old_vma;
+       bool active;
+       bool pfit_active;
+       u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+       u32 color_key:24;
+       u32 color_key_enabled:1;
+       u32 brightness, contrast, saturation;
+       u32 old_xscale, old_yscale;
+       /* register access */
+       struct drm_i915_gem_object *reg_bo;
+       struct overlay_registers __iomem *regs;
+       u32 flip_addr;
+       /* flip handling */
+       struct i915_active_request last_flip;
+};
+
+static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+                                     bool enable)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       u8 val;
+
+       /* WA_OVERLAY_CLKGATE:alm */
+       if (enable)
+               I915_WRITE(DSPCLK_GATE_D, 0);
+       else
+               I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+       /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+       pci_bus_read_config_byte(pdev->bus,
+                                PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+       if (enable)
+               val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+       else
+               val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+       pci_bus_write_config_byte(pdev->bus,
+                                 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
+static void intel_overlay_submit_request(struct intel_overlay *overlay,
+                                        struct i915_request *rq,
+                                        i915_active_retire_fn retire)
+{
+       GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
+                                           &overlay->i915->drm.struct_mutex));
+       i915_active_request_set_retire_fn(&overlay->last_flip, retire,
+                                         &overlay->i915->drm.struct_mutex);
+       __i915_active_request_set(&overlay->last_flip, rq);
+       i915_request_add(rq);
+}
+
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+                                        struct i915_request *rq,
+                                        i915_active_retire_fn retire)
+{
+       intel_overlay_submit_request(overlay, rq, retire);
+       return i915_active_request_retire(&overlay->last_flip,
+                                         &overlay->i915->drm.struct_mutex);
+}
+
+static struct i915_request *alloc_request(struct intel_overlay *overlay)
+{
+       struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
+
+       return i915_request_create(engine->kernel_context);
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+       struct drm_i915_private *dev_priv = overlay->i915;
+       struct i915_request *rq;
+       u32 *cs;
+
+       WARN_ON(overlay->active);
+
+       rq = alloc_request(overlay);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs)) {
+               i915_request_add(rq);
+               return PTR_ERR(cs);
+       }
+
+       overlay->active = true;
+
+       if (IS_I830(dev_priv))
+               i830_overlay_clock_gating(dev_priv, false);
+
+       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+       *cs++ = overlay->flip_addr | OFC_UPDATE;
+       *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+       *cs++ = MI_NOOP;
+       intel_ring_advance(rq, cs);
+
+       return intel_overlay_do_wait_request(overlay, rq, NULL);
+}
+
+static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
+                                      struct i915_vma *vma)
+{
+       enum pipe pipe = overlay->crtc->pipe;
+
+       WARN_ON(overlay->old_vma);
+
+       i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+                         vma ? vma->obj : NULL,
+                         INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+       intel_frontbuffer_flip_prepare(overlay->i915,
+                                      INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+       overlay->old_vma = overlay->vma;
+       if (vma)
+               overlay->vma = i915_vma_get(vma);
+       else
+               overlay->vma = NULL;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int intel_overlay_continue(struct intel_overlay *overlay,
+                                 struct i915_vma *vma,
+                                 bool load_polyphase_filter)
+{
+       struct drm_i915_private *dev_priv = overlay->i915;
+       struct i915_request *rq;
+       u32 flip_addr = overlay->flip_addr;
+       u32 tmp, *cs;
+
+       WARN_ON(!overlay->active);
+
+       if (load_polyphase_filter)
+               flip_addr |= OFC_UPDATE;
+
+       /* check for underruns */
+       tmp = I915_READ(DOVSTA);
+       if (tmp & (1 << 17))
+               DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+       rq = alloc_request(overlay);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       cs = intel_ring_begin(rq, 2);
+       if (IS_ERR(cs)) {
+               i915_request_add(rq);
+               return PTR_ERR(cs);
+       }
+
+       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+       *cs++ = flip_addr;
+       intel_ring_advance(rq, cs);
+
+       intel_overlay_flip_prepare(overlay, vma);
+
+       intel_overlay_submit_request(overlay, rq, NULL);
+
+       return 0;
+}
+
+static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
+{
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&overlay->old_vma);
+       if (WARN_ON(!vma))
+               return;
+
+       intel_frontbuffer_flip_complete(overlay->i915,
+                                       INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+       i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
+}
+
+static void
+intel_overlay_release_old_vid_tail(struct i915_active_request *active,
+                                  struct i915_request *rq)
+{
+       struct intel_overlay *overlay =
+               container_of(active, typeof(*overlay), last_flip);
+
+       intel_overlay_release_old_vma(overlay);
+}
+
+static void intel_overlay_off_tail(struct i915_active_request *active,
+                                  struct i915_request *rq)
+{
+       struct intel_overlay *overlay =
+               container_of(active, typeof(*overlay), last_flip);
+       struct drm_i915_private *dev_priv = overlay->i915;
+
+       intel_overlay_release_old_vma(overlay);
+
+       overlay->crtc->overlay = NULL;
+       overlay->crtc = NULL;
+       overlay->active = false;
+
+       if (IS_I830(dev_priv))
+               i830_overlay_clock_gating(dev_priv, true);
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+       struct i915_request *rq;
+       u32 *cs, flip_addr = overlay->flip_addr;
+
+       WARN_ON(!overlay->active);
+
+       /* According to intel docs the overlay hw may hang (when switching
+        * off) without loading the filter coeffs. It is however unclear whether
+        * this applies to the disabling of the overlay or to the switching off
+        * of the hw. Do it in both cases */
+       flip_addr |= OFC_UPDATE;
+
+       rq = alloc_request(overlay);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       cs = intel_ring_begin(rq, 6);
+       if (IS_ERR(cs)) {
+               i915_request_add(rq);
+               return PTR_ERR(cs);
+       }
+
+       /* wait for overlay to go idle */
+       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+       *cs++ = flip_addr;
+       *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+       /* turn overlay off */
+       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+       *cs++ = flip_addr;
+       *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+       intel_ring_advance(rq, cs);
+
+       intel_overlay_flip_prepare(overlay, NULL);
+
+       return intel_overlay_do_wait_request(overlay, rq,
+                                            intel_overlay_off_tail);
+}
+
+/* Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and to make forward progress. */
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+       return i915_active_request_retire(&overlay->last_flip,
+                                         &overlay->i915->drm.struct_mutex);
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs
+ */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+       struct drm_i915_private *dev_priv = overlay->i915;
+       u32 *cs;
+       int ret;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       /* Only wait if there is actually an old frame to release to
+        * guarantee forward progress.
+        */
+       if (!overlay->old_vma)
+               return 0;
+
+       if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+               /* synchronous slowpath */
+               struct i915_request *rq;
+
+               rq = alloc_request(overlay);
+               if (IS_ERR(rq))
+                       return PTR_ERR(rq);
+
+               cs = intel_ring_begin(rq, 2);
+               if (IS_ERR(cs)) {
+                       i915_request_add(rq);
+                       return PTR_ERR(cs);
+               }
+
+               *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+               *cs++ = MI_NOOP;
+               intel_ring_advance(rq, cs);
+
+               ret = intel_overlay_do_wait_request(overlay, rq,
+                                                   intel_overlay_release_old_vid_tail);
+               if (ret)
+                       return ret;
+       } else
+               intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+
+       return 0;
+}
+
+void intel_overlay_reset(struct drm_i915_private *dev_priv)
+{
+       struct intel_overlay *overlay = dev_priv->overlay;
+
+       if (!overlay)
+               return;
+
+       overlay->old_xscale = 0;
+       overlay->old_yscale = 0;
+       overlay->crtc = NULL;
+       overlay->active = false;
+}
+
+static int packed_depth_bytes(u32 format)
+{
+       switch (format & I915_OVERLAY_DEPTH_MASK) {
+       case I915_OVERLAY_YUV422:
+               return 4;
+       case I915_OVERLAY_YUV411:
+               /* return 6; not implemented */
+       default:
+               return -EINVAL;
+       }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+       switch (format & I915_OVERLAY_DEPTH_MASK) {
+       case I915_OVERLAY_YUV422:
+               return width << 1;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int uv_hsubsampling(u32 format)
+{
+       switch (format & I915_OVERLAY_DEPTH_MASK) {
+       case I915_OVERLAY_YUV422:
+       case I915_OVERLAY_YUV420:
+               return 2;
+       case I915_OVERLAY_YUV411:
+       case I915_OVERLAY_YUV410:
+               return 4;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+       switch (format & I915_OVERLAY_DEPTH_MASK) {
+       case I915_OVERLAY_YUV420:
+       case I915_OVERLAY_YUV410:
+               return 2;
+       case I915_OVERLAY_YUV422:
+       case I915_OVERLAY_YUV411:
+               return 1;
+       default:
+               return -EINVAL;
+       }
+}
+
+static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
+{
+       u32 sw;
+
+       if (IS_GEN(dev_priv, 2))
+               sw = ALIGN((offset & 31) + width, 32);
+       else
+               sw = ALIGN((offset & 63) + width, 64);
+
+       if (sw == 0)
+               return 0;
+
+       return (sw - 32) >> 3;
+}
+
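+/*
+ * Worked example for calc_swidthsw() above, with illustrative inputs on the
+ * non-gen2 path: low offset bits of 16 and a 1440-byte packed line give
+ *   sw = ALIGN(16 + 1440, 64) = 1472, and (1472 - 32) >> 3 = 180,
+ * which is the value that ends up in SWIDTHSW for that plane.
+ */
+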
+static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
+       [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
+       [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
+       [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
+       [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
+       [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
+       [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
+       [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
+       [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
+       [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
+       [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
+       [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
+       [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
+       [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
+       [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
+       [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
+       [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
+       [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
+};
+
+static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
+       [ 0] = { 0x3000, 0x1800, 0x1800, },
+       [ 1] = { 0xb000, 0x18d0, 0x2e60, },
+       [ 2] = { 0xb000, 0x1990, 0x2ce0, },
+       [ 3] = { 0xb020, 0x1a68, 0x2b40, },
+       [ 4] = { 0xb040, 0x1b20, 0x29e0, },
+       [ 5] = { 0xb060, 0x1bd8, 0x2880, },
+       [ 6] = { 0xb080, 0x1c88, 0x3e60, },
+       [ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
+       [ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
+       [ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
+       [10] = { 0xb100, 0x1eb8, 0x3620, },
+       [11] = { 0xb100, 0x1f18, 0x34a0, },
+       [12] = { 0xb100, 0x1f68, 0x3360, },
+       [13] = { 0xb0e0, 0x1fa8, 0x3240, },
+       [14] = { 0xb0c0, 0x1fe0, 0x3140, },
+       [15] = { 0xb060, 0x1ff0, 0x30a0, },
+       [16] = { 0x3000, 0x0800, 0x3000, },
+};
+
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
+{
+       memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+       memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+                   sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+                                  struct overlay_registers __iomem *regs,
+                                  struct drm_intel_overlay_put_image *params)
+{
+       /* fixed point with a 12 bit shift */
+       u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+       bool scale_changed = false;
+       int uv_hscale = uv_hsubsampling(params->flags);
+       int uv_vscale = uv_vsubsampling(params->flags);
+
+       if (params->dst_width > 1)
+               xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
+                       params->dst_width;
+       else
+               xscale = 1 << FP_SHIFT;
+
+       if (params->dst_height > 1)
+               yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
+                       params->dst_height;
+       else
+               yscale = 1 << FP_SHIFT;
+
+       /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+       xscale_UV = xscale/uv_hscale;
+       yscale_UV = yscale/uv_vscale;
+       /* make the Y scale an exact multiple of the UV scale */
+       xscale = xscale_UV * uv_hscale;
+       yscale = yscale_UV * uv_vscale;
+       /*} else {
+         xscale_UV = 0;
+         yscale_UV = 0;
+         }*/
+
+       if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+               scale_changed = true;
+       overlay->old_xscale = xscale;
+       overlay->old_yscale = yscale;
+
+       iowrite32(((yscale & FRACT_MASK) << 20) |
+                 ((xscale >> FP_SHIFT)  << 16) |
+                 ((xscale & FRACT_MASK) << 3),
+                &regs->YRGBSCALE);
+
+       iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+                 ((xscale_UV >> FP_SHIFT)  << 16) |
+                 ((xscale_UV & FRACT_MASK) << 3),
+                &regs->UVSCALE);
+
+       iowrite32((((yscale    >> FP_SHIFT) << 16) |
+                  ((yscale_UV >> FP_SHIFT) << 0)),
+                &regs->UVSCALEV);
+
+       if (scale_changed)
+               update_polyphase_filter(regs);
+
+       return scale_changed;
+}
+
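+/*
+ * Example of the FP_SHIFT (12-bit) fixed-point math above, with illustrative
+ * sizes: src_scan_width = 1281 and dst_width = 640 give
+ *   xscale = (1280 << 12) / 640 = 0x2000, i.e. 2.0.
+ * With 4:2:0 subsampling (uv_hscale = 2), xscale_UV = 0x1000 (1.0), and
+ * xscale is snapped back to xscale_UV * 2 so the Y/UV ratio stays exact.
+ */
+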
+static void update_colorkey(struct intel_overlay *overlay,
+                           struct overlay_registers __iomem *regs)
+{
+       const struct intel_plane_state *state =
+               to_intel_plane_state(overlay->crtc->base.primary->state);
+       u32 key = overlay->color_key;
+       u32 format = 0;
+       u32 flags = 0;
+
+       if (overlay->color_key_enabled)
+               flags |= DST_KEY_ENABLE;
+
+       if (state->base.visible)
+               format = state->base.fb->format->format;
+
+       switch (format) {
+       case DRM_FORMAT_C8:
+               key = 0;
+               flags |= CLK_RGB8I_MASK;
+               break;
+       case DRM_FORMAT_XRGB1555:
+               key = RGB15_TO_COLORKEY(key);
+               flags |= CLK_RGB15_MASK;
+               break;
+       case DRM_FORMAT_RGB565:
+               key = RGB16_TO_COLORKEY(key);
+               flags |= CLK_RGB16_MASK;
+               break;
+       default:
+               flags |= CLK_RGB24_MASK;
+               break;
+       }
+
+       iowrite32(key, &regs->DCLRKV);
+       iowrite32(flags, &regs->DCLRKM);
+}
+
+static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
+{
+       u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+       if (params->flags & I915_OVERLAY_YUV_PLANAR) {
+               switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
+               case I915_OVERLAY_YUV422:
+                       cmd |= OCMD_YUV_422_PLANAR;
+                       break;
+               case I915_OVERLAY_YUV420:
+                       cmd |= OCMD_YUV_420_PLANAR;
+                       break;
+               case I915_OVERLAY_YUV411:
+               case I915_OVERLAY_YUV410:
+                       cmd |= OCMD_YUV_410_PLANAR;
+                       break;
+               }
+       } else { /* YUV packed */
+               switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
+               case I915_OVERLAY_YUV422:
+                       cmd |= OCMD_YUV_422_PACKED;
+                       break;
+               case I915_OVERLAY_YUV411:
+                       cmd |= OCMD_YUV_411_PACKED;
+                       break;
+               }
+
+               switch (params->flags & I915_OVERLAY_SWAP_MASK) {
+               case I915_OVERLAY_NO_SWAP:
+                       break;
+               case I915_OVERLAY_UV_SWAP:
+                       cmd |= OCMD_UV_SWAP;
+                       break;
+               case I915_OVERLAY_Y_SWAP:
+                       cmd |= OCMD_Y_SWAP;
+                       break;
+               case I915_OVERLAY_Y_AND_UV_SWAP:
+                       cmd |= OCMD_Y_AND_UV_SWAP;
+                       break;
+               }
+       }
+
+       return cmd;
+}
+
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+                                     struct drm_i915_gem_object *new_bo,
+                                     struct drm_intel_overlay_put_image *params)
+{
+       struct overlay_registers __iomem *regs = overlay->regs;
+       struct drm_i915_private *dev_priv = overlay->i915;
+       u32 swidth, swidthsw, sheight, ostride;
+       enum pipe pipe = overlay->crtc->pipe;
+       bool scale_changed = false;
+       struct i915_vma *vma;
+       int ret, tmp_width;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+
+       ret = intel_overlay_release_old_vid(overlay);
+       if (ret != 0)
+               return ret;
+
+       atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+       i915_gem_object_lock(new_bo);
+       vma = i915_gem_object_pin_to_display_plane(new_bo,
+                                                  0, NULL, PIN_MAPPABLE);
+       i915_gem_object_unlock(new_bo);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto out_pin_section;
+       }
+       intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
+
+       ret = i915_vma_put_fence(vma);
+       if (ret)
+               goto out_unpin;
+
+       if (!overlay->active) {
+               u32 oconfig;
+
+               oconfig = OCONF_CC_OUT_8BIT;
+               if (IS_GEN(dev_priv, 4))
+                       oconfig |= OCONF_CSC_MODE_BT709;
+               oconfig |= pipe == 0 ?
+                       OCONF_PIPE_A : OCONF_PIPE_B;
+               iowrite32(oconfig, &regs->OCONFIG);
+
+               ret = intel_overlay_on(overlay);
+               if (ret != 0)
+                       goto out_unpin;
+       }
+
+       iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
+       iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);
+
+       if (params->flags & I915_OVERLAY_YUV_PACKED)
+               tmp_width = packed_width_bytes(params->flags,
+                                              params->src_width);
+       else
+               tmp_width = params->src_width;
+
+       swidth = params->src_width;
+       swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
+       sheight = params->src_height;
+       iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
+       ostride = params->stride_Y;
+
+       if (params->flags & I915_OVERLAY_YUV_PLANAR) {
+               int uv_hscale = uv_hsubsampling(params->flags);
+               int uv_vscale = uv_vsubsampling(params->flags);
+               u32 tmp_U, tmp_V;
+
+               swidth |= (params->src_width / uv_hscale) << 16;
+               sheight |= (params->src_height / uv_vscale) << 16;
+
+               tmp_U = calc_swidthsw(dev_priv, params->offset_U,
+                                     params->src_width / uv_hscale);
+               tmp_V = calc_swidthsw(dev_priv, params->offset_V,
+                                     params->src_width / uv_hscale);
+               swidthsw |= max(tmp_U, tmp_V) << 16;
+
+               iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+                         &regs->OBUF_0U);
+               iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+                         &regs->OBUF_0V);
+
+               ostride |= params->stride_UV << 16;
+       }
+
+       iowrite32(swidth, &regs->SWIDTH);
+       iowrite32(swidthsw, &regs->SWIDTHSW);
+       iowrite32(sheight, &regs->SHEIGHT);
+       iowrite32(ostride, &regs->OSTRIDE);
+
+       scale_changed = update_scaling_factors(overlay, regs, params);
+
+       update_colorkey(overlay, regs);
+
+       iowrite32(overlay_cmd_reg(params), &regs->OCMD);
+
+       ret = intel_overlay_continue(overlay, vma, scale_changed);
+       if (ret)
+               goto out_unpin;
+
+       return 0;
+
+out_unpin:
+       i915_gem_object_unpin_from_display_plane(vma);
+out_pin_section:
+       atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+       return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+       struct drm_i915_private *dev_priv = overlay->i915;
+       int ret;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+
+       ret = intel_overlay_recover_from_interrupt(overlay);
+       if (ret != 0)
+               return ret;
+
+       if (!overlay->active)
+               return 0;
+
+       ret = intel_overlay_release_old_vid(overlay);
+       if (ret != 0)
+               return ret;
+
+       iowrite32(0, &overlay->regs->OCMD);
+
+       return intel_overlay_off(overlay);
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+                                         struct intel_crtc *crtc)
+{
+       if (!crtc->active)
+               return -EINVAL;
+
+       /* can't use the overlay with double wide pipe */
+       if (crtc->config->double_wide)
+               return -EINVAL;
+
+       return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+       struct drm_i915_private *dev_priv = overlay->i915;
+       u32 pfit_control = I915_READ(PFIT_CONTROL);
+       u32 ratio;
+
+       /* XXX: This is not the same logic as in the xorg driver, but more in
+        * line with the intel documentation for the i965
+        */
+       if (INTEL_GEN(dev_priv) >= 4) {
+               /* on i965 use the PGM reg to read out the autoscaler values */
+               ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+       } else {
+               if (pfit_control & VERT_AUTO_SCALE)
+                       ratio = I915_READ(PFIT_AUTO_RATIOS);
+               else
+                       ratio = I915_READ(PFIT_PGM_RATIOS);
+               ratio >>= PFIT_VERT_SCALE_SHIFT;
+       }
+
+       overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+                            struct drm_intel_overlay_put_image *rec)
+{
+       const struct intel_crtc_state *pipe_config =
+               overlay->crtc->config;
+
+       if (rec->dst_x < pipe_config->pipe_src_w &&
+           rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
+           rec->dst_y < pipe_config->pipe_src_h &&
+           rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
+               return 0;
+       else
+               return -EINVAL;
+}
+
+static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
+{
+       u32 tmp;
+
+       /* downscaling limit is 8.0 */
+       tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
+       if (tmp > 7)
+               return -EINVAL;
+
+       tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
+       if (tmp > 7)
+               return -EINVAL;
+
+       return 0;
+}
+
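+/*
+ * Example for the downscaling check above, with illustrative sizes:
+ * src_scan_height = 1600 and dst_height = 200 yield
+ *   ((1600 << 16) / 200) >> 16 = 8 > 7
+ * and are rejected, while dst_height = 201 (a ratio of roughly 7.96) still
+ * passes, matching the 8.0 downscaling limit noted in the comment.
+ */
+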
+static int check_overlay_src(struct drm_i915_private *dev_priv,
+                            struct drm_intel_overlay_put_image *rec,
+                            struct drm_i915_gem_object *new_bo)
+{
+       int uv_hscale = uv_hsubsampling(rec->flags);
+       int uv_vscale = uv_vsubsampling(rec->flags);
+       u32 stride_mask;
+       int depth;
+       u32 tmp;
+
+       /* check src dimensions */
+       if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
+               if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+                   rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
+                       return -EINVAL;
+       } else {
+               if (rec->src_height > IMAGE_MAX_HEIGHT ||
+                   rec->src_width  > IMAGE_MAX_WIDTH)
+                       return -EINVAL;
+       }
+
+       /* better safe than sorry, use 4 as the maximal subsampling ratio */
+       if (rec->src_height < N_VERT_Y_TAPS*4 ||
+           rec->src_width  < N_HORIZ_Y_TAPS*4)
+               return -EINVAL;
+
+       /* check alignment constraints */
+       switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+       case I915_OVERLAY_RGB:
+               /* not implemented */
+               return -EINVAL;
+
+       case I915_OVERLAY_YUV_PACKED:
+               if (uv_vscale != 1)
+                       return -EINVAL;
+
+               depth = packed_depth_bytes(rec->flags);
+               if (depth < 0)
+                       return depth;
+
+               /* ignore UV planes */
+               rec->stride_UV = 0;
+               rec->offset_U = 0;
+               rec->offset_V = 0;
+               /* check pixel alignment */
+               if (rec->offset_Y % depth)
+                       return -EINVAL;
+               break;
+
+       case I915_OVERLAY_YUV_PLANAR:
+               if (uv_vscale < 0 || uv_hscale < 0)
+                       return -EINVAL;
+               /* no offset restrictions for planar formats */
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (rec->src_width % uv_hscale)
+               return -EINVAL;
+
+       /* stride checking */
+       if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+               stride_mask = 255;
+       else
+               stride_mask = 63;
+
+       if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+               return -EINVAL;
+       if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
+               return -EINVAL;
+
+       tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+               4096 : 8192;
+       if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+               return -EINVAL;
+
+       /* check buffer dimensions */
+       switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+       case I915_OVERLAY_RGB:
+       case I915_OVERLAY_YUV_PACKED:
+               /* always 4 Y values per depth pixels */
+               if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+                       return -EINVAL;
+
+               tmp = rec->stride_Y*rec->src_height;
+               if (rec->offset_Y + tmp > new_bo->base.size)
+                       return -EINVAL;
+               break;
+
+       case I915_OVERLAY_YUV_PLANAR:
+               if (rec->src_width > rec->stride_Y)
+                       return -EINVAL;
+               if (rec->src_width/uv_hscale > rec->stride_UV)
+                       return -EINVAL;
+
+               tmp = rec->stride_Y * rec->src_height;
+               if (rec->offset_Y + tmp > new_bo->base.size)
+                       return -EINVAL;
+
+               tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+               if (rec->offset_U + tmp > new_bo->base.size ||
+                   rec->offset_V + tmp > new_bo->base.size)
+                       return -EINVAL;
+               break;
+       }
+
+       return 0;
+}
+
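+/*
+ * A summary of the stride rules enforced by check_overlay_src() above:
+ * strides must be 64-byte aligned (256-byte on 830/845), gen4 additionally
+ * requires stride_Y >= 512, and the upper bounds are 4 KiB (planar) or
+ * 8 KiB (packed) for Y and 2 KiB for UV.
+ */
+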
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
+{
+       struct drm_intel_overlay_put_image *params = data;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_overlay *overlay;
+       struct drm_crtc *drmmode_crtc;
+       struct intel_crtc *crtc;
+       struct drm_i915_gem_object *new_bo;
+       int ret;
+
+       overlay = dev_priv->overlay;
+       if (!overlay) {
+               DRM_DEBUG("userspace bug: no overlay\n");
+               return -ENODEV;
+       }
+
+       if (!(params->flags & I915_OVERLAY_ENABLE)) {
+               drm_modeset_lock_all(dev);
+               mutex_lock(&dev->struct_mutex);
+
+               ret = intel_overlay_switch_off(overlay);
+
+               mutex_unlock(&dev->struct_mutex);
+               drm_modeset_unlock_all(dev);
+
+               return ret;
+       }
+
+       drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
+       if (!drmmode_crtc)
+               return -ENOENT;
+       crtc = to_intel_crtc(drmmode_crtc);
+
+       new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
+       if (!new_bo)
+               return -ENOENT;
+
+       drm_modeset_lock_all(dev);
+       mutex_lock(&dev->struct_mutex);
+
+       if (i915_gem_object_is_tiled(new_bo)) {
+               DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       ret = intel_overlay_recover_from_interrupt(overlay);
+       if (ret != 0)
+               goto out_unlock;
+
+       if (overlay->crtc != crtc) {
+               ret = intel_overlay_switch_off(overlay);
+               if (ret != 0)
+                       goto out_unlock;
+
+               ret = check_overlay_possible_on_crtc(overlay, crtc);
+               if (ret != 0)
+                       goto out_unlock;
+
+               overlay->crtc = crtc;
+               crtc->overlay = overlay;
+
+               /* line too wide, i.e. one-line-mode */
+               if (crtc->config->pipe_src_w > 1024 &&
+                   crtc->config->gmch_pfit.control & PFIT_ENABLE) {
+                       overlay->pfit_active = true;
+                       update_pfit_vscale_ratio(overlay);
+               } else
+                       overlay->pfit_active = false;
+       }
+
+       ret = check_overlay_dst(overlay, params);
+       if (ret != 0)
+               goto out_unlock;
+
+       if (overlay->pfit_active) {
+               params->dst_y = (((u32)params->dst_y << 12) /
+                                overlay->pfit_vscale_ratio);
+               /* shifting right rounds downwards, so add 1 */
+               params->dst_height = (((u32)params->dst_height << 12) /
+                                overlay->pfit_vscale_ratio) + 1;
+       }
+
+       if (params->src_scan_height > params->src_height ||
+           params->src_scan_width > params->src_width) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       ret = check_overlay_src(dev_priv, params, new_bo);
+       if (ret != 0)
+               goto out_unlock;
+
+       /* Check scaling after src size to prevent a divide-by-zero. */
+       ret = check_overlay_scaling(params);
+       if (ret != 0)
+               goto out_unlock;
+
+       ret = intel_overlay_do_put_image(overlay, new_bo, params);
+       if (ret != 0)
+               goto out_unlock;
+
+       mutex_unlock(&dev->struct_mutex);
+       drm_modeset_unlock_all(dev);
+       i915_gem_object_put(new_bo);
+
+       return 0;
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       drm_modeset_unlock_all(dev);
+       i915_gem_object_put(new_bo);
+
+       return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+                            struct overlay_registers __iomem *regs)
+{
+       iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+                 &regs->OCLRC0);
+       iowrite32(overlay->saturation, &regs->OCLRC1);
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+       int i;
+
+       if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+               return false;
+
+       for (i = 0; i < 3; i++) {
+               if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+                       return false;
+       }
+
+       return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+       int i;
+
+       for (i = 0; i < 3; i++) {
+               if (((gamma5 >> i*8) & 0xff) == 0x80)
+                       return false;
+       }
+
+       return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+       if (!check_gamma_bounds(0, attrs->gamma0) ||
+           !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+           !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+           !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+           !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+           !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+           !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+               return -EINVAL;
+
+       if (!check_gamma5_errata(attrs->gamma5))
+               return -EINVAL;
+
+       return 0;
+}
+
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_intel_overlay_attrs *attrs = data;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_overlay *overlay;
+       int ret;
+
+       overlay = dev_priv->overlay;
+       if (!overlay) {
+               DRM_DEBUG("userspace bug: no overlay\n");
+               return -ENODEV;
+       }
+
+       drm_modeset_lock_all(dev);
+       mutex_lock(&dev->struct_mutex);
+
+       ret = -EINVAL;
+       if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+               attrs->color_key  = overlay->color_key;
+               attrs->brightness = overlay->brightness;
+               attrs->contrast   = overlay->contrast;
+               attrs->saturation = overlay->saturation;
+
+               if (!IS_GEN(dev_priv, 2)) {
+                       attrs->gamma0 = I915_READ(OGAMC0);
+                       attrs->gamma1 = I915_READ(OGAMC1);
+                       attrs->gamma2 = I915_READ(OGAMC2);
+                       attrs->gamma3 = I915_READ(OGAMC3);
+                       attrs->gamma4 = I915_READ(OGAMC4);
+                       attrs->gamma5 = I915_READ(OGAMC5);
+               }
+       } else {
+               if (attrs->brightness < -128 || attrs->brightness > 127)
+                       goto out_unlock;
+               if (attrs->contrast > 255)
+                       goto out_unlock;
+               if (attrs->saturation > 1023)
+                       goto out_unlock;
+
+               overlay->color_key  = attrs->color_key;
+               overlay->brightness = attrs->brightness;
+               overlay->contrast   = attrs->contrast;
+               overlay->saturation = attrs->saturation;
+
+               update_reg_attrs(overlay, overlay->regs);
+
+               if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+                       if (IS_GEN(dev_priv, 2))
+                               goto out_unlock;
+
+                       if (overlay->active) {
+                               ret = -EBUSY;
+                               goto out_unlock;
+                       }
+
+                       ret = check_gamma(attrs);
+                       if (ret)
+                               goto out_unlock;
+
+                       I915_WRITE(OGAMC0, attrs->gamma0);
+                       I915_WRITE(OGAMC1, attrs->gamma1);
+                       I915_WRITE(OGAMC2, attrs->gamma2);
+                       I915_WRITE(OGAMC3, attrs->gamma3);
+                       I915_WRITE(OGAMC4, attrs->gamma4);
+                       I915_WRITE(OGAMC5, attrs->gamma5);
+               }
+       }
+       overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
+
+       ret = 0;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       drm_modeset_unlock_all(dev);
+
+       return ret;
+}
+
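+/*
+ * Allocate the backing page for the overlay register file (preferring stolen
+ * memory, with an internal object as fallback), pin it into the mappable
+ * GGTT, and record either its physical or GGTT address for flips.
+ */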
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+       struct drm_i915_private *i915 = overlay->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
+       if (obj == NULL)
+               obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto err_unlock;
+       }
+
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err_put_bo;
+       }
+
+       if (use_phys)
+               overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+       else
+               overlay->flip_addr = i915_ggtt_offset(vma);
+       overlay->regs = i915_vma_pin_iomap(vma);
+       i915_vma_unpin(vma);
+
+       if (IS_ERR(overlay->regs)) {
+               err = PTR_ERR(overlay->regs);
+               goto err_put_bo;
+       }
+
+       overlay->reg_bo = obj;
+       mutex_unlock(&i915->drm.struct_mutex);
+       return 0;
+
+err_put_bo:
+       i915_gem_object_put(obj);
+err_unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+}
+
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
+{
+       struct intel_overlay *overlay;
+       int ret;
+
+       if (!HAS_OVERLAY(dev_priv))
+               return;
+
+       overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+       if (!overlay)
+               return;
+
+       overlay->i915 = dev_priv;
+
+       overlay->color_key = 0x0101fe;
+       overlay->color_key_enabled = true;
+       overlay->brightness = -19;
+       overlay->contrast = 75;
+       overlay->saturation = 146;
+
+       INIT_ACTIVE_REQUEST(&overlay->last_flip);
+
+       ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+       if (ret)
+               goto out_free;
+
+       memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+       update_polyphase_filter(overlay->regs);
+       update_reg_attrs(overlay, overlay->regs);
+
+       dev_priv->overlay = overlay;
+       DRM_INFO("Initialized overlay support.\n");
+       return;
+
+out_free:
+       kfree(overlay);
+}
+
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+{
+       struct intel_overlay *overlay;
+
+       overlay = fetch_and_zero(&dev_priv->overlay);
+       if (!overlay)
+               return;
+
+       /*
+        * The bos should already have been freed by the generic code.
+        * Furthermore, modesetting teardown happens beforehand, so the
+        * hardware should be off already.
+        */
+       WARN_ON(overlay->active);
+
+       i915_gem_object_put(overlay->reg_bo);
+
+       kfree(overlay);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+struct intel_overlay_error_state {
+       struct overlay_registers regs;
+       unsigned long base;
+       u32 dovsta;
+       u32 isr;
+};
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_overlay *overlay = dev_priv->overlay;
+       struct intel_overlay_error_state *error;
+
+       if (!overlay || !overlay->active)
+               return NULL;
+
+       error = kmalloc(sizeof(*error), GFP_ATOMIC);
+       if (error == NULL)
+               return NULL;
+
+       error->dovsta = I915_READ(DOVSTA);
+       error->isr = I915_READ(GEN2_ISR);
+       error->base = overlay->flip_addr;
+
+       memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
+
+       return error;
+}
+
+void
+intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
+                               struct intel_overlay_error_state *error)
+{
+       i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+                         error->dovsta, error->isr);
+       i915_error_printf(m, "  Register file at 0x%08lx:\n",
+                         error->base);
+
+#define P(x) i915_error_printf(m, "    " #x ": 0x%08x\n", error->regs.x)
+       P(OBUF_0Y);
+       P(OBUF_1Y);
+       P(OBUF_0U);
+       P(OBUF_0V);
+       P(OBUF_1U);
+       P(OBUF_1V);
+       P(OSTRIDE);
+       P(YRGB_VPH);
+       P(UV_VPH);
+       P(HORZ_PH);
+       P(INIT_PHS);
+       P(DWINPOS);
+       P(DWINSZ);
+       P(SWIDTH);
+       P(SWIDTHSW);
+       P(SHEIGHT);
+       P(YRGBSCALE);
+       P(UVSCALE);
+       P(OCLRC0);
+       P(OCLRC1);
+       P(DCLRKV);
+       P(DCLRKM);
+       P(SCLRKVH);
+       P(SCLRKVL);
+       P(SCLRKEN);
+       P(OCONFIG);
+       P(OCMD);
+       P(OSTART_0Y);
+       P(OSTART_1Y);
+       P(OSTART_0U);
+       P(OSTART_0V);
+       P(OSTART_1U);
+       P(OSTART_1V);
+       P(OTILEOFF_0Y);
+       P(OTILEOFF_1Y);
+       P(OTILEOFF_0U);
+       P(OTILEOFF_0V);
+       P(OTILEOFF_1U);
+       P(OTILEOFF_1V);
+       P(FASTHSCALE);
+       P(UVSCALEV);
+#undef P
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
new file mode 100644 (file)
index 0000000..a167c28
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_OVERLAY_H__
+#define __INTEL_OVERLAY_H__
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_error_state_buf;
+struct drm_i915_private;
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+                                    struct intel_overlay_error_state *error);
+
+#endif /* __INTEL_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
new file mode 100644 (file)
index 0000000..1e2c430
--- /dev/null
@@ -0,0 +1,671 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "intel_atomic.h"
+#include "intel_drv.h"
+#include "intel_pipe_crc.h"
+
+static const char * const pipe_crc_sources[] = {
+       [INTEL_PIPE_CRC_SOURCE_NONE] = "none",
+       [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
+       [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
+       [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
+       [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
+       [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
+       [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
+       [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
+       [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
+       [INTEL_PIPE_CRC_SOURCE_TV] = "TV",
+       [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
+       [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
+       [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
+       [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
+};
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+                                u32 *val)
+{
+       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+       switch (*source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
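+/*
+ * For the "auto" source, derive the CRC source from whichever encoder is
+ * currently driving the pipe: TV out or the DP port in use, defaulting to
+ * the pipe itself.
+ */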
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe,
+                                    enum intel_pipe_crc_source *source)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct intel_encoder *encoder;
+       struct intel_crtc *crtc;
+       struct intel_digital_port *dig_port;
+       int ret = 0;
+
+       *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+       drm_modeset_lock_all(dev);
+       for_each_intel_encoder(dev, encoder) {
+               if (!encoder->base.crtc)
+                       continue;
+
+               crtc = to_intel_crtc(encoder->base.crtc);
+
+               if (crtc->pipe != pipe)
+                       continue;
+
+               switch (encoder->type) {
+               case INTEL_OUTPUT_TVOUT:
+                       *source = INTEL_PIPE_CRC_SOURCE_TV;
+                       break;
+               case INTEL_OUTPUT_DP:
+               case INTEL_OUTPUT_EDP:
+                       dig_port = enc_to_dig_port(&encoder->base);
+                       switch (dig_port->base.port) {
+                       case PORT_B:
+                               *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+                               break;
+                       case PORT_C:
+                               *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+                               break;
+                       case PORT_D:
+                               *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+                               break;
+                       default:
+                               WARN(1, "nonexisting DP port %c\n",
+                                    port_name(dig_port->base.port));
+                               break;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       drm_modeset_unlock_all(dev);
+
+       return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+                               enum pipe pipe,
+                               enum intel_pipe_crc_source *source,
+                               u32 *val)
+{
+       bool need_stable_symbols = false;
+
+       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+               if (ret)
+                       return ret;
+       }
+
+       switch (*source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_B:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+               need_stable_symbols = true;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_C:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+               need_stable_symbols = true;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_D:
+               if (!IS_CHERRYVIEW(dev_priv))
+                       return -EINVAL;
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+               need_stable_symbols = true;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * When the pipe CRC tap point is after the transcoders we need
+        * to tweak symbol-level features to produce a deterministic series of
+        * symbols for a given frame. We need to reset those features only once
+        * a frame (instead of every nth symbol):
+        *   - DC-balance: used to ensure a better clock recovery from the data
+        *     link (SDVO)
+        *   - DisplayPort scrambling: used for EMI reduction
+        */
+       if (need_stable_symbols) {
+               u32 tmp = I915_READ(PORT_DFT2_G4X);
+
+               tmp |= DC_BALANCE_RESET_VLV;
+               switch (pipe) {
+               case PIPE_A:
+                       tmp |= PIPE_A_SCRAMBLE_RESET;
+                       break;
+               case PIPE_B:
+                       tmp |= PIPE_B_SCRAMBLE_RESET;
+                       break;
+               case PIPE_C:
+                       tmp |= PIPE_C_SCRAMBLE_RESET;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               I915_WRITE(PORT_DFT2_G4X, tmp);
+       }
+
+       return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+                                enum pipe pipe,
+                                enum intel_pipe_crc_source *source,
+                                u32 *val)
+{
+       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+               if (ret)
+                       return ret;
+       }
+
+       switch (*source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_TV:
+               if (!SUPPORTS_TV(dev_priv))
+                       return -EINVAL;
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               /*
+                * The DP CRC source doesn't work on g4x.
+                * It can be made to work to some degree by selecting
+                * the correct CRC source before the port is enabled,
+                * and not touching the CRC source bits again until
+                * the port is disabled. But even then the bits
+                * eventually get stuck and a reboot is needed to get
+                * working CRCs on the pipe again. Let's simply
+                * refuse to use DP CRCs on g4x.
+                */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
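+/*
+ * Undo the per-pipe scramble reset used for stable CRC symbols, and clear
+ * the DC-balance reset once no pipe needs it anymore.
+ */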
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+                                        enum pipe pipe)
+{
+       u32 tmp = I915_READ(PORT_DFT2_G4X);
+
+       switch (pipe) {
+       case PIPE_A:
+               tmp &= ~PIPE_A_SCRAMBLE_RESET;
+               break;
+       case PIPE_B:
+               tmp &= ~PIPE_B_SCRAMBLE_RESET;
+               break;
+       case PIPE_C:
+               tmp &= ~PIPE_C_SCRAMBLE_RESET;
+               break;
+       default:
+               return;
+       }
+       if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+               tmp &= ~DC_BALANCE_RESET_VLV;
+       I915_WRITE(PORT_DFT2_G4X, tmp);
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+                               u32 *val)
+{
+       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+       switch (*source) {
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
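+/*
+ * Toggle the CRC-related workarounds by committing an atomic state with the
+ * new crc_enabled value, forcing a full modeset when PSR is in use (and for
+ * the HSW pipe A / transcoder EDP case).
+ */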
+static void
+intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_crtc_state *pipe_config;
+       struct drm_atomic_state *state;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret;
+
+       drm_modeset_acquire_init(&ctx, 0);
+
+       state = drm_atomic_state_alloc(&dev_priv->drm);
+       if (!state) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       state->acquire_ctx = &ctx;
+
+retry:
+       pipe_config = intel_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(pipe_config)) {
+               ret = PTR_ERR(pipe_config);
+               goto put_state;
+       }
+
+       pipe_config->base.mode_changed = pipe_config->has_psr;
+       pipe_config->crc_enabled = enable;
+
+       if (IS_HASWELL(dev_priv) &&
+           pipe_config->base.active && crtc->pipe == PIPE_A &&
+           pipe_config->cpu_transcoder == TRANSCODER_EDP)
+               pipe_config->base.mode_changed = true;
+
+       ret = drm_atomic_commit(state);
+
+put_state:
+       if (ret == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       }
+
+       drm_atomic_state_put(state);
+unlock:
+       WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+                               enum pipe pipe,
+                               enum intel_pipe_crc_source *source,
+                               u32 *val)
+{
+       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+       switch (*source) {
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+                               enum pipe pipe,
+                               enum intel_pipe_crc_source *source,
+                               u32 *val)
+{
+       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+       switch (*source) {
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE3:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE4:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE5:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE6:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE7:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
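+/* Compute the platform-specific PIPE_CRC_CTL value for the given source. */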
+static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+                              enum pipe pipe,
+                              enum intel_pipe_crc_source *source, u32 *val)
+{
+       if (IS_GEN(dev_priv, 2))
+               return i8xx_pipe_crc_ctl_reg(source, val);
+       else if (INTEL_GEN(dev_priv) < 5)
+               return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+       else if (IS_GEN_RANGE(dev_priv, 5, 6))
+               return ilk_pipe_crc_ctl_reg(source, val);
+       else if (INTEL_GEN(dev_priv) < 9)
+               return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+       else
+               return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+       int i;
+
+       if (!buf) {
+               *s = INTEL_PIPE_CRC_SOURCE_NONE;
+               return 0;
+       }
+
+       i = match_string(pipe_crc_sources, ARRAY_SIZE(pipe_crc_sources), buf);
+       if (i < 0)
+               return i;
+
+       *s = i;
+       return 0;
+}
+
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
+{
+       enum pipe pipe;
+
+       for_each_pipe(dev_priv, pipe) {
+               struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+
+               spin_lock_init(&pipe_crc->lock);
+       }
+}
+
+static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+                                const enum intel_pipe_crc_source source)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+                                const enum intel_pipe_crc_source source)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+       case INTEL_PIPE_CRC_SOURCE_TV:
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+                               const enum intel_pipe_crc_source source)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+       case INTEL_PIPE_CRC_SOURCE_DP_B:
+       case INTEL_PIPE_CRC_SOURCE_DP_C:
+       case INTEL_PIPE_CRC_SOURCE_DP_D:
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+                               const enum intel_pipe_crc_source source)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+                               const enum intel_pipe_crc_source source)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+                               const enum intel_pipe_crc_source source)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+       case INTEL_PIPE_CRC_SOURCE_PLANE3:
+       case INTEL_PIPE_CRC_SOURCE_PLANE4:
+       case INTEL_PIPE_CRC_SOURCE_PLANE5:
+       case INTEL_PIPE_CRC_SOURCE_PLANE6:
+       case INTEL_PIPE_CRC_SOURCE_PLANE7:
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
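+/* Check whether the requested CRC source exists on this platform. */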
+static int
+intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+                         const enum intel_pipe_crc_source source)
+{
+       if (IS_GEN(dev_priv, 2))
+               return i8xx_crc_source_valid(dev_priv, source);
+       else if (INTEL_GEN(dev_priv) < 5)
+               return i9xx_crc_source_valid(dev_priv, source);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return vlv_crc_source_valid(dev_priv, source);
+       else if (IS_GEN_RANGE(dev_priv, 5, 6))
+               return ilk_crc_source_valid(dev_priv, source);
+       else if (INTEL_GEN(dev_priv) < 9)
+               return ivb_crc_source_valid(dev_priv, source);
+       else
+               return skl_crc_source_valid(dev_priv, source);
+}
+
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+                                             size_t *count)
+{
+       *count = ARRAY_SIZE(pipe_crc_sources);
+       return pipe_crc_sources;
+}
+
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+                                size_t *values_cnt)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       enum intel_pipe_crc_source source;
+
+       if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+               DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+               return -EINVAL;
+       }
+
+       if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
+           intel_is_valid_crc_source(dev_priv, source) == 0) {
+               *values_cnt = 5;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+       enum intel_display_power_domain power_domain;
+       enum intel_pipe_crc_source source;
+       intel_wakeref_t wakeref;
+       u32 val = 0; /* shut up gcc */
+       int ret = 0;
+       bool enable;
+
+       if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+               DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+               return -EINVAL;
+       }
+
+       power_domain = POWER_DOMAIN_PIPE(crtc->index);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref) {
+               DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+               return -EIO;
+       }
+
+       enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
+       if (enable)
+               intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
+
+       ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+       if (ret != 0)
+               goto out;
+
+       pipe_crc->source = source;
+       I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+       POSTING_READ(PIPE_CRC_CTL(crtc->index));
+
+       if (!source) {
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
+       }
+
+       pipe_crc->skipped = 0;
+
+out:
+       if (!enable)
+               intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
+{
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+       u32 val = 0;
+
+       if (!crtc->crc.opened)
+               return;
+
+       if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
+               return;
+
+       /* Don't need pipe_crc->lock here, IRQs are not generated. */
+       pipe_crc->skipped = 0;
+
+       I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+       POSTING_READ(PIPE_CRC_CTL(crtc->index));
+}
+
+void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
+{
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+
+       /* Swallow crc's until we stop generating them. */
+       spin_lock_irq(&pipe_crc->lock);
+       pipe_crc->skipped = INT_MIN;
+       spin_unlock_irq(&pipe_crc->lock);
+
+       I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
+       POSTING_READ(PIPE_CRC_CTL(crtc->index));
+       synchronize_irq(dev_priv->drm.irq);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
new file mode 100644 (file)
index 0000000..db258a7
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PIPE_CRC_H__
+#define __INTEL_PIPE_CRC_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_crtc;
+
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+                                const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+                                             size_t *count);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
+#else
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+#endif
+
+#endif /* __INTEL_PIPE_CRC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
new file mode 100644 (file)
index 0000000..69709df
--- /dev/null
@@ -0,0 +1,1303 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_atomic_helper.h>
+
+#include "display/intel_dp.h"
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell the display controller supports Panel Self-Refresh on display
+ * panels which have a remote frame buffer (RFB) implemented according to the
+ * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
+ * when the system is idle but the display is on, as it completely eliminates
+ * display refresh requests to DDR memory as long as the frame buffer for that
+ * display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, which is why the i915 PSR support uses
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ */
+
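+/*
+ * Resolve the effective PSR enable state from the PSR debug flags and the
+ * enable_psr module parameter.
+ */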
+static bool psr_global_enabled(u32 debug)
+{
+       switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+       case I915_PSR_DEBUG_DEFAULT:
+               return i915_modparams.enable_psr;
+       case I915_PSR_DEBUG_DISABLE:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
+                              const struct intel_crtc_state *crtc_state)
+{
+       /* Cannot enable DSC and PSR2 simultaneously */
+       WARN_ON(crtc_state->dsc_params.compression_enable &&
+               crtc_state->has_psr2);
+
+       switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+       case I915_PSR_DEBUG_DISABLE:
+       case I915_PSR_DEBUG_FORCE_PSR1:
+               return false;
+       default:
+               return crtc_state->has_psr2;
+       }
+}
+
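+/* Map a transcoder to its bit shift in the EDP_PSR interrupt registers. */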
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+       switch (cpu_transcoder) {
+       case TRANSCODER_A:
+               return EDP_PSR_TRANSCODER_A_SHIFT;
+       case TRANSCODER_B:
+               return EDP_PSR_TRANSCODER_B_SHIFT;
+       case TRANSCODER_C:
+               return EDP_PSR_TRANSCODER_C_SHIFT;
+       default:
+               MISSING_CASE(cpu_transcoder);
+               /* fallthrough */
+       case TRANSCODER_EDP:
+               return EDP_PSR_TRANSCODER_EDP_SHIFT;
+       }
+}
+
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
+{
+       u32 debug_mask, mask;
+       enum transcoder cpu_transcoder;
+       u32 transcoders = BIT(TRANSCODER_EDP);
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               transcoders |= BIT(TRANSCODER_A) |
+                              BIT(TRANSCODER_B) |
+                              BIT(TRANSCODER_C);
+
+       debug_mask = 0;
+       mask = 0;
+       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+               int shift = edp_psr_shift(cpu_transcoder);
+
+               mask |= EDP_PSR_ERROR(shift);
+               debug_mask |= EDP_PSR_POST_EXIT(shift) |
+                             EDP_PSR_PRE_ENTRY(shift);
+       }
+
+       if (debug & I915_PSR_DEBUG_IRQ)
+               mask |= debug_mask;
+
+       I915_WRITE(EDP_PSR_IMR, ~mask);
+}
+
+static void psr_event_print(u32 val, bool psr2_enabled)
+{
+       DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+       if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
+               DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+       if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
+               DRM_DEBUG_KMS("\tPSR2 disabled\n");
+       if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
+               DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+       if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
+               DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+       if (val & PSR_EVENT_GRAPHICS_RESET)
+               DRM_DEBUG_KMS("\tGraphics reset\n");
+       if (val & PSR_EVENT_PCH_INTERRUPT)
+               DRM_DEBUG_KMS("\tPCH interrupt\n");
+       if (val & PSR_EVENT_MEMORY_UP)
+               DRM_DEBUG_KMS("\tMemory up\n");
+       if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
+               DRM_DEBUG_KMS("\tFront buffer modification\n");
+       if (val & PSR_EVENT_WD_TIMER_EXPIRE)
+               DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+       if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
+               DRM_DEBUG_KMS("\tPIPE registers updated\n");
+       if (val & PSR_EVENT_REGISTER_UPDATE)
+               DRM_DEBUG_KMS("\tRegister updated\n");
+       if (val & PSR_EVENT_HDCP_ENABLE)
+               DRM_DEBUG_KMS("\tHDCP enabled\n");
+       if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
+               DRM_DEBUG_KMS("\tKVMR session enabled\n");
+       if (val & PSR_EVENT_VBI_ENABLE)
+               DRM_DEBUG_KMS("\tVBI enabled\n");
+       if (val & PSR_EVENT_LPSP_MODE_EXIT)
+               DRM_DEBUG_KMS("\tLPSP mode exited\n");
+       if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
+               DRM_DEBUG_KMS("\tPSR disabled\n");
+}
+
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+{
+       u32 transcoders = BIT(TRANSCODER_EDP);
+       enum transcoder cpu_transcoder;
+       ktime_t time_ns = ktime_get();
+       u32 mask = 0;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               transcoders |= BIT(TRANSCODER_A) |
+                              BIT(TRANSCODER_B) |
+                              BIT(TRANSCODER_C);
+
+       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+               int shift = edp_psr_shift(cpu_transcoder);
+
+               if (psr_iir & EDP_PSR_ERROR(shift)) {
+                       DRM_WARN("[transcoder %s] PSR aux error\n",
+                                transcoder_name(cpu_transcoder));
+
+                       dev_priv->psr.irq_aux_error = true;
+
+                       /*
+                        * If this interrupt is not masked it will keep
+                        * firing so fast that it prevents the scheduled
+                        * work from running.
+                        * Also, after a PSR error we don't want to arm PSR
+                        * again, so we don't care about unmasking the
+                        * interrupt or clearing irq_aux_error.
+                        */
+                       mask |= EDP_PSR_ERROR(shift);
+               }
+
+               if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
+                       dev_priv->psr.last_entry_attempt = time_ns;
+                       DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+                                     transcoder_name(cpu_transcoder));
+               }
+
+               if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
+                       dev_priv->psr.last_exit = time_ns;
+                       DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+                                     transcoder_name(cpu_transcoder));
+
+                       if (INTEL_GEN(dev_priv) >= 9) {
+                               u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+                               bool psr2_enabled = dev_priv->psr.psr2_enabled;
+
+                               I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+                               psr_event_print(val, psr2_enabled);
+                       }
+               }
+       }
+
+       if (mask) {
+               mask |= I915_READ(EDP_PSR_IMR);
+               I915_WRITE(EDP_PSR_IMR, mask);
+
+               schedule_work(&dev_priv->psr.work);
+       }
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+       u8 alpm_caps = 0;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+                             &alpm_caps) != 1)
+               return false;
+       return alpm_caps & DP_ALPM_CAP;
+}
+
+static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
+{
+       u8 val = 8; /* assume the worst if we can't read the value */
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux,
+                             DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
+               val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+       else
+               DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
+       return val;
+}
+
+static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+{
+       u16 val;
+       ssize_t r;
+
+       /*
+        * Return the default X granularity if granularity is not required or
+        * if the DPCD read fails.
+        */
+       if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+               return 4;
+
+       r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+       if (r != 2)
+               DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+       /*
+        * Spec says that if the value read is 0 the default granularity should
+        * be used instead.
+        */
+       if (r != 2 || val == 0)
+               val = 4;
+
+       return val;
+}
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+       drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+                        sizeof(intel_dp->psr_dpcd));
+
+       if (!intel_dp->psr_dpcd[0])
+               return;
+       DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+                     intel_dp->psr_dpcd[0]);
+
+       if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+               DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+               return;
+       }
+
+       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+               DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+               return;
+       }
+
+       dev_priv->psr.sink_support = true;
+       dev_priv->psr.sink_sync_latency =
+               intel_dp_get_sink_sync_latency(intel_dp);
+
+       WARN_ON(dev_priv->psr.dp);
+       dev_priv->psr.dp = intel_dp;
+
+       if (INTEL_GEN(dev_priv) >= 9 &&
+           (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+               bool y_req = intel_dp->psr_dpcd[1] &
+                            DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+               bool alpm = intel_dp_get_alpm_status(intel_dp);
+
+               /*
+                * All panels that support PSR version 03h (PSR2 +
+                * Y-coordinate) can handle Y-coordinates in VSC, but we are
+                * only sure that it is going to be used when required by the
+                * panel. This way the panel is capable of doing selective
+                * updates without an aux frame sync.
+                *
+                * To support PSR version 02h and PSR version 03h panels
+                * without the Y-coordinate requirement we would need to
+                * enable GTC first.
+                */
+               dev_priv->psr.sink_psr2_support = y_req && alpm;
+               DRM_DEBUG_KMS("PSR2 %ssupported\n",
+                             dev_priv->psr.sink_psr2_support ? "" : "not ");
+
+               if (dev_priv->psr.sink_psr2_support) {
+                       dev_priv->psr.colorimetry_support =
+                               intel_dp_get_colorimetry_status(intel_dp);
+                       dev_priv->psr.su_x_granularity =
+                               intel_dp_get_su_x_granulartiy(intel_dp);
+               }
+       }
+}
+
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
+                               const struct intel_crtc_state *crtc_state)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct dp_sdp psr_vsc;
+
+       if (dev_priv->psr.psr2_enabled) {
+               /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+               memset(&psr_vsc, 0, sizeof(psr_vsc));
+               psr_vsc.sdp_header.HB0 = 0;
+               psr_vsc.sdp_header.HB1 = 0x7;
+               if (dev_priv->psr.colorimetry_support) {
+                       psr_vsc.sdp_header.HB2 = 0x5;
+                       psr_vsc.sdp_header.HB3 = 0x13;
+               } else {
+                       psr_vsc.sdp_header.HB2 = 0x4;
+                       psr_vsc.sdp_header.HB3 = 0xe;
+               }
+       } else {
+               /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+               memset(&psr_vsc, 0, sizeof(psr_vsc));
+               psr_vsc.sdp_header.HB0 = 0;
+               psr_vsc.sdp_header.HB1 = 0x7;
+               psr_vsc.sdp_header.HB2 = 0x2;
+               psr_vsc.sdp_header.HB3 = 0x8;
+       }
+
+       intel_dig_port->write_infoframe(&intel_dig_port->base,
+                                       crtc_state,
+                                       DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
+}
+
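+/*
+ * Program the AUX message (a native DPCD write setting DP_SET_POWER to D0)
+ * and control bits that the PSR hardware uses for its own AUX transactions.
+ */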
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u32 aux_clock_divider, aux_ctl;
+       int i;
+       static const u8 aux_msg[] = {
+               [0] = DP_AUX_NATIVE_WRITE << 4,
+               [1] = DP_SET_POWER >> 8,
+               [2] = DP_SET_POWER & 0xff,
+               [3] = 1 - 1,
+               [4] = DP_SET_POWER_D0,
+       };
+       u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+                          EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+                          EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+                          EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
+
+       BUILD_BUG_ON(sizeof(aux_msg) > 20);
+       for (i = 0; i < sizeof(aux_msg); i += 4)
+               I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
+                          intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+       /* Start with bits set for DDI_AUX_CTL register */
+       aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
+                                            aux_clock_divider);
+
+       /* Select only valid bits for SRD_AUX_CTL */
+       aux_ctl &= psr_aux_mask;
+       I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
+}
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 dpcd_val = DP_PSR_ENABLE;
+
+       /* Enable ALPM at sink for psr2 */
+       if (dev_priv->psr.psr2_enabled) {
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+                                  DP_ALPM_ENABLE);
+               dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+       } else {
+               if (dev_priv->psr.link_standby)
+                       dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+               if (INTEL_GEN(dev_priv) >= 8)
+                       dpcd_val |= DP_PSR_CRC_VERIFICATION;
+       }
+
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
+
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
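+/*
+ * Translate the VBT TP1/TP2/TP3 wakeup times into EDP_PSR_CTL training
+ * pattern time fields, selecting TP1+TP3 or TP1+TP2 based on HBR2 and TPS3
+ * support.
+ */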
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u32 val = 0;
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               val |= EDP_PSR_TP4_TIME_0US;
+
+       if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+               val |= EDP_PSR_TP1_TIME_0us;
+       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
+               val |= EDP_PSR_TP1_TIME_100us;
+       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+               val |= EDP_PSR_TP1_TIME_500us;
+       else
+               val |= EDP_PSR_TP1_TIME_2500us;
+
+       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+               val |= EDP_PSR_TP2_TP3_TIME_0us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+               val |= EDP_PSR_TP2_TP3_TIME_100us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+               val |= EDP_PSR_TP2_TP3_TIME_500us;
+       else
+               val |= EDP_PSR_TP2_TP3_TIME_2500us;
+
+       if (intel_dp_source_supports_hbr2(intel_dp) &&
+           drm_dp_tps3_supported(intel_dp->dpcd))
+               val |= EDP_PSR_TP1_TP3_SEL;
+       else
+               val |= EDP_PSR_TP1_TP2_SEL;
+
+       return val;
+}
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u32 max_sleep_time = 0x1f;
+       u32 val = EDP_PSR_ENABLE;
+
+       /* Let's use 6 as the minimum to cover all known cases including the
+        * off-by-one issue that HW has in some cases.
+        */
+       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+       /* sink_sync_latency of 8 means source has to wait for more than 8
+        * frames, we'll go with 9 frames for now
+        */
+       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+       val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+       val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+       if (IS_HASWELL(dev_priv))
+               val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+       if (dev_priv->psr.link_standby)
+               val |= EDP_PSR_LINK_STANDBY;
+
+       val |= intel_psr1_get_tp_time(intel_dp);
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               val |= EDP_PSR_CRC_ENABLE;
+
+       val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
+       I915_WRITE(EDP_PSR_CTL, val);
+}
+
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u32 val;
+
+       /* Let's use 6 as the minimum to cover all known cases including the
+        * off-by-one issue that HW has in some cases.
+        */
+       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+       val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+
+       val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               val |= EDP_Y_COORDINATE_ENABLE;
+
+       val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+
+       if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+           dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
+               val |= EDP_PSR2_TP2_TIME_50us;
+       else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
+               val |= EDP_PSR2_TP2_TIME_100us;
+       else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
+               val |= EDP_PSR2_TP2_TIME_500us;
+       else
+               val |= EDP_PSR2_TP2_TIME_2500us;
+
+       /*
+        * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL, and BSpec recommends
+        * keeping this bit unset while PSR2 is enabled.
+        */
+       I915_WRITE(EDP_PSR_CTL, 0);
+
+       I915_WRITE(EDP_PSR2_CTL, val);
+}
+
+static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+                                   struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
+       int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
+       int psr_max_h = 0, psr_max_v = 0;
+
+       if (!dev_priv->psr.sink_psr2_support)
+               return false;
+
+       /*
+        * DSC and PSR2 cannot be enabled simultaneously. If a requested
+        * resolution requires DSC to be enabled, priority is given to DSC
+        * over PSR2.
+        */
+       if (crtc_state->dsc_params.compression_enable) {
+               DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+               return false;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+               psr_max_h = 4096;
+               psr_max_v = 2304;
+       } else if (IS_GEN(dev_priv, 9)) {
+               psr_max_h = 3640;
+               psr_max_v = 2304;
+       }
+
+       if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
+               DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+                             crtc_hdisplay, crtc_vdisplay,
+                             psr_max_h, psr_max_v);
+               return false;
+       }
+
+       /*
+        * HW sends SU blocks of size four scan lines, which means the starting
+        * X coordinate and Y granularity requirements will always be met. We
+        * only need to validate that the SU block width is a multiple of
+        * the X granularity.
+        */
+       if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+               DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+                             crtc_hdisplay, dev_priv->psr.su_x_granularity);
+               return false;
+       }
+
+       if (crtc_state->crc_enabled) {
+               DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+               return false;
+       }
+
+       return true;
+}
+
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+                             struct intel_crtc_state *crtc_state)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+       int psr_setup_time;
+
+       if (!CAN_PSR(dev_priv))
+               return;
+
+       if (intel_dp != dev_priv->psr.dp)
+               return;
+
+       /*
+        * HSW spec explicitly says PSR is tied to port A.
+        * BDW+ platforms with DDI implementation of PSR have different
+        * PSR registers per transcoder and we only implement transcoder EDP
+        * ones. Since by display design transcoder EDP is tied to port A,
+        * we can safely bail out based on port A.
+        */
+       if (dig_port->base.port != PORT_A) {
+               DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
+               return;
+       }
+
+       if (dev_priv->psr.sink_not_reliable) {
+               DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
+               return;
+       }
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
+               return;
+       }
+
+       psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+       if (psr_setup_time < 0) {
+               DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+                             intel_dp->psr_dpcd[1]);
+               return;
+       }
+
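+       /*
+        * The PSR setup time, converted to scanlines, must fit within the
+        * vertical blanking interval of the mode.
+        */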
+       if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+           adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+               DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
+                             psr_setup_time);
+               return;
+       }
+
+       crtc_state->has_psr = true;
+       crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+}
+
+static void intel_psr_activate(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+       WARN_ON(dev_priv->psr.active);
+       lockdep_assert_held(&dev_priv->psr.lock);
+
+       /* psr1 and psr2 are mutually exclusive. */
+       if (dev_priv->psr.psr2_enabled)
+               hsw_activate_psr2(intel_dp);
+       else
+               hsw_activate_psr1(intel_dp);
+
+       dev_priv->psr.active = true;
+}
+
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+                                        enum transcoder cpu_transcoder)
+{
+       static const i915_reg_t regs[] = {
+               [TRANSCODER_A] = CHICKEN_TRANS_A,
+               [TRANSCODER_B] = CHICKEN_TRANS_B,
+               [TRANSCODER_C] = CHICKEN_TRANS_C,
+               [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+       };
+
+       WARN_ON(INTEL_GEN(dev_priv) < 9);
+
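+       /* Unknown transcoders fall back to transcoder A's chicken register. */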
+       if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+                   !regs[cpu_transcoder].reg))
+               cpu_transcoder = TRANSCODER_A;
+
+       return regs[cpu_transcoder];
+}
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+                                   const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       u32 mask;
+
+       /* Only HSW and BDW have PSR AUX registers that need to be set up.
+        * SKL+ use hardcoded values for PSR AUX transactions.
+        */
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               hsw_psr_setup_aux(intel_dp);
+
+       if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+                                          !IS_GEMINILAKE(dev_priv))) {
+               i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+                                                       cpu_transcoder);
+               u32 chicken = I915_READ(reg);
+
+               chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
+                          PSR2_ADD_VERTICAL_LINE_COUNT;
+               I915_WRITE(reg, chicken);
+       }
+
+       /*
+        * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
+        * Also mask LPSP to avoid a dependency on other drivers that might
+        * block runtime_pm, and to prevent other HW tracking issues now that
+        * we can rely on frontbuffer tracking.
+        */
+       mask = EDP_PSR_DEBUG_MASK_MEMUP |
+              EDP_PSR_DEBUG_MASK_HPD |
+              EDP_PSR_DEBUG_MASK_LPSP |
+              EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+       if (INTEL_GEN(dev_priv) < 11)
+               mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+       I915_WRITE(EDP_PSR_DEBUG, mask);
+}
+
+static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+                                   const struct intel_crtc_state *crtc_state)
+{
+       struct intel_dp *intel_dp = dev_priv->psr.dp;
+
+       WARN_ON(dev_priv->psr.enabled);
+
+       dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+       dev_priv->psr.busy_frontbuffer_bits = 0;
+       dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+
+       DRM_DEBUG_KMS("Enabling PSR%s\n",
+                     dev_priv->psr.psr2_enabled ? "2" : "1");
+       intel_psr_setup_vsc(intel_dp, crtc_state);
+       intel_psr_enable_sink(intel_dp);
+       intel_psr_enable_source(intel_dp, crtc_state);
+       dev_priv->psr.enabled = true;
+
+       intel_psr_activate(intel_dp);
+}
+
+/**
+ * intel_psr_enable - Enable PSR
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function can only be called after the pipe is fully trained and enabled.
+ */
+void intel_psr_enable(struct intel_dp *intel_dp,
+                     const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (!crtc_state->has_psr)
+               return;
+
+       if (WARN_ON(!CAN_PSR(dev_priv)))
+               return;
+
+       WARN_ON(dev_priv->drrs.dp);
+
+       mutex_lock(&dev_priv->psr.lock);
+
+       if (!psr_global_enabled(dev_priv->psr.debug)) {
+               DRM_DEBUG_KMS("PSR disabled by flag\n");
+               goto unlock;
+       }
+
+       intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       if (!dev_priv->psr.active) {
+               if (INTEL_GEN(dev_priv) >= 9)
+                       WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+               return;
+       }
+
+       if (dev_priv->psr.psr2_enabled) {
+               val = I915_READ(EDP_PSR2_CTL);
+               WARN_ON(!(val & EDP_PSR2_ENABLE));
+               I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+       } else {
+               val = I915_READ(EDP_PSR_CTL);
+               WARN_ON(!(val & EDP_PSR_ENABLE));
+               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+       }
+       dev_priv->psr.active = false;
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       i915_reg_t psr_status;
+       u32 psr_status_mask;
+
+       lockdep_assert_held(&dev_priv->psr.lock);
+
+       if (!dev_priv->psr.enabled)
+               return;
+
+       DRM_DEBUG_KMS("Disabling PSR%s\n",
+                     dev_priv->psr.psr2_enabled ? "2" : "1");
+
+       intel_psr_exit(dev_priv);
+
+       if (dev_priv->psr.psr2_enabled) {
+               psr_status = EDP_PSR2_STATUS;
+               psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+       } else {
+               psr_status = EDP_PSR_STATUS;
+               psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+       }
+
+       /* Wait till PSR is idle */
+       if (intel_wait_for_register(&dev_priv->uncore,
+                                   psr_status, psr_status_mask, 0, 2000))
+               DRM_ERROR("Timed out waiting PSR idle state\n");
+
+       /* Disable PSR on Sink */
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+       dev_priv->psr.enabled = false;
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ * @old_crtc_state: old CRTC state
+ *
+ * This function needs to be called before disabling the pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp,
+                      const struct intel_crtc_state *old_crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (!old_crtc_state->has_psr)
+               return;
+
+       if (WARN_ON(!CAN_PSR(dev_priv)))
+               return;
+
+       mutex_lock(&dev_priv->psr.lock);
+
+       intel_psr_disable_locked(intel_dp);
+
+       mutex_unlock(&dev_priv->psr.lock);
+       cancel_work_sync(&dev_priv->psr.work);
+}
+
+static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+{
+       /*
+        * Display WA #0884: all
+        * This documented WA for bxt can be safely applied
+        * broadly so we can force HW tracking to exit PSR
+        * instead of disabling and re-enabling.
+        * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+        * but it makes more sense to write to the currently active
+        * pipe.
+        */
+       I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+}
+
+/**
+ * intel_psr_update - Update PSR state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function will update the PSR state, disabling, enabling or switching PSR
+ * version when executing fastsets. For full modeset, intel_psr_disable() and
+ * intel_psr_enable() should be called instead.
+ */
+void intel_psr_update(struct intel_dp *intel_dp,
+                     const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct i915_psr *psr = &dev_priv->psr;
+       bool enable, psr2_enable;
+
+       if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+               return;
+
+       mutex_lock(&dev_priv->psr.lock);
+
+       enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+       psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+
+       if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+               /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+               if (crtc_state->crc_enabled && psr->enabled)
+                       psr_force_hw_tracking_exit(dev_priv);
+
+               goto unlock;
+       }
+
+       if (psr->enabled)
+               intel_psr_disable_locked(intel_dp);
+
+       if (enable)
+               intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ * @out_value: PSR status in case of failure
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ *
+ * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
+ */
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+                           u32 *out_value)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
+               return 0;
+
+       /* FIXME: Update this for PSR2 if we need to wait for idle */
+       if (READ_ONCE(dev_priv->psr.psr2_enabled))
+               return 0;
+
+       /*
+        * From bspec: Panel Self Refresh (BDW+)
+        * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+        * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+        * defensive enough to cover everything.
+        */
+
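+       /*
+        * For a 60 Hz panel that is roughly 16.7 ms + 6 ms + 1.5 ms
+        * (about 24 ms), so 50 ms leaves ample headroom.
+        */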
+       return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
+                                        EDP_PSR_STATUS_STATE_MASK,
+                                        EDP_PSR_STATUS_STATE_IDLE, 2, 50,
+                                        out_value);
+}
+
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+{
+       i915_reg_t reg;
+       u32 mask;
+       int err;
+
+       if (!dev_priv->psr.enabled)
+               return false;
+
+       if (dev_priv->psr.psr2_enabled) {
+               reg = EDP_PSR2_STATUS;
+               mask = EDP_PSR2_STATUS_STATE_MASK;
+       } else {
+               reg = EDP_PSR_STATUS;
+               mask = EDP_PSR_STATUS_STATE_MASK;
+       }
+
+       mutex_unlock(&dev_priv->psr.lock);
+
+       err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
+       if (err)
+               DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+       /* After the unlocked wait, verify that PSR is still wanted! */
+       mutex_lock(&dev_priv->psr.lock);
+       return err == 0 && dev_priv->psr.enabled;
+}
+
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_modeset_acquire_ctx ctx;
+       struct drm_atomic_state *state;
+       struct drm_crtc *crtc;
+       int err;
+
+       state = drm_atomic_state_alloc(dev);
+       if (!state)
+               return -ENOMEM;
+
+       drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+       state->acquire_ctx = &ctx;
+
+retry:
+       drm_for_each_crtc(crtc, dev) {
+               struct drm_crtc_state *crtc_state;
+               struct intel_crtc_state *intel_crtc_state;
+
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state)) {
+                       err = PTR_ERR(crtc_state);
+                       goto error;
+               }
+
+               intel_crtc_state = to_intel_crtc_state(crtc_state);
+
+               if (crtc_state->active && intel_crtc_state->has_psr) {
+                       /* Mark mode as changed to trigger a pipe->update() */
+                       crtc_state->mode_changed = true;
+                       break;
+               }
+       }
+
+       err = drm_atomic_commit(state);
+
+error:
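+       /* On lock contention, clear the state, back off and retry. */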
+       if (err == -EDEADLK) {
+               drm_atomic_state_clear(state);
+               err = drm_modeset_backoff(&ctx);
+               if (!err)
+                       goto retry;
+       }
+
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+       drm_atomic_state_put(state);
+
+       return err;
+}
+
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+{
+       const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+       u32 old_mode;
+       int ret;
+
+       if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
+           mode > I915_PSR_DEBUG_FORCE_PSR1) {
+               DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+               return -EINVAL;
+       }
+
+       ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+       if (ret)
+               return ret;
+
+       old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+       dev_priv->psr.debug = val;
+       intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+       mutex_unlock(&dev_priv->psr.lock);
+
+       if (old_mode != mode)
+               ret = intel_psr_fastset_force(dev_priv);
+
+       return ret;
+}
+
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+       struct i915_psr *psr = &dev_priv->psr;
+
+       intel_psr_disable_locked(psr->dp);
+       psr->sink_not_reliable = true;
+       /* let's make sure that the sink is awake */
+       drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), psr.work);
+
+       mutex_lock(&dev_priv->psr.lock);
+
+       if (!dev_priv->psr.enabled)
+               goto unlock;
+
+       if (READ_ONCE(dev_priv->psr.irq_aux_error))
+               intel_psr_handle_irq(dev_priv);
+
+       /*
+        * We have to make sure PSR is ready for re-enable,
+        * otherwise it stays disabled until the next full enable/disable cycle.
+        * PSR might take some time to get fully disabled
+        * and become ready for re-enable.
+        */
+       if (!__psr_wait_for_idle_locked(dev_priv))
+               goto unlock;
+
+       /*
+        * The delayed work can race with an invalidate hence we need to
+        * recheck. Since psr_flush first clears this and then reschedules we
+        * won't ever miss a flush when bailing out here.
+        */
+       if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+               goto unlock;
+
+       intel_psr_activate(dev_priv->psr.dp);
+unlock:
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the invalidate
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+                         unsigned frontbuffer_bits, enum fb_op_origin origin)
+{
+       if (!CAN_PSR(dev_priv))
+               return;
+
+       if (origin == ORIGIN_FLIP)
+               return;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
+       dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+       if (frontbuffer_bits)
+               intel_psr_exit(dev_priv);
+
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+                    unsigned frontbuffer_bits, enum fb_op_origin origin)
+{
+       if (!CAN_PSR(dev_priv))
+               return;
+
+       if (origin == ORIGIN_FLIP)
+               return;
+
+       mutex_lock(&dev_priv->psr.lock);
+       if (!dev_priv->psr.enabled) {
+               mutex_unlock(&dev_priv->psr.lock);
+               return;
+       }
+
+       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
+       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+       /* By definition flush = invalidate + flush */
+       if (frontbuffer_bits)
+               psr_force_hw_tracking_exit(dev_priv);
+
+       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+               schedule_work(&dev_priv->psr.work);
+       mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @dev_priv: i915 device private
+ *
+ * This function is called only once at driver load to initialize basic
+ * PSR stuff.
+ */
+void intel_psr_init(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       if (!HAS_PSR(dev_priv))
+               return;
+
+       dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
+               HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
+
+       if (!dev_priv->psr.sink_support)
+               return;
+
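+       /*
+        * With enable_psr=-1 (auto), force PSR off unless this is gen9+
+        * and the VBT enables it.
+        */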
+       if (i915_modparams.enable_psr == -1)
+               if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+                       i915_modparams.enable_psr = 0;
+
+       /*
+        * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+        * will still keep the error set even after the reset done in the
+        * irq_preinstall and irq_uninstall hooks.
+        * Enabling PSR in this situation causes the screen to freeze the
+        * first time the PSR HW tries to activate, so let's keep PSR disabled
+        * to avoid any rendering problems.
+        */
+       val = I915_READ(EDP_PSR_IIR);
+       val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+       if (val) {
+               DRM_DEBUG_KMS("PSR interruption error set\n");
+               dev_priv->psr.sink_not_reliable = true;
+       }
+
+       /* Set link_standby vs. link_off defaults */
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               /* HSW and BDW require workarounds that we don't implement. */
+               dev_priv->psr.link_standby = false;
+       else
+               /* For new platforms let's respect VBT back again */
+               dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+
+       INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+       mutex_init(&dev_priv->psr.lock);
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       struct i915_psr *psr = &dev_priv->psr;
+       u8 val;
+       const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+                         DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+                         DP_PSR_LINK_CRC_ERROR;
+
+       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+               return;
+
+       mutex_lock(&psr->lock);
+
+       if (!psr->enabled || psr->dp != intel_dp)
+               goto exit;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
+               DRM_ERROR("PSR_STATUS dpcd read failed\n");
+               goto exit;
+       }
+
+       if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
+               DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+               intel_psr_disable_locked(intel_dp);
+               psr->sink_not_reliable = true;
+       }
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
+               DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
+               goto exit;
+       }
+
+       if (val & DP_PSR_RFB_STORAGE_ERROR)
+               DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+       if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+               DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+       if (val & DP_PSR_LINK_CRC_ERROR)
+               DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+
+       if (val & ~errors)
+               DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
+                         val & ~errors);
+       if (val & errors) {
+               intel_psr_disable_locked(intel_dp);
+               psr->sink_not_reliable = true;
+       }
+       /* clear status register */
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+exit:
+       mutex_unlock(&psr->lock);
+}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       bool ret;
+
+       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+               return false;
+
+       mutex_lock(&dev_priv->psr.lock);
+       ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+       mutex_unlock(&dev_priv->psr.lock);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
new file mode 100644 (file)
index 0000000..dc81882
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PSR_H__
+#define __INTEL_PSR_H__
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_dp;
+
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+                     const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+                      const struct intel_crtc_state *old_crtc_state);
+void intel_psr_update(struct intel_dp *intel_dp,
+                     const struct intel_crtc_state *crtc_state);
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+                         unsigned frontbuffer_bits,
+                         enum fb_op_origin origin);
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+                    unsigned frontbuffer_bits,
+                    enum fb_op_origin origin);
+void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+                             struct intel_crtc_state *crtc_state);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+                           u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
new file mode 100644 (file)
index 0000000..0b749c2
--- /dev/null
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+#include "intel_quirks.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+       DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+       DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBTs incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+       DRM_INFO("applying backlight present quirk\n");
+}
+
+/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least 800 ms,
+ * which is 300 ms greater than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+       DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+       i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+       DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+       int device;
+       int subsystem_vendor;
+       int subsystem_device;
+       void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+       void (*hook)(struct drm_i915_private *i915);
+       const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+       DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+       return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+       {
+               .dmi_id_list = &(const struct dmi_system_id[]) {
+                       {
+                               .callback = intel_dmi_reverse_brightness,
+                               .ident = "NCR Corporation",
+                               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+                                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
+                               },
+                       },
+                       { }  /* terminating entry */
+               },
+               .hook = quirk_invert_brightness,
+       },
+};
+
+static struct intel_quirk intel_quirks[] = {
+       /* Lenovo U160 cannot use SSC on LVDS */
+       { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+       /* Sony Vaio Y cannot use SSC on LVDS */
+       { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+       /* Acer Aspire 5734Z must invert backlight brightness */
+       { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+       /* Acer/eMachines G725 */
+       { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+       /* Acer/eMachines e725 */
+       { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+       /* Acer/Packard Bell NCL20 */
+       { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+       /* Acer Aspire 4736Z */
+       { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+       /* Acer Aspire 5336 */
+       { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+       /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+       { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+       /* Acer C720 Chromebook (Core i3 4005U) */
+       { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+       /* Apple Macbook 2,1 (Core 2 T7400) */
+       { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+       /* Apple Macbook 4,1 */
+       { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+       /* Toshiba CB35 Chromebook (Celeron 2955U) */
+       { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+       /* HP Chromebook 14 (Celeron 2955U) */
+       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+       /* Dell Chromebook 11 */
+       { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+       /* Dell Chromebook 11 (2015 version) */
+       { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+       /* Toshiba Satellite P50-C-18C */
+       { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+       /* GeminiLake NUC */
+       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       /* ASRock ITX */
+       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+       struct pci_dev *d = i915->drm.pdev;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+               struct intel_quirk *q = &intel_quirks[i];
+
+               if (d->device == q->device &&
+                   (d->subsystem_vendor == q->subsystem_vendor ||
+                    q->subsystem_vendor == PCI_ANY_ID) &&
+                   (d->subsystem_device == q->subsystem_device ||
+                    q->subsystem_device == PCI_ANY_ID))
+                       q->hook(i915);
+       }
+       for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+               if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+                       intel_dmi_quirks[i].hook(i915);
+       }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
new file mode 100644 (file)
index 0000000..b0fcff1
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_QUIRKS_H__
+#define __INTEL_QUIRKS_H__
+
+struct drm_i915_private;
+
+void intel_init_quirks(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
new file mode 100644 (file)
index 0000000..004b520
--- /dev/null
@@ -0,0 +1,2464 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_atomic_plane.h"
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+bool is_planar_yuv_format(u32 pixelformat)
+{
+       switch (pixelformat) {
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_P010:
+       case DRM_FORMAT_P012:
+       case DRM_FORMAT_P016:
+               return true;
+       default:
+               return false;
+       }
+}
+
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+                            int usecs)
+{
+       /* paranoia */
+       if (!adjusted_mode->crtc_htotal)
+               return 1;
+
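+       /*
+        * crtc_clock is in kHz, so e.g. 100 us on a 1080p60 timing
+        * (148500 kHz clock, 2200 pixel htotal) rounds up to
+        * DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7 scanlines.
+        */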
+       return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
+                           1000 * adjusted_mode->crtc_htotal);
+}
+
+/* FIXME: We should take the spinlock only once for the entire update
+ * instead of once per mmio. */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically with respect to vblank. If the next vblank happens within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays.
+ */
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
+       long timeout = msecs_to_jiffies_timeout(1);
+       int scanline, min, max, vblank_start;
+       wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+       bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+               intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
+       DEFINE_WAIT(wait);
+       u32 psr_status;
+
+       vblank_start = adjusted_mode->crtc_vblank_start;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+               vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+       /* FIXME needs to be calibrated sensibly */
+       min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+                                                     VBLANK_EVASION_TIME_US);
+       max = vblank_start - 1;
+
+       if (min <= 0 || max <= 0)
+               goto irq_disable;
+
+       if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
+               goto irq_disable;
+
+       /*
+        * Wait for PSR to idle out after enabling the VBL interrupts.
+        * VBL interrupts will start the PSR exit and prevent a PSR
+        * re-entry as well.
+        */
+       if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
+               DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
+                         psr_status);
+
+       local_irq_disable();
+
+       crtc->debug.min_vbl = min;
+       crtc->debug.max_vbl = max;
+       trace_i915_pipe_update_start(crtc);
+
+       for (;;) {
+               /*
+                * prepare_to_wait() has a memory barrier, which guarantees
+                * other CPUs can see the task state update by the time we
+                * read the scanline.
+                */
+               prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+               scanline = intel_get_crtc_scanline(crtc);
+               if (scanline < min || scanline > max)
+                       break;
+
+               if (!timeout) {
+                       DRM_ERROR("Potential atomic update failure on pipe %c\n",
+                                 pipe_name(crtc->pipe));
+                       break;
+               }
+
+               local_irq_enable();
+
+               timeout = schedule_timeout(timeout);
+
+               local_irq_disable();
+       }
+
+       finish_wait(wq, &wait);
+
+       drm_crtc_vblank_put(&crtc->base);
+
+       /*
+        * On VLV/CHV DSI the scanline counter would appear to
+        * increment approx. 1/3 of a scanline before start of vblank.
+        * The registers still get latched at start of vblank however.
+        * This means we must not write any registers on the first
+        * line of vblank (since not the whole line is actually in
+        * vblank). And unfortunately we can't use the interrupt to
+        * wait here since it will fire too soon. We could use the
+        * frame start interrupt instead since it will fire after the
+        * critical scanline, but that would require more changes
+        * in the interrupt code. So for now we'll just do the nasty
+        * thing and poll for the bad scanline to pass us by.
+        *
+        * FIXME figure out if BXT+ DSI suffers from this as well
+        */
+       while (need_vlv_dsi_wa && scanline == vblank_start)
+               scanline = intel_get_crtc_scanline(crtc);
+
+       crtc->debug.scanline_start = scanline;
+       crtc->debug.start_vbl_time = ktime_get();
+       crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
+
+       trace_i915_pipe_update_vblank_evaded(crtc);
+       return;
+
+irq_disable:
+       local_irq_disable();
+}
+
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank.
+ */
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       int scanline_end = intel_get_crtc_scanline(crtc);
+       u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
+       ktime_t end_vbl_time = ktime_get();
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+       trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+
+       /* We're still in the vblank-evade critical section, so this can't race.
+        * It would be slightly nicer to just grab the vblank count and arm the
+        * event outside of the critical section - the spinlock might spin for a
+        * while ... */
+       if (new_crtc_state->base.event) {
+               WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+
+               spin_lock(&crtc->base.dev->event_lock);
+               drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
+               spin_unlock(&crtc->base.dev->event_lock);
+
+               new_crtc_state->base.event = NULL;
+       }
+
+       local_irq_enable();
+
+       if (intel_vgpu_active(dev_priv))
+               return;
+
+       if (crtc->debug.start_vbl_count &&
+           crtc->debug.start_vbl_count != end_vbl_count) {
+               DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+                         pipe_name(pipe), crtc->debug.start_vbl_count,
+                         end_vbl_count,
+                         ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
+                         crtc->debug.min_vbl, crtc->debug.max_vbl,
+                         crtc->debug.scanline_start, scanline_end);
+       }
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+       else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
+                VBLANK_EVASION_TIME_US)
+               DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+                        pipe_name(pipe),
+                        ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
+                        VBLANK_EVASION_TIME_US);
+#endif
+}
+
+int intel_plane_check_stride(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       u32 stride, max_stride;
+
+       /*
+        * We ignore stride for all invisible planes that
+        * can be remapped. Otherwise we could end up
+        * with a false positive when the remapping didn't
+        * kick in due to the plane being invisible.
+        */
+       if (intel_plane_can_remap(plane_state) &&
+           !plane_state->base.visible)
+               return 0;
+
+       /* FIXME other color planes? */
+       stride = plane_state->color_plane[0].stride;
+       max_stride = plane->max_stride(plane, fb->format->format,
+                                      fb->modifier, rotation);
+
+       if (stride > max_stride) {
+               DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
+                             fb->base.id, stride,
+                             plane->base.base.id, plane->base.name, max_stride);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       struct drm_rect *src = &plane_state->base.src;
+       u32 src_x, src_y, src_w, src_h, hsub, vsub;
+       bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
+
+       /*
+        * Hardware doesn't handle subpixel coordinates.
+        * Adjust to (macro)pixel boundary, but be careful not to
+        * increase the source viewport size, because that could
+        * push the downscaling factor out of bounds.
+        */
+       src_x = src->x1 >> 16;
+       src_w = drm_rect_width(src) >> 16;
+       src_y = src->y1 >> 16;
+       src_h = drm_rect_height(src) >> 16;
+
+       src->x1 = src_x << 16;
+       src->x2 = (src_x + src_w) << 16;
+       src->y1 = src_y << 16;
+       src->y2 = (src_y + src_h) << 16;
+
+       if (!fb->format->is_yuv)
+               return 0;
+
+       /* YUV specific checks */
+       if (!rotated) {
+               hsub = fb->format->hsub;
+               vsub = fb->format->vsub;
+       } else {
+               hsub = vsub = max(fb->format->hsub, fb->format->vsub);
+       }
+
+       if (src_x % hsub || src_w % hsub) {
+               DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
+                             src_x, src_w, hsub, rotated ? "rotated " : "");
+               return -EINVAL;
+       }
+
+       if (src_y % vsub || src_h % vsub) {
+               DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
+                             src_y, src_h, vsub, rotated ? "rotated " : "");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static unsigned int
+skl_plane_max_stride(struct intel_plane *plane,
+                    u32 pixel_format, u64 modifier,
+                    unsigned int rotation)
+{
+       const struct drm_format_info *info = drm_format_info(pixel_format);
+       int cpp = info->cpp[0];
+
+       /*
+        * "The stride in bytes must not exceed the
+        * of the size of 8K pixels and 32K bytes."
+        */
+       if (drm_rotation_90_or_270(rotation))
+               return min(8192, 32768 / cpp);
+       else
+               return min(8192 * cpp, 32768);
+}
+
+static void
+skl_program_scaler(struct intel_plane *plane,
+                  const struct intel_crtc_state *crtc_state,
+                  const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       int scaler_id = plane_state->scaler_id;
+       const struct intel_scaler *scaler =
+               &crtc_state->scaler_state.scalers[scaler_id];
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u16 y_hphase, uv_rgb_hphase;
+       u16 y_vphase, uv_rgb_vphase;
+       int hscale, vscale;
+
+       hscale = drm_rect_calc_hscale(&plane_state->base.src,
+                                     &plane_state->base.dst,
+                                     0, INT_MAX);
+       vscale = drm_rect_calc_vscale(&plane_state->base.src,
+                                     &plane_state->base.dst,
+                                     0, INT_MAX);
+
+       /* TODO: handle sub-pixel coordinates */
+       if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+           !icl_is_hdr_plane(dev_priv, plane->id)) {
+               y_hphase = skl_scaler_calc_phase(1, hscale, false);
+               y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+               /* MPEG2 chroma siting convention */
+               uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+               uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+       } else {
+               /* not used */
+               y_hphase = 0;
+               y_vphase = 0;
+
+               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+       }
+
+       I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+                     PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+       I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+                     PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+       I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+                     PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+       I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+       I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+}
+
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI           0x1800
+#define PREOFF_YUV_TO_RGB_ME           0x1F00
+#define PREOFF_YUV_TO_RGB_LO           0x1800
+
+#define  ROFF(x)          (((x) & 0xffff) << 16)
+#define  GOFF(x)          (((x) & 0xffff) << 0)
+#define  BOFF(x)          (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc(struct intel_plane *plane,
+                     const struct intel_crtc_state *crtc_state,
+                     const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       enum plane_id plane_id = plane->id;
+
+       static const u16 input_csc_matrix[][9] = {
+               /*
+                * BT.601 full range YCbCr -> full range RGB
+                * The matrix required is :
+                * [1.000, 0.000, 1.371,
+                *  1.000, -0.336, -0.698,
+                *  1.000, 1.732, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT601] = {
+                       0x7AF8, 0x7800, 0x0,
+                       0x8B28, 0x7800, 0x9AC0,
+                       0x0, 0x7800, 0x7DD8,
+               },
+               /*
+                * BT.709 full range YCbCr -> full range RGB
+                * The matrix required is :
+                * [1.000, 0.000, 1.574,
+                *  1.000, -0.187, -0.468,
+                *  1.000, 1.855, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT709] = {
+                       0x7C98, 0x7800, 0x0,
+                       0x9EF8, 0x7800, 0xABF8,
+                       0x0, 0x7800,  0x7ED8,
+               },
+       };
+
+       /* Matrix for Limited Range to Full Range Conversion */
+       static const u16 input_csc_matrix_lr[][9] = {
+               /*
+                * BT.601 Limited range YCbCr -> full range RGB
+                * The matrix required is :
+                * [1.164384, 0.000, 1.596370,
+                *  1.138393, -0.382500, -0.794598,
+                *  1.138393, 1.971696, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT601] = {
+                       0x7CC8, 0x7950, 0x0,
+                       0x8CB8, 0x7918, 0x9C40,
+                       0x0, 0x7918, 0x7FC8,
+               },
+               /*
+                * BT.709 Limited range YCbCr -> full range RGB
+                * The matrix required is :
+                * [1.164, 0.000, 1.833671,
+                *  1.138393, -0.213249, -0.532909,
+                *  1.138393, 2.112402, 0.0000]
+                */
+               [DRM_COLOR_YCBCR_BT709] = {
+                       0x7EA8, 0x7950, 0x0,
+                       0x8888, 0x7918, 0xADA8,
+                       0x0, 0x7918,  0x6870,
+               },
+       };
+       const u16 *csc;
+
+       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+               csc = input_csc_matrix[plane_state->base.color_encoding];
+       else
+               csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+                     GOFF(csc[1]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+                     GOFF(csc[4]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+                     GOFF(csc[7]));
+       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+                     PREOFF_YUV_TO_RGB_HI);
+       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+                     PREOFF_YUV_TO_RGB_ME);
+       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+                     PREOFF_YUV_TO_RGB_LO);
+       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
+static void
+skl_program_plane(struct intel_plane *plane,
+                 const struct intel_crtc_state *crtc_state,
+                 const struct intel_plane_state *plane_state,
+                 int color_plane, bool slave, u32 plane_ctl)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       u32 surf_addr = plane_state->color_plane[color_plane].offset;
+       u32 stride = skl_plane_stride(plane_state, color_plane);
+       u32 aux_stride = skl_plane_stride(plane_state, 1);
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       u32 x = plane_state->color_plane[color_plane].x;
+       u32 y = plane_state->color_plane[color_plane].y;
+       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       struct intel_plane *linked = plane_state->linked_plane;
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       u8 alpha = plane_state->base.alpha >> 8;
+       u32 plane_color_ctl = 0;
+       unsigned long irqflags;
+       u32 keymsk, keymax;
+
+       plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               plane_color_ctl = plane_state->color_ctl |
+                       glk_plane_color_ctl_crtc(crtc_state);
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+
+       keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+       keymsk = key->channel_mask & 0x7ffffff;
+       if (alpha < 0xff)
+               keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+       /* The scaler will handle the output position */
+       if (plane_state->scaler_id >= 0) {
+               crtc_x = 0;
+               crtc_y = 0;
+       }
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+       I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+       I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+       I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
+                     (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
+
+       if (icl_is_hdr_plane(dev_priv, plane_id)) {
+               u32 cus_ctl = 0;
+
+               if (linked) {
+                       /* Enable and use MPEG-2 chroma siting */
+                       cus_ctl = PLANE_CUS_ENABLE |
+                               PLANE_CUS_HPHASE_0 |
+                               PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+                               PLANE_CUS_VPHASE_0_25;
+
+                       if (linked->id == PLANE_SPRITE5)
+                               cus_ctl |= PLANE_CUS_PLANE_7;
+                       else if (linked->id == PLANE_SPRITE4)
+                               cus_ctl |= PLANE_CUS_PLANE_6;
+                       else
+                               MISSING_CASE(linked->id);
+               }
+
+               I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
+       }
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+
+       if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+               icl_program_input_csc(plane, crtc_state, plane_state);
+
+       skl_write_plane_wm(plane, crtc_state);
+
+       I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+       I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+       I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+       I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+       if (INTEL_GEN(dev_priv) < 11)
+               I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+                             (plane_state->color_plane[1].y << 16) |
+                             plane_state->color_plane[1].x);
+
+       /*
+        * The control register self-arms if the plane was previously
+        * disabled. Try to make the plane enable atomic by writing
+        * the control register just before the surface register.
+        */
+       I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
+       I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
+                     intel_plane_ggtt_offset(plane_state) + surf_addr);
+
+       if (!slave && plane_state->scaler_id >= 0)
+               skl_program_scaler(plane, crtc_state, plane_state);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+skl_update_plane(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
+       int color_plane = 0;
+
+       if (plane_state->linked_plane) {
+               /* Program the UV plane */
+               color_plane = 1;
+       }
+
+       skl_program_plane(plane, crtc_state, plane_state,
+                         color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
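+       /*
+        * The slave plane fetches the Y surface (color plane 0) and is
+        * flagged as the Y plane of the YUV420 master/slave pair.
+        */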
+       skl_program_plane(plane, crtc_state, plane_state, 0, true,
+                         plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
+
+static void
+skl_disable_plane(struct intel_plane *plane,
+                 const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       if (icl_is_hdr_plane(dev_priv, plane_id))
+               I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+
+       skl_write_plane_wm(plane, crtc_state);
+
+       I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+       I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
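+/*
+ * Read back whether the plane is enabled in hardware. The display power
+ * reference is taken only if the power well is already enabled, so probing
+ * the state never powers anything up.
+ */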
+static bool
+skl_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane_id plane_id = plane->id;
+       intel_wakeref_t wakeref;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+       *pipe = plane->pipe;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static void
+chv_update_csc(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum plane_id plane_id = plane->id;
+       /*
+        * |r|   | c0 c1 c2 |   |cr|
+        * |g| = | c3 c4 c5 | x |y |
+        * |b|   | c6 c7 c8 |   |cb|
+        *
+        * Coefficients are s3.12.
+        *
+        * Cb and Cr apparently come in as signed already, and
+        * we always get full range data in on account of CLRC0/1.
+        */
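+       /*
+        * The values are the usual YCbCr->RGB conversion factors scaled by
+        * 2^12, e.g. for BT.601 full range 1.402 * 4096 ~= 5743 (Cr->R) and
+        * 1.772 * 4096 ~= 7258 (Cb->B), with 4096 standing in for 1.0 in
+        * the Y column.
+        */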
+       static const s16 csc_matrix[][9] = {
+               /* BT.601 full range YCbCr -> full range RGB */
+               [DRM_COLOR_YCBCR_BT601] = {
+                        5743, 4096,     0,
+                       -2925, 4096, -1410,
+                           0, 4096,  7258,
+               },
+               /* BT.709 full range YCbCr -> full range RGB */
+               [DRM_COLOR_YCBCR_BT709] = {
+                        6450, 4096,     0,
+                       -1917, 4096,  -767,
+                           0, 4096,  7601,
+               },
+       };
+       const s16 *csc = csc_matrix[plane_state->base.color_encoding];
+
+       /* RGB data seems to always bypass the CSC */
+       if (!fb->format->is_yuv)
+               return;
+
+       I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+       I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+       I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+       I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+       I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+       I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+       I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+       I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+
+       I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+       I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+       I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+
+       I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+       I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+       I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
+
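+/* sin/cos of a 0 degree hue rotation for vlv_update_clrc() */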
+#define SIN_0 0
+#define COS_0 1
+
+static void
+vlv_update_clrc(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum pipe pipe = plane->pipe;
+       enum plane_id plane_id = plane->id;
+       int contrast, brightness, sh_scale, sh_sin, sh_cos;
+
+       if (fb->format->is_yuv &&
+           plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+               /*
+                * Expand limited range to full range:
+                * Contrast is applied first and is used to expand Y range.
+                * Brightness is applied second and is used to remove the
+                * offset from Y. Saturation/hue is used to expand CbCr range.
+                */
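+               /*
+                * With 6 fractional bits for contrast and 7 for the
+                * saturation/hue scale this works out to contrast = 75
+                * (~255/219), brightness = -19 and sh_scale = 146
+                * (~128/112); the pass-through case below uses 64/128,
+                * i.e. 1.0.
+                */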
+               contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
+               brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
+               sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
+               sh_sin = SIN_0 * sh_scale;
+               sh_cos = COS_0 * sh_scale;
+       } else {
+               /* Pass-through everything. */
+               contrast = 1 << 6;
+               brightness = 0;
+               sh_scale = 1 << 7;
+               sh_sin = SIN_0 * sh_scale;
+               sh_cos = COS_0 * sh_scale;
+       }
+
+       /* FIXME these registers are single buffered :( */
+       I915_WRITE_FW(SPCLRC0(pipe, plane_id),
+                     SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+       I915_WRITE_FW(SPCLRC1(pipe, plane_id),
+                     SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+}
+
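+/*
+ * The sprite control value is built in two parts: vlv_sprite_ctl_crtc()
+ * covers the bits that depend only on crtc state and is OR'd in at update
+ * time, while vlv_sprite_ctl() computes the plane state dependent bits
+ * that get cached in plane_state->ctl during atomic check.
+ */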
+static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       u32 sprctl = 0;
+
+       if (crtc_state->gamma_enable)
+               sprctl |= SP_GAMMA_ENABLE;
+
+       return sprctl;
+}
+
+static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       u32 sprctl;
+
+       sprctl = SP_ENABLE;
+
+       switch (fb->format->format) {
+       case DRM_FORMAT_YUYV:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+               break;
+       case DRM_FORMAT_RGB565:
+               sprctl |= SP_FORMAT_BGR565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               sprctl |= SP_FORMAT_BGRX8888;
+               break;
+       case DRM_FORMAT_ARGB8888:
+               sprctl |= SP_FORMAT_BGRA8888;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               sprctl |= SP_FORMAT_RGBX1010102;
+               break;
+       case DRM_FORMAT_ABGR2101010:
+               sprctl |= SP_FORMAT_RGBA1010102;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               sprctl |= SP_FORMAT_RGBX8888;
+               break;
+       case DRM_FORMAT_ABGR8888:
+               sprctl |= SP_FORMAT_RGBA8888;
+               break;
+       default:
+               MISSING_CASE(fb->format->format);
+               return 0;
+       }
+
+       if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+               sprctl |= SP_YUV_FORMAT_BT709;
+
+       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+               sprctl |= SP_TILED;
+
+       if (rotation & DRM_MODE_ROTATE_180)
+               sprctl |= SP_ROTATE_180;
+
+       if (rotation & DRM_MODE_REFLECT_X)
+               sprctl |= SP_MIRROR;
+
+       if (key->flags & I915_SET_COLORKEY_SOURCE)
+               sprctl |= SP_SOURCE_KEY;
+
+       return sprctl;
+}
+
+static void
+vlv_update_plane(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       enum plane_id plane_id = plane->id;
+       u32 sprsurf_offset = plane_state->color_plane[0].offset;
+       u32 linear_offset;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 x = plane_state->color_plane[0].x;
+       u32 y = plane_state->color_plane[0].y;
+       unsigned long irqflags;
+       u32 sprctl;
+
+       sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
+
+       /* Sizes are 0 based */
+       crtc_w--;
+       crtc_h--;
+
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+                     plane_state->color_plane[0].stride);
+       I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+       I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+       I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
+
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
+               chv_update_csc(plane_state);
+
+       if (key->flags) {
+               I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
+               I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+               I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+       }
+
+       I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+       I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+
+       /*
+        * The control register self-arms if the plane was previously
+        * disabled. Try to make the plane enable atomic by writing
+        * the control register just before the surface register.
+        */
+       I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
+       I915_WRITE_FW(SPSURF(pipe, plane_id),
+                     intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+
+       vlv_update_clrc(plane_state);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+vlv_disable_plane(struct intel_plane *plane,
+                 const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       enum plane_id plane_id = plane->id;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
+       I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool
+vlv_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane_id plane_id = plane->id;
+       intel_wakeref_t wakeref;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+       *pipe = plane->pipe;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       u32 sprctl = 0;
+
+       if (crtc_state->gamma_enable)
+               sprctl |= SPRITE_GAMMA_ENABLE;
+
+       if (crtc_state->csc_enable)
+               sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+       return sprctl;
+}
+
+static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       u32 sprctl;
+
+       sprctl = SPRITE_ENABLE;
+
+       if (IS_IVYBRIDGE(dev_priv))
+               sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
+       switch (fb->format->format) {
+       case DRM_FORMAT_XBGR8888:
+               sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               sprctl |= SPRITE_FORMAT_RGBX888;
+               break;
+       case DRM_FORMAT_YUYV:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+               break;
+       default:
+               MISSING_CASE(fb->format->format);
+               return 0;
+       }
+
+       if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+               sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+               sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
+
+       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+               sprctl |= SPRITE_TILED;
+
+       if (rotation & DRM_MODE_ROTATE_180)
+               sprctl |= SPRITE_ROTATE_180;
+
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               sprctl |= SPRITE_DEST_KEY;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               sprctl |= SPRITE_SOURCE_KEY;
+
+       return sprctl;
+}
+
+static void
+ivb_update_plane(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       u32 sprsurf_offset = plane_state->color_plane[0].offset;
+       u32 linear_offset;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 x = plane_state->color_plane[0].x;
+       u32 y = plane_state->color_plane[0].y;
+       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 sprctl, sprscale = 0;
+       unsigned long irqflags;
+
+       sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       if (crtc_w != src_w || crtc_h != src_h)
+               sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
+       I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+       I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+       if (IS_IVYBRIDGE(dev_priv))
+               I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
+       if (key->flags) {
+               I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
+               I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+               I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+       }
+
+       /*
+        * HSW consolidates SPRTILEOFF and SPRLINOFF into a single
+        * SPROFFSET register.
+        */
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
+       } else {
+               I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+               I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+       }
+
+       /*
+        * The control register self-arms if the plane was previously
+        * disabled. Try to make the plane enable atomic by writing
+        * the control register just before the surface register.
+        */
+       I915_WRITE_FW(SPRCTL(pipe), sprctl);
+       I915_WRITE_FW(SPRSURF(pipe),
+                     intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+ivb_disable_plane(struct intel_plane *plane,
+                 const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(SPRCTL(pipe), 0);
+       /* Disable the scaler */
+       if (IS_IVYBRIDGE(dev_priv))
+               I915_WRITE_FW(SPRSCALE(pipe), 0);
+       I915_WRITE_FW(SPRSURF(pipe), 0);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool
+ivb_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+       *pipe = plane->pipe;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static unsigned int
+g4x_sprite_max_stride(struct intel_plane *plane,
+                     u32 pixel_format, u64 modifier,
+                     unsigned int rotation)
+{
+       return 16384;
+}
+
+static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+       u32 dvscntr = 0;
+
+       if (crtc_state->gamma_enable)
+               dvscntr |= DVS_GAMMA_ENABLE;
+
+       if (crtc_state->csc_enable)
+               dvscntr |= DVS_PIPE_CSC_ENABLE;
+
+       return dvscntr;
+}
+
+static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       u32 dvscntr;
+
+       dvscntr = DVS_ENABLE;
+
+       if (IS_GEN(dev_priv, 6))
+               dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+
+       switch (fb->format->format) {
+       case DRM_FORMAT_XBGR8888:
+               dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               dvscntr |= DVS_FORMAT_RGBX888;
+               break;
+       case DRM_FORMAT_YUYV:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+               break;
+       default:
+               MISSING_CASE(fb->format->format);
+               return 0;
+       }
+
+       if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+               dvscntr |= DVS_YUV_FORMAT_BT709;
+
+       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+               dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
+
+       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+               dvscntr |= DVS_TILED;
+
+       if (rotation & DRM_MODE_ROTATE_180)
+               dvscntr |= DVS_ROTATE_180;
+
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               dvscntr |= DVS_DEST_KEY;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               dvscntr |= DVS_SOURCE_KEY;
+
+       return dvscntr;
+}
+
+static void
+g4x_update_plane(struct intel_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       u32 dvssurf_offset = plane_state->color_plane[0].offset;
+       u32 linear_offset;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+       u32 x = plane_state->color_plane[0].x;
+       u32 y = plane_state->color_plane[0].y;
+       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+       u32 dvscntr, dvsscale = 0;
+       unsigned long irqflags;
+
+       dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       if (crtc_w != src_w || crtc_h != src_h)
+               dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
+       I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+       I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+       I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
+       if (key->flags) {
+               I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
+               I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+               I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+       }
+
+       I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+       I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+
+       /*
+        * The control register self-arms if the plane was previously
+        * disabled. Try to make the plane enable atomic by writing
+        * the control register just before the surface register.
+        */
+       I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
+       I915_WRITE_FW(DVSSURF(pipe),
+                     intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+g4x_disable_plane(struct intel_plane *plane,
+                 const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       I915_WRITE_FW(DVSCNTR(pipe), 0);
+       /* Disable the scaler */
+       I915_WRITE_FW(DVSSCALE(pipe), 0);
+       I915_WRITE_FW(DVSSURF(pipe), 0);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool
+g4x_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       intel_wakeref_t wakeref;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wakeref)
+               return false;
+
+       ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+       *pipe = plane->pipe;
+
+       intel_display_power_put(dev_priv, power_domain, wakeref);
+
+       return ret;
+}
+
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+       if (!fb)
+               return false;
+
+       switch (fb->format->format) {
+       case DRM_FORMAT_C8:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static int
+g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
+                        struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       const struct drm_rect *src = &plane_state->base.src;
+       const struct drm_rect *dst = &plane_state->base.dst;
+       int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+       unsigned int cpp = fb->format->cpp[0];
+       unsigned int width_bytes;
+       int min_width, min_height;
+
+       crtc_w = drm_rect_width(dst);
+       crtc_h = drm_rect_height(dst);
+
+       src_x = src->x1 >> 16;
+       src_y = src->y1 >> 16;
+       src_w = drm_rect_width(src) >> 16;
+       src_h = drm_rect_height(src) >> 16;
+
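+       /* The restrictions below only apply when the sprite scaler is used */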
+       if (src_w == crtc_w && src_h == crtc_h)
+               return 0;
+
+       min_width = 3;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               if (src_h & 1) {
+                       DRM_DEBUG_KMS("Source height must be even with interlaced modes\n");
+                       return -EINVAL;
+               }
+               min_height = 6;
+       } else {
+               min_height = 3;
+       }
+
+       width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
+
+       if (src_w < min_width || src_h < min_height ||
+           src_w > 2048 || src_h > 2048) {
+               DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
+                             src_w, src_h, min_width, min_height, 2048, 2048);
+               return -EINVAL;
+       }
+
+       if (width_bytes > 4096) {
+               DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n",
+                             width_bytes, 4096);
+               return -EINVAL;
+       }
+
+       /* width_bytes > 4096 has already been rejected above */
+       if (fb->pitches[0] > 4096) {
+               DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
+                             fb->pitches[0], 4096);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+g4x_sprite_check(struct intel_crtc_state *crtc_state,
+                struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+       int ret;
+
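+       /*
+        * Scaling limits are src:dest ratios in 16.16 fixed point: pre-gen7
+        * sprites are allowed to downscale up to 16x and IVB up to 2x,
+        * everything else is left at NO_SCALING.
+        */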
+       if (intel_fb_scalable(plane_state->base.fb)) {
+               if (INTEL_GEN(dev_priv) < 7) {
+                       min_scale = 1;
+                       max_scale = 16 << 16;
+               } else if (IS_IVYBRIDGE(dev_priv)) {
+                       min_scale = 1;
+                       max_scale = 2 << 16;
+               }
+       }
+
+       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+                                                 &crtc_state->base,
+                                                 min_scale, max_scale,
+                                                 true, true);
+       if (ret)
+               return ret;
+
+       ret = i9xx_check_plane_surface(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       ret = intel_plane_check_src_coordinates(plane_state);
+       if (ret)
+               return ret;
+
+       ret = g4x_sprite_check_scaling(crtc_state, plane_state);
+       if (ret)
+               return ret;
+
+       if (INTEL_GEN(dev_priv) >= 7)
+               plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
+       else
+               plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
+
+       return 0;
+}
+
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       unsigned int rotation = plane_state->base.rotation;
+
+       /* CHV ignores the mirror bit when the rotate bit is set :( */
+       if (IS_CHERRYVIEW(dev_priv) &&
+           rotation & DRM_MODE_ROTATE_180 &&
+           rotation & DRM_MODE_REFLECT_X) {
+               DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+vlv_sprite_check(struct intel_crtc_state *crtc_state,
+                struct intel_plane_state *plane_state)
+{
+       int ret;
+
+       ret = chv_plane_check_rotation(plane_state);
+       if (ret)
+               return ret;
+
+       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+                                                 &crtc_state->base,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 true, true);
+       if (ret)
+               return ret;
+
+       ret = i9xx_check_plane_surface(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       ret = intel_plane_check_src_coordinates(plane_state);
+       if (ret)
+               return ret;
+
+       plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
+
+       return 0;
+}
+
+static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
+                             const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       struct drm_format_name_buf format_name;
+
+       if (!fb)
+               return 0;
+
+       if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
+           is_ccs_modifier(fb->modifier)) {
+               DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
+                             rotation);
+               return -EINVAL;
+       }
+
+       if (rotation & DRM_MODE_REFLECT_X &&
+           fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+               DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+               return -EINVAL;
+       }
+
+       if (drm_rotation_90_or_270(rotation)) {
+               if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+                   fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
+                       DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+                       return -EINVAL;
+               }
+
+               /*
+                * 90/270 rotation is not allowed with RGB64 16:16:16:16 or
+                * indexed 8-bit formats. RGB 16-bit 5:6:5 is allowed from
+                * gen11 onwards.
+                */
+               switch (fb->format->format) {
+               case DRM_FORMAT_RGB565:
+                       if (INTEL_GEN(dev_priv) >= 11)
+                               break;
+                       /* fall through */
+               case DRM_FORMAT_C8:
+               case DRM_FORMAT_XRGB16161616F:
+               case DRM_FORMAT_XBGR16161616F:
+               case DRM_FORMAT_ARGB16161616F:
+               case DRM_FORMAT_ABGR16161616F:
+               case DRM_FORMAT_Y210:
+               case DRM_FORMAT_Y212:
+               case DRM_FORMAT_Y216:
+               case DRM_FORMAT_XVYU12_16161616:
+               case DRM_FORMAT_XVYU16161616:
+                       DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+                                     drm_get_format_name(fb->format->format,
+                                                         &format_name));
+                       return -EINVAL;
+               default:
+                       break;
+               }
+       }
+
+       /* Y-tiling is not supported in IF-ID Interlace mode */
+       if (crtc_state->base.enable &&
+           crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+           (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+            fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+            fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+            fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
+               DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
+                                          const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_w = drm_rect_width(&plane_state->base.dst);
+       int pipe_src_w = crtc_state->pipe_src_w;
+
+       /*
+        * Display WA #1175: cnl,glk
+        * Planes other than the cursor may cause FIFO underflow and display
+        * corruption if starting less than 4 pixels from the right edge of
+        * the screen.
+        * Besides the WA above, also reject the similar case where a plane
+        * other than the cursor ends less than 4 pixels from the left edge
+        * of the screen, which may likewise cause FIFO underflow and display
+        * corruption.
+        */
+       if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+           (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
+               DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
+                             crtc_x + crtc_w < 4 ? "end" : "start",
+                             crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+                             4, pipe_src_w - 4);
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       unsigned int rotation = plane_state->base.rotation;
+       int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+       /* Display WA #1106 */
+       if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
+           (rotation == DRM_MODE_ROTATE_270 ||
+            rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+               DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+                          struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+       int ret;
+
+       ret = skl_plane_check_fb(crtc_state, plane_state);
+       if (ret)
+               return ret;
+
+       /* use scaler when colorkey is not required */
+       if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
+               min_scale = 1;
+               max_scale = skl_max_scale(crtc_state, fb->format->format);
+       }
+
+       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+                                                 &crtc_state->base,
+                                                 min_scale, max_scale,
+                                                 true, true);
+       if (ret)
+               return ret;
+
+       ret = skl_check_plane_surface(plane_state);
+       if (ret)
+               return ret;
+
+       if (!plane_state->base.visible)
+               return 0;
+
+       ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
+       if (ret)
+               return ret;
+
+       ret = intel_plane_check_src_coordinates(plane_state);
+       if (ret)
+               return ret;
+
+       ret = skl_plane_check_nv12_rotation(plane_state);
+       if (ret)
+               return ret;
+
+       /* HW only has 8 bits of alpha precision, disable plane if invisible */
+       if (!(plane_state->base.alpha >> 8))
+               plane_state->base.visible = false;
+
+       plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
+                                                            plane_state);
+
+       return 0;
+}
+
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+       return INTEL_GEN(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+                                const struct drm_intel_sprite_colorkey *set)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+       *key = *set;
+
+       /*
+        * We want src key enabled on the
+        * sprite and not on the primary.
+        */
+       if (plane->id == PLANE_PRIMARY &&
+           set->flags & I915_SET_COLORKEY_SOURCE)
+               key->flags = 0;
+
+       /*
+        * On SKL+ we want dst key enabled on
+        * the primary and not on the sprite.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               key->flags = 0;
+}
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_intel_sprite_colorkey *set = data;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       struct drm_atomic_state *state;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret = 0;
+
+       /* ignore the pointless "none" flag */
+       set->flags &= ~I915_SET_COLORKEY_NONE;
+
+       if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+               return -EINVAL;
+
+       /* Make sure we don't try to enable both src & dest simultaneously */
+       if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+               return -EINVAL;
+
+       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               return -EINVAL;
+
+       plane = drm_plane_find(dev, file_priv, set->plane_id);
+       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+               return -ENOENT;
+
+       /*
+        * On SKL+ only plane 2 can do destination keying against plane 1.
+        * Also, multiple planes can't do destination keying on the same
+        * pipe simultaneously.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 &&
+           to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               return -EINVAL;
+
+       drm_modeset_acquire_init(&ctx, 0);
+
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       state->acquire_ctx = &ctx;
+
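+       /*
+        * Standard atomic retry loop: if the update hits a locking deadlock
+        * (-EDEADLK), clear the state, back off the modeset locks and retry.
+        */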
+       while (1) {
+               plane_state = drm_atomic_get_plane_state(state, plane);
+               ret = PTR_ERR_OR_ZERO(plane_state);
+               if (!ret)
+                       intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+               /*
+                * On some platforms we have to configure
+                * the dst colorkey on the primary plane.
+                */
+               if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+                       struct intel_crtc *crtc =
+                               intel_get_crtc_for_pipe(dev_priv,
+                                                       to_intel_plane(plane)->pipe);
+
+                       plane_state = drm_atomic_get_plane_state(state,
+                                                                crtc->base.primary);
+                       ret = PTR_ERR_OR_ZERO(plane_state);
+                       if (!ret)
+                               intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+               }
+
+               if (!ret)
+                       ret = drm_atomic_commit(state);
+
+               if (ret != -EDEADLK)
+                       break;
+
+               drm_atomic_state_clear(state);
+               drm_modeset_backoff(&ctx);
+       }
+
+       drm_atomic_state_put(state);
+out:
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+       return ret;
+}
+
+static const u32 g4x_plane_formats[] = {
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
+static const u64 i9xx_plane_format_modifiers[] = {
+       I915_FORMAT_MOD_X_TILED,
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+static const u32 snb_plane_formats[] = {
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
+static const u32 vlv_plane_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_ABGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
+static const u32 skl_plane_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
+static const u32 icl_plane_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_Y210,
+       DRM_FORMAT_Y212,
+       DRM_FORMAT_Y216,
+       DRM_FORMAT_XVYU2101010,
+       DRM_FORMAT_XVYU12_16161616,
+       DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_XRGB16161616F,
+       DRM_FORMAT_XBGR16161616F,
+       DRM_FORMAT_ARGB16161616F,
+       DRM_FORMAT_ABGR16161616F,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_Y210,
+       DRM_FORMAT_Y212,
+       DRM_FORMAT_Y216,
+       DRM_FORMAT_XVYU2101010,
+       DRM_FORMAT_XVYU12_16161616,
+       DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 skl_planar_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_NV12,
+};
+
+static const u32 glk_planar_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_P010,
+       DRM_FORMAT_P012,
+       DRM_FORMAT_P016,
+};
+
+static const u32 icl_planar_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_P010,
+       DRM_FORMAT_P012,
+       DRM_FORMAT_P016,
+       DRM_FORMAT_Y210,
+       DRM_FORMAT_Y212,
+       DRM_FORMAT_Y216,
+       DRM_FORMAT_XVYU2101010,
+       DRM_FORMAT_XVYU12_16161616,
+       DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_planar_formats[] = {
+       DRM_FORMAT_C8,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB2101010,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_XRGB16161616F,
+       DRM_FORMAT_XBGR16161616F,
+       DRM_FORMAT_ARGB16161616F,
+       DRM_FORMAT_ABGR16161616F,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_NV12,
+       DRM_FORMAT_P010,
+       DRM_FORMAT_P012,
+       DRM_FORMAT_P016,
+       DRM_FORMAT_Y210,
+       DRM_FORMAT_Y212,
+       DRM_FORMAT_Y216,
+       DRM_FORMAT_XVYU2101010,
+       DRM_FORMAT_XVYU12_16161616,
+       DRM_FORMAT_XVYU16161616,
+};
+
+static const u64 skl_plane_format_modifiers_noccs[] = {
+       I915_FORMAT_MOD_Yf_TILED,
+       I915_FORMAT_MOD_Y_TILED,
+       I915_FORMAT_MOD_X_TILED,
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 skl_plane_format_modifiers_ccs[] = {
+       I915_FORMAT_MOD_Yf_TILED_CCS,
+       I915_FORMAT_MOD_Y_TILED_CCS,
+       I915_FORMAT_MOD_Yf_TILED,
+       I915_FORMAT_MOD_Y_TILED,
+       I915_FORMAT_MOD_X_TILED,
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
+{
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
+       switch (format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               if (modifier == DRM_FORMAT_MOD_LINEAR ||
+                   modifier == I915_FORMAT_MOD_X_TILED)
+                       return true;
+               /* fall through */
+       default:
+               return false;
+       }
+}
+
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
+{
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
+       switch (format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               if (modifier == DRM_FORMAT_MOD_LINEAR ||
+                   modifier == I915_FORMAT_MOD_X_TILED)
+                       return true;
+               /* fall through */
+       default:
+               return false;
+       }
+}
+
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
+{
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
+       switch (format) {
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               if (modifier == DRM_FORMAT_MOD_LINEAR ||
+                   modifier == I915_FORMAT_MOD_X_TILED)
+                       return true;
+               /* fall through */
+       default:
+               return false;
+       }
+}
+
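+/*
+ * The format switch below is ordered from most to least capable: each group
+ * checks its extra modifiers and then falls through, so e.g. the CCS capable
+ * RGB formats also pass the Yf and linear/X/Y checks of the groups below.
+ */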
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+                                          u32 format, u64 modifier)
+{
+       struct intel_plane *plane = to_intel_plane(_plane);
+
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               break;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               if (!plane->has_ccs)
+                       return false;
+               break;
+       default:
+               return false;
+       }
+
+       switch (format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+               if (is_ccs_modifier(modifier))
+                       return true;
+               /* fall through */
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_P010:
+       case DRM_FORMAT_P012:
+       case DRM_FORMAT_P016:
+       case DRM_FORMAT_XVYU2101010:
+               if (modifier == I915_FORMAT_MOD_Yf_TILED)
+                       return true;
+               /* fall through */
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_XBGR16161616F:
+       case DRM_FORMAT_ABGR16161616F:
+       case DRM_FORMAT_XRGB16161616F:
+       case DRM_FORMAT_ARGB16161616F:
+       case DRM_FORMAT_Y210:
+       case DRM_FORMAT_Y212:
+       case DRM_FORMAT_Y216:
+       case DRM_FORMAT_XVYU12_16161616:
+       case DRM_FORMAT_XVYU16161616:
+               if (modifier == DRM_FORMAT_MOD_LINEAR ||
+                   modifier == I915_FORMAT_MOD_X_TILED ||
+                   modifier == I915_FORMAT_MOD_Y_TILED)
+                       return true;
+               /* fall through */
+       default:
+               return false;
+       }
+}
+
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = g4x_sprite_format_mod_supported,
+};
+
+static const struct drm_plane_funcs snb_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = snb_sprite_format_mod_supported,
+};
+
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = vlv_sprite_format_mod_supported,
+};
+
+static const struct drm_plane_funcs skl_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = skl_plane_format_mod_supported,
+};
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+                             enum pipe pipe, enum plane_id plane_id)
+{
+       if (!HAS_FBC(dev_priv))
+               return false;
+
+       return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+                                enum pipe pipe, enum plane_id plane_id)
+{
+       if (INTEL_GEN(dev_priv) >= 11)
+               return plane_id <= PLANE_SPRITE3;
+
+       /* Display WA #0870: skl, bxt */
+       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+               return false;
+
+       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+               return false;
+
+       if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+               return false;
+
+       return true;
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+                             enum pipe pipe, enum plane_id plane_id)
+{
+       if (plane_id == PLANE_CURSOR)
+               return false;
+
+       if (INTEL_GEN(dev_priv) >= 10)
+               return true;
+
+       if (IS_GEMINILAKE(dev_priv))
+               return pipe != PIPE_C;
+
+       return pipe != PIPE_C &&
+               (plane_id == PLANE_PRIMARY ||
+                plane_id == PLANE_SPRITE0);
+}
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+                          enum pipe pipe, enum plane_id plane_id)
+{
+       struct intel_plane *plane;
+       enum drm_plane_type plane_type;
+       unsigned int supported_rotations;
+       unsigned int possible_crtcs;
+       const u64 *modifiers;
+       const u32 *formats;
+       int num_formats;
+       int ret;
+
+       plane = intel_plane_alloc();
+       if (IS_ERR(plane))
+               return plane;
+
+       plane->pipe = pipe;
+       plane->id = plane_id;
+       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+       plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+       if (plane->has_fbc) {
+               struct intel_fbc *fbc = &dev_priv->fbc;
+
+               fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+       }
+
+       plane->max_stride = skl_plane_max_stride;
+       plane->update_plane = skl_update_plane;
+       plane->disable_plane = skl_disable_plane;
+       plane->get_hw_state = skl_plane_get_hw_state;
+       plane->check_plane = skl_plane_check;
+       if (icl_is_nv12_y_plane(plane_id))
+               plane->update_slave = icl_update_slave;
+
+       if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+               if (icl_is_hdr_plane(dev_priv, plane_id)) {
+                       formats = icl_hdr_planar_formats;
+                       num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
+               } else if (INTEL_GEN(dev_priv) >= 11) {
+                       formats = icl_planar_formats;
+                       num_formats = ARRAY_SIZE(icl_planar_formats);
+               } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
+                       formats = glk_planar_formats;
+                       num_formats = ARRAY_SIZE(glk_planar_formats);
+               } else {
+                       formats = skl_planar_formats;
+                       num_formats = ARRAY_SIZE(skl_planar_formats);
+               }
+       } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
+               formats = icl_hdr_plane_formats;
+               num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+       } else if (INTEL_GEN(dev_priv) >= 11) {
+               formats = icl_plane_formats;
+               num_formats = ARRAY_SIZE(icl_plane_formats);
+       } else {
+               formats = skl_plane_formats;
+               num_formats = ARRAY_SIZE(skl_plane_formats);
+       }
+
+       plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+       if (plane->has_ccs)
+               modifiers = skl_plane_format_modifiers_ccs;
+       else
+               modifiers = skl_plane_format_modifiers_noccs;
+
+       if (plane_id == PLANE_PRIMARY)
+               plane_type = DRM_PLANE_TYPE_PRIMARY;
+       else
+               plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+       possible_crtcs = BIT(pipe);
+
+       ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                      possible_crtcs, &skl_plane_funcs,
+                                      formats, num_formats, modifiers,
+                                      plane_type,
+                                      "plane %d%c", plane_id + 1,
+                                      pipe_name(pipe));
+       if (ret)
+               goto fail;
+
+       supported_rotations =
+               DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+               DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+       if (INTEL_GEN(dev_priv) >= 10)
+               supported_rotations |= DRM_MODE_REFLECT_X;
+
+       drm_plane_create_rotation_property(&plane->base,
+                                          DRM_MODE_ROTATE_0,
+                                          supported_rotations);
+
+       drm_plane_create_color_properties(&plane->base,
+                                         BIT(DRM_COLOR_YCBCR_BT601) |
+                                         BIT(DRM_COLOR_YCBCR_BT709),
+                                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+                                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+                                         DRM_COLOR_YCBCR_BT709,
+                                         DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+       drm_plane_create_alpha_property(&plane->base);
+       drm_plane_create_blend_mode_property(&plane->base,
+                                            BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+                                            BIT(DRM_MODE_BLEND_PREMULTI) |
+                                            BIT(DRM_MODE_BLEND_COVERAGE));
+
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+       return plane;
+
+fail:
+       intel_plane_free(plane);
+
+       return ERR_PTR(ret);
+}
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+                         enum pipe pipe, int sprite)
+{
+       struct intel_plane *plane;
+       const struct drm_plane_funcs *plane_funcs;
+       unsigned long possible_crtcs;
+       unsigned int supported_rotations;
+       const u64 *modifiers;
+       const u32 *formats;
+       int num_formats;
+       int ret;
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               return skl_universal_plane_create(dev_priv, pipe,
+                                                 PLANE_SPRITE0 + sprite);
+
+       plane = intel_plane_alloc();
+       if (IS_ERR(plane))
+               return plane;
+
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               plane->max_stride = i9xx_plane_max_stride;
+               plane->update_plane = vlv_update_plane;
+               plane->disable_plane = vlv_disable_plane;
+               plane->get_hw_state = vlv_plane_get_hw_state;
+               plane->check_plane = vlv_sprite_check;
+
+               formats = vlv_plane_formats;
+               num_formats = ARRAY_SIZE(vlv_plane_formats);
+               modifiers = i9xx_plane_format_modifiers;
+
+               plane_funcs = &vlv_sprite_funcs;
+       } else if (INTEL_GEN(dev_priv) >= 7) {
+               plane->max_stride = g4x_sprite_max_stride;
+               plane->update_plane = ivb_update_plane;
+               plane->disable_plane = ivb_disable_plane;
+               plane->get_hw_state = ivb_plane_get_hw_state;
+               plane->check_plane = g4x_sprite_check;
+
+               formats = snb_plane_formats;
+               num_formats = ARRAY_SIZE(snb_plane_formats);
+               modifiers = i9xx_plane_format_modifiers;
+
+               plane_funcs = &snb_sprite_funcs;
+       } else {
+               plane->max_stride = g4x_sprite_max_stride;
+               plane->update_plane = g4x_update_plane;
+               plane->disable_plane = g4x_disable_plane;
+               plane->get_hw_state = g4x_plane_get_hw_state;
+               plane->check_plane = g4x_sprite_check;
+
+               modifiers = i9xx_plane_format_modifiers;
+               if (IS_GEN(dev_priv, 6)) {
+                       formats = snb_plane_formats;
+                       num_formats = ARRAY_SIZE(snb_plane_formats);
+
+                       plane_funcs = &snb_sprite_funcs;
+               } else {
+                       formats = g4x_plane_formats;
+                       num_formats = ARRAY_SIZE(g4x_plane_formats);
+
+                       plane_funcs = &g4x_sprite_funcs;
+               }
+       }
+
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+               supported_rotations =
+                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+                       DRM_MODE_REFLECT_X;
+       } else {
+               supported_rotations =
+                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+       }
+
+       plane->pipe = pipe;
+       plane->id = PLANE_SPRITE0 + sprite;
+       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+       possible_crtcs = BIT(pipe);
+
+       ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+                                      possible_crtcs, plane_funcs,
+                                      formats, num_formats, modifiers,
+                                      DRM_PLANE_TYPE_OVERLAY,
+                                      "sprite %c", sprite_name(pipe, sprite));
+       if (ret)
+               goto fail;
+
+       drm_plane_create_rotation_property(&plane->base,
+                                          DRM_MODE_ROTATE_0,
+                                          supported_rotations);
+
+       drm_plane_create_color_properties(&plane->base,
+                                         BIT(DRM_COLOR_YCBCR_BT601) |
+                                         BIT(DRM_COLOR_YCBCR_BT709),
+                                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+                                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+                                         DRM_COLOR_YCBCR_BT709,
+                                         DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+       return plane;
+
+fail:
+       intel_plane_free(plane);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
new file mode 100644 (file)
index 0000000..500f6bf
--- /dev/null
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_H__
+#define __INTEL_SPRITE_H__
+
+#include <linux/types.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_device;
+struct drm_display_mode;
+struct drm_file;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+bool is_planar_yuv_format(u32 pixelformat);
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+                            int usecs);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+                                             enum pipe pipe, int plane);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+int intel_plane_check_stride(const struct intel_plane_state *plane_state);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+                          enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+       /* Don't need to do a gen check, these planes are only available on gen11 */
+       if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+               return true;
+
+       return false;
+}
+
+static inline u8 icl_hdr_plane_mask(void)
+{
+       return BIT(PLANE_PRIMARY) |
+               BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
+}
+
+static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
+                                   enum plane_id plane_id)
+{
+       return INTEL_GEN(dev_priv) >= 11 &&
+               icl_hdr_plane_mask() & BIT(plane_id);
+}
+
+#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
new file mode 100644 (file)
index 0000000..89ef14c
--- /dev/null
@@ -0,0 +1,808 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * This information is private to VBT parsing in intel_bios.c.
+ *
+ * Please do NOT include anywhere else.
+ */
+#ifndef _INTEL_BIOS_PRIVATE
+#error "intel_vbt_defs.h is private to intel_bios.c"
+#endif
+
+#ifndef _INTEL_VBT_DEFS_H_
+#define _INTEL_VBT_DEFS_H_
+
+#include "intel_bios.h"
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature:         VBT signature, always starts with "$VBT"
+ * @version:           Version of this structure
+ * @header_size:       Size of this structure
+ * @vbt_size:          Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum:      Checksum
+ * @reserved0:         Reserved
+ * @bdb_offset:                Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset:                Offsets of add-in data blocks from beginning of VBT
+ */
+struct vbt_header {
+       u8 signature[20];
+       u16 version;
+       u16 header_size;
+       u16 vbt_size;
+       u8 vbt_checksum;
+       u8 reserved0;
+       u32 bdb_offset;
+       u32 aim_offset[4];
+} __packed;
+
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature:         BDB signature "BIOS_DATA_BLOCK"
+ * @version:           Version of the data block definitions
+ * @header_size:       Size of this structure
+ * @bdb_size:          Size of BDB (BDB Header and data blocks)
+ */
+struct bdb_header {
+       u8 signature[16];
+       u16 version;
+       u16 header_size;
+       u16 bdb_size;
+} __packed;
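/*
 * Editor's illustration, not part of this patch: given the two headers above,
 * the BDB header is located by adding bdb_offset to the start of the VBT
 * mapping. The helper name is made up; intel_bios.c additionally validates
 * the "$VBT" signature and the header/size fields before trusting the offset.
 */
static const struct bdb_header *
example_get_bdb_header(const struct vbt_header *vbt)
{
	return (const struct bdb_header *)((const u8 *)vbt + vbt->bdb_offset);
}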
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+enum bdb_block_id {
+       BDB_GENERAL_FEATURES            = 1,
+       BDB_GENERAL_DEFINITIONS         = 2,
+       BDB_OLD_TOGGLE_LIST             = 3,
+       BDB_MODE_SUPPORT_LIST           = 4,
+       BDB_GENERIC_MODE_TABLE          = 5,
+       BDB_EXT_MMIO_REGS               = 6,
+       BDB_SWF_IO                      = 7,
+       BDB_SWF_MMIO                    = 8,
+       BDB_PSR                         = 9,
+       BDB_MODE_REMOVAL_TABLE          = 10,
+       BDB_CHILD_DEVICE_TABLE          = 11,
+       BDB_DRIVER_FEATURES             = 12,
+       BDB_DRIVER_PERSISTENCE          = 13,
+       BDB_EXT_TABLE_PTRS              = 14,
+       BDB_DOT_CLOCK_OVERRIDE          = 15,
+       BDB_DISPLAY_SELECT              = 16,
+       BDB_DRIVER_ROTATION             = 18,
+       BDB_DISPLAY_REMOVE              = 19,
+       BDB_OEM_CUSTOM                  = 20,
+       BDB_EFP_LIST                    = 21, /* workarounds for VGA hsync/vsync */
+       BDB_SDVO_LVDS_OPTIONS           = 22,
+       BDB_SDVO_PANEL_DTDS             = 23,
+       BDB_SDVO_LVDS_PNP_IDS           = 24,
+       BDB_SDVO_LVDS_POWER_SEQ         = 25,
+       BDB_TV_OPTIONS                  = 26,
+       BDB_EDP                         = 27,
+       BDB_LVDS_OPTIONS                = 40,
+       BDB_LVDS_LFP_DATA_PTRS          = 41,
+       BDB_LVDS_LFP_DATA               = 42,
+       BDB_LVDS_BACKLIGHT              = 43,
+       BDB_LVDS_POWER                  = 44,
+       BDB_MIPI_CONFIG                 = 52,
+       BDB_MIPI_SEQUENCE               = 53,
+       BDB_SKIP                        = 254, /* VBIOS private block, ignore */
+};
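/*
 * Editor's sketch of the 3-byte block header described above: byte 0 is the
 * block ID, bytes 1-2 are the little-endian block size. Hypothetical helper,
 * shown only to illustrate the layout; intel_bios.c walks the blocks with its
 * own lookup logic.
 */
static u16 example_bdb_block_size(const u8 *block)
{
	/* size of the data that follows the 3-byte ID/size header */
	return block[1] | (block[2] << 8);
}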
+
+/*
+ * Block 1 - General Bit Definitions
+ */
+
+struct bdb_general_features {
+        /* bits 1 */
+       u8 panel_fitting:2;
+       u8 flexaim:1;
+       u8 msg_enable:1;
+       u8 clear_screen:3;
+       u8 color_flip:1;
+
+        /* bits 2 */
+       u8 download_ext_vbt:1;
+       u8 enable_ssc:1;
+       u8 ssc_freq:1;
+       u8 enable_lfp_on_override:1;
+       u8 disable_ssc_ddt:1;
+       u8 underscan_vga_timings:1;
+       u8 display_clock_mode:1;
+       u8 vbios_hotplug_support:1;
+
+        /* bits 3 */
+       u8 disable_smooth_vision:1;
+       u8 single_dvi:1;
+       u8 rotate_180:1;                                        /* 181 */
+       u8 fdi_rx_polarity_inverted:1;
+       u8 vbios_extended_mode:1;                               /* 160 */
+       u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1;                    /* 160 */
+       u8 panel_best_fit_timing:1;                             /* 160 */
+       u8 ignore_strap_state:1;                                /* 160 */
+
+        /* bits 4 */
+       u8 legacy_monitor_detect;
+
+        /* bits 5 */
+       u8 int_crt_support:1;
+       u8 int_tv_support:1;
+       u8 int_efp_support:1;
+       u8 dp_ssc_enable:1;     /* PCH attached eDP supports SSC */
+       u8 dp_ssc_freq:1;       /* SSC freq for PCH attached eDP */
+       u8 dp_ssc_dongle_supported:1;
+       u8 rsvd11:2; /* finish byte */
+} __packed;
+
+/*
+ * Block 2 - General Bytes Definition
+ */
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS      0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C       0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC       0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C   0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE       0x00
+#define DEVICE_TYPE_CRT                0x01
+#define DEVICE_TYPE_TV         0x09
+#define DEVICE_TYPE_EFP                0x12
+#define DEVICE_TYPE_LFP                0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS           0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG   0x4001
+#define DEVICE_TYPE_TV_COMPOSITE       0x0209
+#define DEVICE_TYPE_TV_MACROVISION     0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE    0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE        0x0609
+#define DEVICE_TYPE_TV_SCART           0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR    0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR        0x6052
+#define DEVICE_TYPE_EFP_DVI_I          0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL     0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP     0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR        0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX    0x6162
+#define DEVICE_TYPE_LFP_PANELLINK      0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR       0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR       0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL      0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP            0x1022
+#define DEVICE_TYPE_INT_TV             0x1009
+#define DEVICE_TYPE_HDMI               0x60D2
+#define DEVICE_TYPE_DP                 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE       0x60D6
+#define DEVICE_TYPE_eDP                        0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION    (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT   (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING  (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT    (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT                (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT   (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL       (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK    (1 << 6)
+#define DEVICE_TYPE_LVDS_SIGNALING     (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING    (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT     (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT      (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the
+ * system, the other bits may or may not be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+       (DEVICE_TYPE_INTERNAL_CONNECTOR |       \
+        DEVICE_TYPE_MIPI_OUTPUT |              \
+        DEVICE_TYPE_COMPOSITE_OUTPUT |         \
+        DEVICE_TYPE_DUAL_CHANNEL |             \
+        DEVICE_TYPE_LVDS_SIGNALING |           \
+        DEVICE_TYPE_TMDS_DVI_SIGNALING |       \
+        DEVICE_TYPE_VIDEO_SIGNALING |          \
+        DEVICE_TYPE_DISPLAYPORT_OUTPUT |       \
+        DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+       (DEVICE_TYPE_INTERNAL_CONNECTOR |       \
+        DEVICE_TYPE_MIPI_OUTPUT |              \
+        DEVICE_TYPE_COMPOSITE_OUTPUT |         \
+        DEVICE_TYPE_LVDS_SIGNALING |           \
+        DEVICE_TYPE_TMDS_DVI_SIGNALING |       \
+        DEVICE_TYPE_VIDEO_SIGNALING |          \
+        DEVICE_TYPE_DISPLAYPORT_OUTPUT |       \
+        DEVICE_TYPE_DIGITAL_OUTPUT |           \
+        DEVICE_TYPE_ANALOG_OUTPUT)
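/*
 * Editor's sketch of how the masks above are meant to be used: only the
 * DEVICE_TYPE_eDP_BITS portion of device_type is compared, since the other
 * bits may or may not be set depending on the system. intel_bios.c performs
 * an equivalent comparison when classifying ports.
 */
static bool example_device_type_is_edp(u16 device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}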
+
+#define DEVICE_CFG_NONE                0x00
+#define DEVICE_CFG_12BIT_DVOB  0x01
+#define DEVICE_CFG_12BIT_DVOC  0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB   0x11
+#define DEVICE_CFG_DUAL_DVOC   0x12
+#define DEVICE_CFG_DUAL_DVOBC  0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC     0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB     0x1a
+
+#define DEVICE_WIRE_NONE       0x00
+#define DEVICE_WIRE_DVOB       0x01
+#define DEVICE_WIRE_DVOC       0x02
+#define DEVICE_WIRE_DVOBC      0x03
+#define DEVICE_WIRE_DVOBB      0x05
+#define DEVICE_WIRE_DVOCC      0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+/* dvo_port pre BDB 155 */
+#define DEVICE_PORT_DVOA       0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB       0x01
+#define DEVICE_PORT_DVOC       0x02
+
+/* dvo_port BDB 155+ */
+#define DVO_PORT_HDMIA         0
+#define DVO_PORT_HDMIB         1
+#define DVO_PORT_HDMIC         2
+#define DVO_PORT_HDMID         3
+#define DVO_PORT_LVDS          4
+#define DVO_PORT_TV            5
+#define DVO_PORT_CRT           6
+#define DVO_PORT_DPB           7
+#define DVO_PORT_DPC           8
+#define DVO_PORT_DPD           9
+#define DVO_PORT_DPA           10
+#define DVO_PORT_DPE           11                              /* 193 */
+#define DVO_PORT_HDMIE         12                              /* 193 */
+#define DVO_PORT_DPF           13                              /* N/A */
+#define DVO_PORT_HDMIF         14                              /* N/A */
+#define DVO_PORT_MIPIA         21                              /* 171 */
+#define DVO_PORT_MIPIB         22                              /* 171 */
+#define DVO_PORT_MIPIC         23                              /* 171 */
+#define DVO_PORT_MIPID         24                              /* 171 */
+
+#define HDMI_MAX_DATA_RATE_PLATFORM    0                       /* 204 */
+#define HDMI_MAX_DATA_RATE_297         1                       /* 204 */
+#define HDMI_MAX_DATA_RATE_165         2                       /* 204 */
+
+#define LEGACY_CHILD_DEVICE_CONFIG_SIZE                33
+
+/* DDC Bus DDI Type 155+ */
+enum vbt_gmbus_ddi {
+       DDC_BUS_DDI_B = 0x1,
+       DDC_BUS_DDI_C,
+       DDC_BUS_DDI_D,
+       DDC_BUS_DDI_F,
+       ICL_DDC_BUS_DDI_A = 0x1,
+       ICL_DDC_BUS_DDI_B,
+       ICL_DDC_BUS_PORT_1 = 0x4,
+       ICL_DDC_BUS_PORT_2,
+       ICL_DDC_BUS_PORT_3,
+       ICL_DDC_BUS_PORT_4,
+};
+
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
+#define VBT_DP_MAX_LINK_RATE_HBR3      0
+#define VBT_DP_MAX_LINK_RATE_HBR2      1
+#define VBT_DP_MAX_LINK_RATE_HBR       2
+#define VBT_DP_MAX_LINK_RATE_LBR       3
+
+/*
+ * The child device config, aka the display device data structure, provides a
+ * description of a port and its configuration on the platform.
+ *
+ * The child device config size has been increased, and fields have been added
+ * and their meaning has changed over time. Care must be taken when accessing
+ * basically any of the fields to ensure the correct interpretation for the BDB
+ * version in question.
+ *
+ * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
+ * space for the full structure below, and initialize the tail not actually
+ * present in VBT to zeros. Accessing those fields is fine, as long as the
+ * default zero is taken into account, again according to the BDB version.
+ *
+ * BDB versions 155 and below are considered legacy, and version 155 seems to be
+ * a baseline for some of the VBT documentation. When adding new fields, please
+ * include the BDB version when the field was added, if it's above that.
+ */
+struct child_device_config {
+       u16 handle;
+       u16 device_type; /* See DEVICE_TYPE_* above */
+
+       union {
+               u8  device_id[10]; /* ascii string */
+               struct {
+                       u8 i2c_speed;
+                       u8 dp_onboard_redriver;                 /* 158 */
+                       u8 dp_ondock_redriver;                  /* 158 */
+                       u8 hdmi_level_shifter_value:5;          /* 169 */
+                       u8 hdmi_max_data_rate:3;                /* 204 */
+                       u16 dtd_buf_ptr;                        /* 161 */
+                       u8 edidless_efp:1;                      /* 161 */
+                       u8 compression_enable:1;                /* 198 */
+                       u8 compression_method:1;                /* 198 */
+                       u8 ganged_edp:1;                        /* 202 */
+                       u8 reserved0:4;
+                       u8 compression_structure_index:4;       /* 198 */
+                       u8 reserved1:4;
+                       u8 slave_port;                          /* 202 */
+                       u8 reserved2;
+               } __packed;
+       } __packed;
+
+       u16 addin_offset;
+       u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
+       u8 i2c_pin;
+       u8 slave_addr;
+       u8 ddc_pin;
+       u16 edid_ptr;
+       u8 dvo_cfg; /* See DEVICE_CFG_* above */
+
+       union {
+               struct {
+                       u8 dvo2_port;
+                       u8 i2c2_pin;
+                       u8 slave2_addr;
+                       u8 ddc2_pin;
+               } __packed;
+               struct {
+                       u8 efp_routed:1;                        /* 158 */
+                       u8 lane_reversal:1;                     /* 184 */
+                       u8 lspcon:1;                            /* 192 */
+                       u8 iboost:1;                            /* 196 */
+                       u8 hpd_invert:1;                        /* 196 */
+                       u8 use_vbt_vswing:1;                    /* 218 */
+                       u8 flag_reserved:2;
+                       u8 hdmi_support:1;                      /* 158 */
+                       u8 dp_support:1;                        /* 158 */
+                       u8 tmds_support:1;                      /* 158 */
+                       u8 support_reserved:5;
+                       u8 aux_channel;
+                       u8 dongle_detect;
+               } __packed;
+       } __packed;
+
+       u8 pipe_cap:2;
+       u8 sdvo_stall:1;                                        /* 158 */
+       u8 hpd_status:2;
+       u8 integrated_encoder:1;
+       u8 capabilities_reserved:2;
+       u8 dvo_wiring; /* See DEVICE_WIRE_* above */
+
+       union {
+               u8 dvo2_wiring;
+               u8 mipi_bridge_type;                            /* 171 */
+       } __packed;
+
+       u16 extended_type;
+       u8 dvo_function;
+       u8 dp_usb_type_c:1;                                     /* 195 */
+       u8 tbt:1;                                               /* 209 */
+       u8 flags2_reserved:2;                                   /* 195 */
+       u8 dp_port_trace_length:4;                              /* 209 */
+       u8 dp_gpio_index;                                       /* 195 */
+       u16 dp_gpio_pin_num;                                    /* 195 */
+       u8 dp_iboost_level:4;                                   /* 196 */
+       u8 hdmi_iboost_level:4;                                 /* 196 */
+       u8 dp_max_link_rate:2;                                  /* 216 CNL+ */
+       u8 dp_max_link_rate_reserved:6;                         /* 216 */
+} __packed;
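/*
 * Editor's illustration of the version rules in the comment above: a field is
 * only meaningful from the BDB version noted next to it, and the
 * zero-initialized tail must be treated as "not set" on older VBTs. The
 * helper is hypothetical; bdb_version would come from the parsed BDB header.
 */
static u8 example_child_dp_max_link_rate(const struct child_device_config *child,
					 u16 bdb_version)
{
	/* dp_max_link_rate was added in BDB version 216 (see annotation above) */
	if (bdb_version < 216)
		return 0;

	return child->dp_max_link_rate;
}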
+
+struct bdb_general_definitions {
+       /* DDC GPIO */
+       u8 crt_ddc_gmbus_pin;
+
+       /* DPMS bits */
+       u8 dpms_acpi:1;
+       u8 skip_boot_crt_detect:1;
+       u8 dpms_aim:1;
+       u8 rsvd1:5; /* finish byte */
+
+       /* boot device bits */
+       u8 boot_display[2];
+       u8 child_dev_size;
+
+       /*
+        * Device info:
+        * If TV is present, it'll be at devices[0].
+        * LVDS will be next, either devices[0] or [1], if present.
+        * On some platforms there are 6 devices, but there can be as few as
+        * 4 if both TV and LVDS are missing.
+        * The number of devices depends on the size of the general definitions
+        * block and is obtained with the following formula:
+        * number = (block_size - sizeof(bdb_general_definitions))/
+        *           defs->child_dev_size;
+        */
+       u8 devices[0];
+} __packed;
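/*
 * Editor's illustration of the formula in the comment above; block_size is
 * assumed to be the size reported by the BDB block header for block 2.
 */
static int example_child_device_count(const struct bdb_general_definitions *defs,
				      size_t block_size)
{
	return (block_size - sizeof(*defs)) / defs->child_dev_size;
}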
+
+/*
+ * Block 9 - SRD Feature Block
+ */
+
+struct psr_table {
+       /* Feature bits */
+       u8 full_link:1;
+       u8 require_aux_to_wakeup:1;
+       u8 feature_bits_rsvd:6;
+
+       /* Wait times */
+       u8 idle_frames:4;
+       u8 lines_to_wait:3;
+       u8 wait_times_rsvd:1;
+
+       /* TP wake up time in multiple of 100 */
+       u16 tp1_wakeup_time;
+       u16 tp2_tp3_wakeup_time;
+
+       /* PSR2 TP2/TP3 wakeup time for 16 panels */
+       u32 psr2_tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+       struct psr_table psr_table[16];
+} __packed;
+
+/*
+ * Block 12 - Driver Features Data Block
+ */
+
+#define BDB_DRIVER_FEATURE_NO_LVDS             0
+#define BDB_DRIVER_FEATURE_INT_LVDS            1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS           2
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS       3
+
+struct bdb_driver_features {
+       u8 boot_dev_algorithm:1;
+       u8 block_display_switch:1;
+       u8 allow_display_switch:1;
+       u8 hotplug_dvo:1;
+       u8 dual_view_zoom:1;
+       u8 int15h_hook:1;
+       u8 sprite_in_clone:1;
+       u8 primary_lfp_id:1;
+
+       u16 boot_mode_x;
+       u16 boot_mode_y;
+       u8 boot_mode_bpp;
+       u8 boot_mode_refresh;
+
+       u16 enable_lfp_primary:1;
+       u16 selective_mode_pruning:1;
+       u16 dual_frequency:1;
+       u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+       u16 nt_clone_support:1;
+       u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+       u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+       u16 cui_aspect_scaling:1;
+       u16 preserve_aspect_ratio:1;
+       u16 sdvo_device_power_down:1;
+       u16 crt_hotplug:1;
+       u16 lvds_config:2;
+       u16 tv_hotplug:1;
+       u16 hdmi_config:2;
+
+       u8 static_display:1;
+       u8 reserved2:7;
+       u16 legacy_crt_max_x;
+       u16 legacy_crt_max_y;
+       u8 legacy_crt_max_refresh;
+
+       u8 hdmi_termination;
+       u8 custom_vbt_version;
+       /* Driver features data block */
+       u16 rmpm_enabled:1;
+       u16 s2ddt_enabled:1;
+       u16 dpst_enabled:1;
+       u16 bltclt_enabled:1;
+       u16 adb_enabled:1;
+       u16 drrs_enabled:1;
+       u16 grs_enabled:1;
+       u16 gpmt_enabled:1;
+       u16 tbt_enabled:1;
+       u16 psr_enabled:1;
+       u16 ips_enabled:1;
+       u16 reserved3:4;
+       u16 pc_feature_valid:1;
+} __packed;
+
+/*
+ * Block 22 - SDVO LVDS General Options
+ */
+
+struct bdb_sdvo_lvds_options {
+       u8 panel_backlight;
+       u8 h40_set_panel_type;
+       u8 panel_type;
+       u8 ssc_clk_freq;
+       u16 als_low_trip;
+       u16 als_high_trip;
+       u8 sclalarcoeff_tab_row_num;
+       u8 sclalarcoeff_tab_row_size;
+       u8 coefficient[8];
+       u8 panel_misc_bits_1;
+       u8 panel_misc_bits_2;
+       u8 panel_misc_bits_3;
+       u8 panel_misc_bits_4;
+} __packed;
+
+/*
+ * Block 23 - SDVO LVDS Panel DTDs
+ */
+
+struct lvds_dvo_timing {
+       u16 clock;              /**< In 10khz */
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_off_lo;
+       u8 hsync_pulse_width_lo;
+       u8 vsync_pulse_width_lo:4;
+       u8 vsync_off_lo:4;
+       u8 vsync_pulse_width_hi:2;
+       u8 vsync_off_hi:2;
+       u8 hsync_pulse_width_hi:2;
+       u8 hsync_off_hi:2;
+       u8 himage_lo;
+       u8 vimage_lo;
+       u8 vimage_hi:4;
+       u8 himage_hi:4;
+       u8 h_border;
+       u8 v_border;
+       u8 rsvd1:3;
+       u8 digital:2;
+       u8 vsync_positive:1;
+       u8 hsync_positive:1;
+       u8 non_interlaced:1;
+} __packed;
+
+struct bdb_sdvo_panel_dtds {
+       struct lvds_dvo_timing dtds[4];
+} __packed;
+
+/*
+ * Block 27 - eDP VBT Block
+ */
+
+#define EDP_18BPP      0
+#define EDP_24BPP      1
+#define EDP_30BPP      2
+#define EDP_RATE_1_62  0
+#define EDP_RATE_2_7   1
+#define EDP_LANE_1     0
+#define EDP_LANE_2     1
+#define EDP_LANE_4     3
+#define EDP_PREEMPHASIS_NONE   0
+#define EDP_PREEMPHASIS_3_5dB  1
+#define EDP_PREEMPHASIS_6dB    2
+#define EDP_PREEMPHASIS_9_5dB  3
+#define EDP_VSWING_0_4V                0
+#define EDP_VSWING_0_6V                1
+#define EDP_VSWING_0_8V                2
+#define EDP_VSWING_1_2V                3
+
+
+struct edp_fast_link_params {
+       u8 rate:4;
+       u8 lanes:4;
+       u8 preemphasis:4;
+       u8 vswing:4;
+} __packed;
+
+struct edp_pwm_delays {
+       u16 pwm_on_to_backlight_enable;
+       u16 backlight_disable_to_pwm_off;
+} __packed;
+
+struct edp_full_link_params {
+       u8 preemphasis:4;
+       u8 vswing:4;
+} __packed;
+
+struct bdb_edp {
+       struct edp_power_seq power_seqs[16];
+       u32 color_depth;
+       struct edp_fast_link_params fast_link_params[16];
+       u32 sdrrs_msa_timing_delay;
+
+       /* ith bit indicates enabled/disabled for (i+1)th panel */
+       u16 edp_s3d_feature;                                    /* 162 */
+       u16 edp_t3_optimization;                                /* 165 */
+       u64 edp_vswing_preemph;                                 /* 173 */
+       u16 fast_link_training;                                 /* 182 */
+       u16 dpcd_600h_write_required;                           /* 185 */
+       struct edp_pwm_delays pwm_delays[16];                   /* 186 */
+       u16 full_link_params_provided;                          /* 199 */
+       struct edp_full_link_params full_link_params[16];       /* 199 */
+} __packed;
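/*
 * Editor's sketch of the per-panel bitfield convention noted above ("ith bit
 * for the (i+1)th panel"): panel_type is the zero-based panel index taken
 * from the LVDS options block. Hypothetical helper, not part of this patch.
 */
static bool example_edp_t3_optimization_enabled(const struct bdb_edp *edp,
						int panel_type)
{
	return edp->edp_t3_optimization & BIT(panel_type);
}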
+
+/*
+ * Block 40 - LFP Data Block
+ */
+
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK              0x3
+
+struct bdb_lvds_options {
+       u8 panel_type;
+       u8 panel_type2;                                         /* 212 */
+       /* LVDS capabilities, stored in a dword */
+       u8 pfit_mode:2;
+       u8 pfit_text_mode_enhanced:1;
+       u8 pfit_gfx_mode_enhanced:1;
+       u8 pfit_ratio_auto:1;
+       u8 pixel_dither:1;
+       u8 lvds_edid:1;
+       u8 rsvd2:1;
+       u8 rsvd4;
+       /* LVDS Panel channel bits stored here */
+       u32 lvds_panel_channel_bits;
+       /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+       u16 ssc_bits;
+       u16 ssc_freq;
+       u16 ssc_ddt;
+       /* Panel color depth defined here */
+       u16 panel_color_depth;
+       /* LVDS panel type bits stored here */
+       u32 dps_panel_type_bits;
+       /* LVDS backlight control type bits stored here */
+       u32 blt_control_type_bits;
+
+       u16 lcdvcc_s0_enable;                                   /* 200 */
+       u32 rotation;                                           /* 228 */
+} __packed;
+
+/*
+ * Block 41 - LFP Data Table Pointers
+ */
+
+/* LFP pointer table contains entries to the struct below */
+struct lvds_lfp_data_ptr {
+       u16 fp_timing_offset; /* offsets are from start of bdb */
+       u8 fp_table_size;
+       u16 dvo_timing_offset;
+       u8 dvo_table_size;
+       u16 panel_pnp_id_offset;
+       u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+       u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+       struct lvds_lfp_data_ptr ptr[16];
+} __packed;
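/*
 * Editor's sketch of resolving one of the pointer-table entries above: the
 * offsets are relative to the start of the BDB, as the comment notes, so a
 * timing block is reached from the mapped bdb_header. Hypothetical helper;
 * callers would cast the result to the timing structure defined further
 * below, and intel_bios.c bounds-checks such offsets before use.
 */
static const void *
example_lfp_fp_timing(const struct bdb_header *bdb,
		      const struct lvds_lfp_data_ptr *ptr)
{
	return (const u8 *)bdb + ptr->fp_timing_offset;
}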
+
+/*
+ * Block 42 - LFP Data Tables
+ */
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+       u16 x_res;
+       u16 y_res;
+       u32 lvds_reg;
+       u32 lvds_reg_val;
+       u32 pp_on_reg;
+       u32 pp_on_reg_val;
+       u32 pp_off_reg;
+       u32 pp_off_reg_val;
+       u32 pp_cycle_reg;
+       u32 pp_cycle_reg_val;
+       u32 pfit_reg;
+       u32 pfit_reg_val;
+       u16 terminator;
+} __packed;
+
+struct lvds_pnp_id {
+       u16 mfg_name;
+       u16 product_code;
+       u32 serial;
+       u8 mfg_week;
+       u8 mfg_year;
+} __packed;
+
+struct lvds_lfp_data_entry {
+       struct lvds_fp_timing fp_timing;
+       struct lvds_dvo_timing dvo_timing;
+       struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+       struct lvds_lfp_data_entry data[16];
+} __packed;
+
+/*
+ * Block 43 - LFP Backlight Control Data Block
+ */
+
+#define BDB_BACKLIGHT_TYPE_NONE        0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct lfp_backlight_data_entry {
+       u8 type:2;
+       u8 active_low_pwm:1;
+       u8 obsolete1:5;
+       u16 pwm_freq_hz;
+       u8 min_brightness;
+       u8 obsolete2;
+       u8 obsolete3;
+} __packed;
+
+struct lfp_backlight_control_method {
+       u8 type:4;
+       u8 controller:4;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+       u8 entry_size;
+       struct lfp_backlight_data_entry data[16];
+       u8 level[16];
+       struct lfp_backlight_control_method backlight_control[16];
+} __packed;
+
+/*
+ * Block 52 - MIPI Configuration Block
+ */
+
+#define MAX_MIPI_CONFIGURATIONS        6
+
+struct bdb_mipi_config {
+       struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+       struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+} __packed;
+
+/*
+ * Block 53 - MIPI Sequence Block
+ */
+
+struct bdb_mipi_sequence {
+       u8 version;
+       u8 data[0]; /* up to 6 variable length blocks */
+} __packed;
+
+#endif /* _INTEL_VBT_DEFS_H_ */
index 537aa2337cc8c969b19c5511115c29a769807e1f..9018e12b536b2d50c8b270588c08c54ca5f212ce 100644 (file)
@@ -4,9 +4,10 @@
  * Copyright © 2016 Intel Corporation
  */
 
+#include "display/intel_frontbuffer.h"
+
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
-#include "intel_frontbuffer.h"
 
 static DEFINE_SPINLOCK(clflush_lock);
 
index bd180ef46aeb2d34bcbdb32f4f243a4dda0c586f..2e3ce2a6965395ad2eceff5b981c20a4059badce 100644 (file)
@@ -4,13 +4,14 @@
  * Copyright © 2014-2016 Intel Corporation
  */
 
+#include "display/intel_frontbuffer.h"
+
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 #include "i915_vma.h"
-#include "intel_frontbuffer.h"
 
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 {
index 528eea44dccf58c60703bc9449fd9312941b2d61..5fae0e50aad06881a53bc70c708ceccb7cbfe7d7 100644 (file)
@@ -12,6 +12,8 @@
 #include <drm/drm_syncobj.h>
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
+
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
 #include "gt/intel_gt_pm.h"
@@ -21,7 +23,6 @@
 #include "i915_gem_context.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include "intel_frontbuffer.h"
 
 enum {
        FORCE_CPU_RELOC = 1,
index a4047a585c8ba7c6e455c530303dcfc625a00515..272ce30ce1d339465fd7608b38f87a9adcabaff6 100644 (file)
  *
  */
 
+#include "display/intel_frontbuffer.h"
+
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_object.h"
 #include "i915_globals.h"
-#include "intel_frontbuffer.h"
 
 static struct i915_global_object {
        struct i915_global base;
index 84c670bdb081369b048f7b82e9b1c47dc629b1ee..4c478b38e4209aab66e7b3078431ac785602a659 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/sched/mm.h>
 #include <linux/stop_machine.h>
 
+#include "display/intel_overlay.h"
+
 #include "gem/i915_gem_context.h"
 
 #include "i915_drv.h"
@@ -17,7 +19,6 @@
 #include "intel_reset.h"
 
 #include "intel_guc.h"
-#include "intel_overlay.h"
 
 #define RESET_MAX_RETRIES 3
 
index 276db53f1bf113e1a3b5c75409bd52ff52b8b041..867e7629025be440cc903bd9d6b63a84608bac4b 100644 (file)
@@ -30,7 +30,7 @@
  * not do like this.
  */
 #define _INTEL_BIOS_PRIVATE
-#include "intel_vbt_defs.h"
+#include "display/intel_vbt_defs.h"
 
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 #define MBOX_VBT      (1<<3)
index 7bee60c40394e101770fe68dbbf288163740a626..2bddb3dd2bf5e7182615938ecbcfabbf45063956 100644 (file)
 #include <drm/drm_fourcc.h>
 
 #include "display/intel_dp.h"
+#include "display/intel_fbc.h"
+#include "display/intel_hdcp.h"
 #include "display/intel_hdmi.h"
+#include "display/intel_psr.h"
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_reset.h"
 #include "i915_irq.h"
 #include "intel_csr.h"
 #include "intel_drv.h"
-#include "intel_fbc.h"
 #include "intel_guc_submission.h"
-#include "intel_hdcp.h"
 #include "intel_pm.h"
-#include "intel_psr.h"
 #include "intel_sideband.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
index a9a26fe65266c866d32f645ae6be57a8ca45e157..535b9be4fc5887acba97e4ba8df6afb56359f780 100644 (file)
 #include <drm/drm_probe_helper.h>
 #include <drm/i915_drm.h>
 
+#include "display/intel_acpi.h"
+#include "display/intel_audio.h"
+#include "display/intel_bw.h"
+#include "display/intel_cdclk.h"
 #include "display/intel_dp.h"
+#include "display/intel_fbdev.h"
 #include "display/intel_gmbus.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_overlay.h"
+#include "display/intel_pipe_crc.h"
+#include "display/intel_sprite.h"
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
 #include "i915_query.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
-#include "intel_acpi.h"
-#include "intel_audio.h"
-#include "intel_bw.h"
-#include "intel_cdclk.h"
 #include "intel_csr.h"
 #include "intel_drv.h"
-#include "intel_fbdev.h"
-#include "intel_hotplug.h"
-#include "intel_overlay.h"
-#include "intel_pipe_crc.h"
 #include "intel_pm.h"
-#include "intel_sprite.h"
 #include "intel_uc.h"
 
 static struct drm_driver driver;
index 9dce1c71bb9d1638d4eaaf3ac649856b18f250bb..7a9c2392cc7c6fbb64c13dcd93b876a8b7f4d217 100644 (file)
 #include "i915_reg.h"
 #include "i915_utils.h"
 
+#include "display/intel_bios.h"
+#include "display/intel_display.h"
+#include "display/intel_display_power.h"
+#include "display/intel_dpll_mgr.h"
+#include "display/intel_frontbuffer.h"
+#include "display/intel_opregion.h"
+
 #include "gt/intel_lrc.h"
 #include "gt/intel_engine.h"
 #include "gt/intel_workarounds.h"
 
-#include "intel_bios.h"
 #include "intel_device_info.h"
-#include "intel_display.h"
-#include "intel_display_power.h"
-#include "intel_dpll_mgr.h"
-#include "intel_frontbuffer.h"
-#include "intel_opregion.h"
 #include "intel_runtime_pm.h"
 #include "intel_uc.h"
 #include "intel_uncore.h"
index 335efeaad4f168c63551a74f35471363299bf3d6..190ad54fb072dedf5b44cdd0fd18f0fbe006d8b0 100644 (file)
@@ -38,6 +38,9 @@
 #include <linux/dma-buf.h>
 #include <linux/mman.h>
 
+#include "display/intel_display.h"
+#include "display/intel_frontbuffer.h"
+
 #include "gem/i915_gem_clflush.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
@@ -54,9 +57,7 @@
 #include "i915_trace.h"
 #include "i915_vgpu.h"
 
-#include "intel_display.h"
 #include "intel_drv.h"
-#include "intel_frontbuffer.h"
 #include "intel_pm.h"
 
 static int
index 278de04a96aaa3a6061114aa0c404900c439503d..0392a4c4bb9b419ce8931a522d6a27e234640650 100644 (file)
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
+
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
-#include "intel_frontbuffer.h"
 
 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
index f411e3244208eb83c54b35713423e4ef59bbbb20..b7e9fddef270a27811d352368789e4d79cdcbc56 100644 (file)
 
 #include <drm/drm_print.h>
 
+#include "display/intel_atomic.h"
+#include "display/intel_overlay.h"
+
 #include "gem/i915_gem_context.h"
 
 #include "i915_drv.h"
 #include "i915_gpu_error.h"
 #include "i915_scatterlist.h"
-#include "intel_atomic.h"
 #include "intel_csr.h"
-#include "intel_overlay.h"
 
 static inline const struct intel_engine_cs *
 engine_lookup(const struct drm_i915_private *i915, unsigned int id)
index 6faa6d6c60b8a0be00ff779b5c78e4e011a37a15..2aeb0431c432ece6d34e053d95ee6e21fa98abd0 100644 (file)
 #include <drm/drm_irq.h>
 #include <drm/i915_drm.h>
 
+#include "display/intel_fifo_underrun.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_lpe_audio.h"
+#include "display/intel_psr.h"
+
 #include "i915_drv.h"
 #include "i915_irq.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include "intel_fifo_underrun.h"
-#include "intel_hotplug.h"
-#include "intel_lpe_audio.h"
 #include "intel_pm.h"
-#include "intel_psr.h"
 
 /**
  * DOC: interrupt handling
index ed6b1f6402c40e3dfe3f7d4881c9fcf3895cadc9..6c9f46fc3e1222b4568a31eb01a746100054e19d 100644 (file)
 
 #include <drm/drm_drv.h>
 
+#include "display/intel_fbdev.h"
+
 #include "i915_drv.h"
 #include "i915_globals.h"
 #include "i915_selftest.h"
-#include "intel_fbdev.h"
 
 #define PLATFORM(x) .platform = (x)
 #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
index 1e38a60a8ec7f48c495d17860091549c80d08645..a08d7d16621b794fda4dca89cc9cc86325b08398 100644 (file)
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_fbc.h"
 #include "display/intel_gmbus.h"
 
 #include "i915_reg.h"
 #include "intel_drv.h"
-#include "intel_fbc.h"
 
 static void i915_save_display(struct drm_i915_private *dev_priv)
 {
index b295c53085ee405a71785f062869abbc4db28f3f..5fc0fda32e2a4994920c5df00d1841ce1c879d06 100644 (file)
  *
  */
 
-#include "gt/intel_engine.h"
+#include <drm/drm_gem.h>
 
-#include "i915_vma.h"
+#include "display/intel_frontbuffer.h"
+
+#include "gt/intel_engine.h"
 
 #include "i915_drv.h"
 #include "i915_globals.h"
-#include "intel_frontbuffer.h"
-
-#include <drm/drm_gem.h>
+#include "i915_vma.h"
 
 static struct i915_global_vma {
        struct i915_global base;
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
deleted file mode 100644 (file)
index 3456d33..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel ACPI functions
- *
- * _DSM related code stolen from nouveau_acpi.c.
- */
-
-#include <linux/pci.h>
-#include <linux/acpi.h>
-
-#include "i915_drv.h"
-#include "intel_acpi.h"
-
-#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
-#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
-
-static const guid_t intel_dsm_guid =
-       GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
-                 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
-
-static char *intel_dsm_port_name(u8 id)
-{
-       switch (id) {
-       case 0:
-               return "Reserved";
-       case 1:
-               return "Analog VGA";
-       case 2:
-               return "LVDS";
-       case 3:
-               return "Reserved";
-       case 4:
-               return "HDMI/DVI_B";
-       case 5:
-               return "HDMI/DVI_C";
-       case 6:
-               return "HDMI/DVI_D";
-       case 7:
-               return "DisplayPort_A";
-       case 8:
-               return "DisplayPort_B";
-       case 9:
-               return "DisplayPort_C";
-       case 0xa:
-               return "DisplayPort_D";
-       case 0xb:
-       case 0xc:
-       case 0xd:
-               return "Reserved";
-       case 0xe:
-               return "WiDi";
-       default:
-               return "bad type";
-       }
-}
-
-static char *intel_dsm_mux_type(u8 type)
-{
-       switch (type) {
-       case 0:
-               return "unknown";
-       case 1:
-               return "No MUX, iGPU only";
-       case 2:
-               return "No MUX, dGPU only";
-       case 3:
-               return "MUXed between iGPU and dGPU";
-       default:
-               return "bad type";
-       }
-}
-
-static void intel_dsm_platform_mux_info(acpi_handle dhandle)
-{
-       int i;
-       union acpi_object *pkg, *connector_count;
-
-       pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
-                       INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
-                       NULL, ACPI_TYPE_PACKAGE);
-       if (!pkg) {
-               DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
-               return;
-       }
-
-       connector_count = &pkg->package.elements[0];
-       DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
-                 (unsigned long long)connector_count->integer.value);
-       for (i = 1; i < pkg->package.count; i++) {
-               union acpi_object *obj = &pkg->package.elements[i];
-               union acpi_object *connector_id = &obj->package.elements[0];
-               union acpi_object *info = &obj->package.elements[1];
-               DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
-                         (unsigned long long)connector_id->integer.value);
-               DRM_DEBUG_DRIVER("  port id: %s\n",
-                      intel_dsm_port_name(info->buffer.pointer[0]));
-               DRM_DEBUG_DRIVER("  display mux info: %s\n",
-                      intel_dsm_mux_type(info->buffer.pointer[1]));
-               DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
-                      intel_dsm_mux_type(info->buffer.pointer[2]));
-               DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
-                      intel_dsm_mux_type(info->buffer.pointer[3]));
-       }
-
-       ACPI_FREE(pkg);
-}
-
-static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
-{
-       acpi_handle dhandle;
-
-       dhandle = ACPI_HANDLE(&pdev->dev);
-       if (!dhandle)
-               return NULL;
-
-       if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
-                           1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
-               DRM_DEBUG_KMS("no _DSM method for intel device\n");
-               return NULL;
-       }
-
-       intel_dsm_platform_mux_info(dhandle);
-
-       return dhandle;
-}
-
-static bool intel_dsm_detect(void)
-{
-       acpi_handle dhandle = NULL;
-       char acpi_method_name[255] = { 0 };
-       struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
-       struct pci_dev *pdev = NULL;
-       int vga_count = 0;
-
-       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
-               vga_count++;
-               dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
-       }
-
-       if (vga_count == 2 && dhandle) {
-               acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
-               DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
-                                acpi_method_name);
-               return true;
-       }
-
-       return false;
-}
-
-void intel_register_dsm_handler(void)
-{
-       if (!intel_dsm_detect())
-               return;
-}
-
-void intel_unregister_dsm_handler(void)
-{
-}
diff --git a/drivers/gpu/drm/i915/intel_acpi.h b/drivers/gpu/drm/i915/intel_acpi.h
deleted file mode 100644 (file)
index 1c576b3..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_ACPI_H__
-#define __INTEL_ACPI_H__
-
-#ifdef CONFIG_ACPI
-void intel_register_dsm_handler(void);
-void intel_unregister_dsm_handler(void);
-#else
-static inline void intel_register_dsm_handler(void) { return; }
-static inline void intel_unregister_dsm_handler(void) { return; }
-#endif /* CONFIG_ACPI */
-
-#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
deleted file mode 100644 (file)
index 6b985e8..0000000
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * DOC: atomic modeset support
- *
- * The functions here implement the state management and hardware programming
- * dispatch required by the atomic modeset infrastructure.
- * See intel_atomic_plane.c for the plane-specific atomic functionality.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
-
-#include "intel_atomic.h"
-#include "intel_drv.h"
-#include "intel_hdcp.h"
-#include "intel_sprite.h"
-
-/**
- * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
- * @connector: Connector to get the property for.
- * @state: Connector state to retrieve the property from.
- * @property: Property to retrieve.
- * @val: Return value for the property.
- *
- * Returns the atomic property value for a digital connector.
- */
-int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
-                                               const struct drm_connector_state *state,
-                                               struct drm_property *property,
-                                               u64 *val)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_digital_connector_state *intel_conn_state =
-               to_intel_digital_connector_state(state);
-
-       if (property == dev_priv->force_audio_property)
-               *val = intel_conn_state->force_audio;
-       else if (property == dev_priv->broadcast_rgb_property)
-               *val = intel_conn_state->broadcast_rgb;
-       else {
-               DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
-                                property->base.id, property->name);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/**
- * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
- * @connector: Connector to set the property for.
- * @state: Connector state to set the property on.
- * @property: Property to set.
- * @val: New value for the property.
- *
- * Sets the atomic property value for a digital connector.
- */
-int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
-                                               struct drm_connector_state *state,
-                                               struct drm_property *property,
-                                               u64 val)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_digital_connector_state *intel_conn_state =
-               to_intel_digital_connector_state(state);
-
-       if (property == dev_priv->force_audio_property) {
-               intel_conn_state->force_audio = val;
-               return 0;
-       }
-
-       if (property == dev_priv->broadcast_rgb_property) {
-               intel_conn_state->broadcast_rgb = val;
-               return 0;
-       }
-
-       DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
-                        property->base.id, property->name);
-       return -EINVAL;
-}
-
-static bool blob_equal(const struct drm_property_blob *a,
-                      const struct drm_property_blob *b)
-{
-       if (a && b)
-               return a->length == b->length &&
-                       !memcmp(a->data, b->data, a->length);
-
-       return !a == !b;
-}
-
-int intel_digital_connector_atomic_check(struct drm_connector *conn,
-                                        struct drm_connector_state *new_state)
-{
-       struct intel_digital_connector_state *new_conn_state =
-               to_intel_digital_connector_state(new_state);
-       struct drm_connector_state *old_state =
-               drm_atomic_get_old_connector_state(new_state->state, conn);
-       struct intel_digital_connector_state *old_conn_state =
-               to_intel_digital_connector_state(old_state);
-       struct drm_crtc_state *crtc_state;
-
-       intel_hdcp_atomic_check(conn, old_state, new_state);
-
-       if (!new_state->crtc)
-               return 0;
-
-       crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
-
-       /*
-        * These properties are handled by fastset, and might not end
-        * up in a modeset.
-        */
-       if (new_conn_state->force_audio != old_conn_state->force_audio ||
-           new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
-           new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
-           new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
-           new_conn_state->base.content_type != old_conn_state->base.content_type ||
-           new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
-           !blob_equal(new_conn_state->base.hdr_output_metadata,
-                       old_conn_state->base.hdr_output_metadata))
-               crtc_state->mode_changed = true;
-
-       return 0;
-}
-
-/**
- * intel_digital_connector_duplicate_state - duplicate connector state
- * @connector: digital connector
- *
- * Allocates and returns a copy of the connector state (both common and
- * digital connector specific) for the specified connector.
- *
- * Returns: The newly allocated connector state, or NULL on failure.
- */
-struct drm_connector_state *
-intel_digital_connector_duplicate_state(struct drm_connector *connector)
-{
-       struct intel_digital_connector_state *state;
-
-       state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
-       if (!state)
-               return NULL;
-
-       __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
-       return &state->base;
-}
-
-/**
- * intel_crtc_duplicate_state - duplicate crtc state
- * @crtc: drm crtc
- *
- * Allocates and returns a copy of the crtc state (both common and
- * Intel-specific) for the specified crtc.
- *
- * Returns: The newly allocated crtc state, or NULL on failure.
- */
-struct drm_crtc_state *
-intel_crtc_duplicate_state(struct drm_crtc *crtc)
-{
-       struct intel_crtc_state *crtc_state;
-
-       crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
-       if (!crtc_state)
-               return NULL;
-
-       __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
-
-       crtc_state->update_pipe = false;
-       crtc_state->disable_lp_wm = false;
-       crtc_state->disable_cxsr = false;
-       crtc_state->update_wm_pre = false;
-       crtc_state->update_wm_post = false;
-       crtc_state->fb_changed = false;
-       crtc_state->fifo_changed = false;
-       crtc_state->wm.need_postvbl_update = false;
-       crtc_state->fb_bits = 0;
-       crtc_state->update_planes = 0;
-
-       return &crtc_state->base;
-}
-
-/**
- * intel_crtc_destroy_state - destroy crtc state
- * @crtc: drm crtc
- * @state: the state to destroy
- *
- * Destroys the crtc state (both common and Intel-specific) for the
- * specified crtc.
- */
-void
-intel_crtc_destroy_state(struct drm_crtc *crtc,
-                        struct drm_crtc_state *state)
-{
-       drm_atomic_helper_crtc_destroy_state(crtc, state);
-}
-
-static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
-                                     int num_scalers_need, struct intel_crtc *intel_crtc,
-                                     const char *name, int idx,
-                                     struct intel_plane_state *plane_state,
-                                     int *scaler_id)
-{
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-       int j;
-       u32 mode;
-
-       if (*scaler_id < 0) {
-               /* find a free scaler */
-               for (j = 0; j < intel_crtc->num_scalers; j++) {
-                       if (scaler_state->scalers[j].in_use)
-                               continue;
-
-                       *scaler_id = j;
-                       scaler_state->scalers[*scaler_id].in_use = 1;
-                       break;
-               }
-       }
-
-       if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
-               return;
-
-       /* set scaler mode */
-       if (plane_state && plane_state->base.fb &&
-           plane_state->base.fb->format->is_yuv &&
-           plane_state->base.fb->format->num_planes > 1) {
-               struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-               if (IS_GEN(dev_priv, 9) &&
-                   !IS_GEMINILAKE(dev_priv)) {
-                       mode = SKL_PS_SCALER_MODE_NV12;
-               } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
-                       /*
-                        * On gen11+'s HDR planes we only use the scaler for
-                        * scaling. They have a dedicated chroma upsampler, so
-                        * we don't need the scaler to upsample the UV plane.
-                        */
-                       mode = PS_SCALER_MODE_NORMAL;
-               } else {
-                       mode = PS_SCALER_MODE_PLANAR;
-
-                       if (plane_state->linked_plane)
-                               mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
-               }
-       } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
-               mode = PS_SCALER_MODE_NORMAL;
-       } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
-               /*
-                * When only one scaler is in use on a pipe with two scalers,
-                * scaler 0 operates in high quality (HQ) mode. In that case
-                * use scaler 0 to take advantage of HQ mode.
-                */
-               scaler_state->scalers[*scaler_id].in_use = 0;
-               *scaler_id = 0;
-               scaler_state->scalers[0].in_use = 1;
-               mode = SKL_PS_SCALER_MODE_HQ;
-       } else {
-               mode = SKL_PS_SCALER_MODE_DYN;
-       }
-
-       DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-                     intel_crtc->pipe, *scaler_id, name, idx);
-       scaler_state->scalers[*scaler_id].mode = mode;
-}
-
-/**
- * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev_priv: i915 device
- * @intel_crtc: intel crtc
- * @crtc_state: incoming crtc_state to validate and setup scalers
- *
- * This function sets up scalers based on staged scaling requests for
- * a @crtc and its planes. It is called from the crtc level check path. If the
- * request is supportable, it attaches scalers to the requested planes and crtc.
- *
- * This function takes into account the current scaler(s) in use by any planes
- * that are not part of this atomic state.
- *
- *  Returns:
- *         0 - scalers were set up successfully
- *         error code - otherwise
- */
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
-                              struct intel_crtc *intel_crtc,
-                              struct intel_crtc_state *crtc_state)
-{
-       struct drm_plane *plane = NULL;
-       struct intel_plane *intel_plane;
-       struct intel_plane_state *plane_state = NULL;
-       struct intel_crtc_scaler_state *scaler_state =
-               &crtc_state->scaler_state;
-       struct drm_atomic_state *drm_state = crtc_state->base.state;
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
-       int num_scalers_need;
-       int i;
-
-       num_scalers_need = hweight32(scaler_state->scaler_users);
-
-       /*
-        * High level flow:
-        * - staged scaler requests are already in scaler_state->scaler_users
-        * - check whether staged scaling requests can be supported
-        * - add planes using scalers that aren't in current transaction
-        * - assign scalers to requested users
-        * - as part of plane commit, scalers will be committed
-        *   (i.e., either attached or detached) to the respective planes in hw
-        * - as part of crtc_commit, the scaler will be either attached to or
-        *   detached from the crtc in hw
-        */
-
-       /* fail if required scalers > available scalers */
-       if (num_scalers_need > intel_crtc->num_scalers) {
-               DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
-                       num_scalers_need, intel_crtc->num_scalers);
-               return -EINVAL;
-       }
-
-       /* walk through scaler_users bits and start assigning scalers */
-       for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
-               int *scaler_id;
-               const char *name;
-               int idx;
-
-               /* skip if scaler not required */
-               if (!(scaler_state->scaler_users & (1 << i)))
-                       continue;
-
-               if (i == SKL_CRTC_INDEX) {
-                       name = "CRTC";
-                       idx = intel_crtc->base.base.id;
-
-                       /* panel fitter case: assign as a crtc scaler */
-                       scaler_id = &scaler_state->scaler_id;
-               } else {
-                       name = "PLANE";
-
-                       /* plane scaler case: assign as a plane scaler */
-                       /* find the plane that set the bit as scaler_user */
-                       plane = drm_state->planes[i].ptr;
-
-                       /*
-                        * To enable/disable HQ mode, add planes that are using
-                        * a scaler into this transaction.
-                        */
-                       if (!plane) {
-                               struct drm_plane_state *state;
-                               plane = drm_plane_from_index(&dev_priv->drm, i);
-                               state = drm_atomic_get_plane_state(drm_state, plane);
-                               if (IS_ERR(state)) {
-                                       DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
-                                               plane->base.id);
-                                       return PTR_ERR(state);
-                               }
-
-                               /*
-                                * The plane is added after plane checks are run,
-                                * but since this plane is unchanged, just do the
-                                * minimum required validation.
-                                */
-                               crtc_state->base.planes_changed = true;
-                       }
-
-                       intel_plane = to_intel_plane(plane);
-                       idx = plane->base.id;
-
-                       /* a plane on a different crtc cannot be a scaler user of this crtc */
-                       if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
-                               continue;
-
-                       plane_state = intel_atomic_get_new_plane_state(intel_state,
-                                                                      intel_plane);
-                       scaler_id = &plane_state->scaler_id;
-               }
-
-               intel_atomic_setup_scaler(scaler_state, num_scalers_need,
-                                         intel_crtc, name, idx,
-                                         plane_state, scaler_id);
-       }
-
-       return 0;
-}
-
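The assignment above boils down to walking a user bitmask and handing out free scaler slots. A standalone toy of that pattern (plain C with made-up types, not driver code):

#include <stdio.h>

#define NUM_SCALERS 2   /* e.g. two scalers on a pipe */
#define NUM_USERS   32  /* width of the scaler_users bitmask */

struct scaler { int in_use; };

/* Hand out the first free scaler, or -1 if all are taken. */
static int assign_scaler(struct scaler *scalers, int num_scalers)
{
        int j;

        for (j = 0; j < num_scalers; j++) {
                if (!scalers[j].in_use) {
                        scalers[j].in_use = 1;
                        return j;
                }
        }
        return -1;
}

int main(void)
{
        struct scaler scalers[NUM_SCALERS] = { { 0 }, { 0 } };
        unsigned int scaler_users = (1u << 3) | (1u << 5); /* two example users */
        int i;

        for (i = 0; i < NUM_USERS; i++) {
                if (!(scaler_users & (1u << i)))
                        continue;
                printf("user %d -> scaler %d\n",
                       i, assign_scaler(scalers, NUM_SCALERS));
        }
        return 0;
}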
-struct drm_atomic_state *
-intel_atomic_state_alloc(struct drm_device *dev)
-{
-       struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-       if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
-               kfree(state);
-               return NULL;
-       }
-
-       return &state->base;
-}
-
-void intel_atomic_state_clear(struct drm_atomic_state *s)
-{
-       struct intel_atomic_state *state = to_intel_atomic_state(s);
-       drm_atomic_state_default_clear(&state->base);
-       state->dpll_set = state->modeset = false;
-}
-
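These two functions are the hooks that let the driver subclass struct drm_atomic_state. A hedged sketch of the usual wiring into the mode config vtable (struct name illustrative, remaining callbacks elided):

static const struct drm_mode_config_funcs example_mode_config_funcs = {
        /* fb_create, atomic_check, atomic_commit and friends elided */
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
};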
-struct intel_crtc_state *
-intel_atomic_get_crtc_state(struct drm_atomic_state *state,
-                           struct intel_crtc *crtc)
-{
-       struct drm_crtc_state *crtc_state;
-       crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
-       if (IS_ERR(crtc_state))
-               return ERR_CAST(crtc_state);
-
-       return to_intel_crtc_state(crtc_state);
-}
diff --git a/drivers/gpu/drm/i915/intel_atomic.h b/drivers/gpu/drm/i915/intel_atomic.h
deleted file mode 100644 (file)
index 1c8507d..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_ATOMIC_H__
-#define __INTEL_ATOMIC_H__
-
-#include <linux/types.h>
-
-struct drm_atomic_state;
-struct drm_connector;
-struct drm_connector_state;
-struct drm_crtc;
-struct drm_crtc_state;
-struct drm_device;
-struct drm_i915_private;
-struct drm_property;
-struct intel_crtc;
-struct intel_crtc_state;
-
-int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
-                                               const struct drm_connector_state *state,
-                                               struct drm_property *property,
-                                               u64 *val);
-int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
-                                               struct drm_connector_state *state,
-                                               struct drm_property *property,
-                                               u64 val);
-int intel_digital_connector_atomic_check(struct drm_connector *conn,
-                                        struct drm_connector_state *new_state);
-struct drm_connector_state *
-intel_digital_connector_duplicate_state(struct drm_connector *connector);
-
-struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
-void intel_crtc_destroy_state(struct drm_crtc *crtc,
-                              struct drm_crtc_state *state);
-struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
-void intel_atomic_state_clear(struct drm_atomic_state *state);
-
-struct intel_crtc_state *
-intel_atomic_get_crtc_state(struct drm_atomic_state *state,
-                           struct intel_crtc *crtc);
-
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
-                              struct intel_crtc *intel_crtc,
-                              struct intel_crtc_state *crtc_state);
-
-#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
deleted file mode 100644 (file)
index 30bd4e7..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * DOC: atomic plane helpers
- *
- * The functions here are used by the atomic plane helper functions to
- * implement legacy plane updates (i.e., drm_plane->update_plane() and
- * drm_plane->disable_plane()).  This allows plane updates to use the
- * atomic state infrastructure and perform plane updates as separate
- * prepare/check/commit/cleanup steps.
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
-
-#include "intel_atomic_plane.h"
-#include "intel_drv.h"
-#include "intel_pm.h"
-#include "intel_sprite.h"
-
-struct intel_plane *intel_plane_alloc(void)
-{
-       struct intel_plane_state *plane_state;
-       struct intel_plane *plane;
-
-       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
-       if (!plane)
-               return ERR_PTR(-ENOMEM);
-
-       plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
-       if (!plane_state) {
-               kfree(plane);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
-       plane_state->scaler_id = -1;
-
-       return plane;
-}
-
-void intel_plane_free(struct intel_plane *plane)
-{
-       intel_plane_destroy_state(&plane->base, plane->base.state);
-       kfree(plane);
-}
-
-/**
- * intel_plane_duplicate_state - duplicate plane state
- * @plane: drm plane
- *
- * Allocates and returns a copy of the plane state (both common and
- * Intel-specific) for the specified plane.
- *
- * Returns: The newly allocated plane state, or NULL on failure.
- */
-struct drm_plane_state *
-intel_plane_duplicate_state(struct drm_plane *plane)
-{
-       struct drm_plane_state *state;
-       struct intel_plane_state *intel_state;
-
-       intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
-
-       if (!intel_state)
-               return NULL;
-
-       state = &intel_state->base;
-
-       __drm_atomic_helper_plane_duplicate_state(plane, state);
-
-       intel_state->vma = NULL;
-       intel_state->flags = 0;
-
-       return state;
-}
-
-/**
- * intel_plane_destroy_state - destroy plane state
- * @plane: drm plane
- * @state: state object to destroy
- *
- * Destroys the plane state (both common and Intel-specific) for the
- * specified plane.
- */
-void
-intel_plane_destroy_state(struct drm_plane *plane,
-                         struct drm_plane_state *state)
-{
-       WARN_ON(to_intel_plane_state(state)->vma);
-
-       drm_atomic_helper_plane_destroy_state(plane, state);
-}
-
-unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
-                                  const struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int cpp;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       cpp = fb->format->cpp[0];
-
-       /*
-        * Based on HSD#:1408715493
-        * NV12 cpp == 4, P010 cpp == 8
-        *
-        * FIXME what is the logic behind this?
-        */
-       if (fb->format->is_yuv && fb->format->num_planes > 1)
-               cpp *= 4;
-
-       return cpp * crtc_state->pixel_rate;
-}
-
-int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
-                                       struct intel_crtc_state *new_crtc_state,
-                                       const struct intel_plane_state *old_plane_state,
-                                       struct intel_plane_state *new_plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
-       int ret;
-
-       new_crtc_state->active_planes &= ~BIT(plane->id);
-       new_crtc_state->nv12_planes &= ~BIT(plane->id);
-       new_crtc_state->c8_planes &= ~BIT(plane->id);
-       new_crtc_state->data_rate[plane->id] = 0;
-       new_plane_state->base.visible = false;
-
-       if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
-               return 0;
-
-       ret = plane->check_plane(new_crtc_state, new_plane_state);
-       if (ret)
-               return ret;
-
-       /* FIXME pre-g4x doesn't work like this */
-       if (new_plane_state->base.visible)
-               new_crtc_state->active_planes |= BIT(plane->id);
-
-       if (new_plane_state->base.visible &&
-           is_planar_yuv_format(new_plane_state->base.fb->format->format))
-               new_crtc_state->nv12_planes |= BIT(plane->id);
-
-       if (new_plane_state->base.visible &&
-           new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
-               new_crtc_state->c8_planes |= BIT(plane->id);
-
-       if (new_plane_state->base.visible || old_plane_state->base.visible)
-               new_crtc_state->update_planes |= BIT(plane->id);
-
-       new_crtc_state->data_rate[plane->id] =
-               intel_plane_data_rate(new_crtc_state, new_plane_state);
-
-       return intel_plane_atomic_calc_changes(old_crtc_state,
-                                              &new_crtc_state->base,
-                                              old_plane_state,
-                                              &new_plane_state->base);
-}
-
-static int intel_plane_atomic_check(struct drm_plane *plane,
-                                   struct drm_plane_state *new_plane_state)
-{
-       struct drm_atomic_state *state = new_plane_state->state;
-       const struct drm_plane_state *old_plane_state =
-               drm_atomic_get_old_plane_state(state, plane);
-       struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
-       const struct drm_crtc_state *old_crtc_state;
-       struct drm_crtc_state *new_crtc_state;
-
-       new_plane_state->visible = false;
-       if (!crtc)
-               return 0;
-
-       old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
-       new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
-       return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
-                                                  to_intel_crtc_state(new_crtc_state),
-                                                  to_intel_plane_state(old_plane_state),
-                                                  to_intel_plane_state(new_plane_state));
-}
-
-static struct intel_plane *
-skl_next_plane_to_commit(struct intel_atomic_state *state,
-                        struct intel_crtc *crtc,
-                        struct skl_ddb_entry entries_y[I915_MAX_PLANES],
-                        struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
-                        unsigned int *update_mask)
-{
-       struct intel_crtc_state *crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       struct intel_plane_state *plane_state;
-       struct intel_plane *plane;
-       int i;
-
-       if (*update_mask == 0)
-               return NULL;
-
-       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-               enum plane_id plane_id = plane->id;
-
-               if (crtc->pipe != plane->pipe ||
-                   !(*update_mask & BIT(plane_id)))
-                       continue;
-
-               if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
-                                               entries_y,
-                                               I915_MAX_PLANES, plane_id) ||
-                   skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
-                                               entries_uv,
-                                               I915_MAX_PLANES, plane_id))
-                       continue;
-
-               *update_mask &= ~BIT(plane_id);
-               entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
-               entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
-
-               return plane;
-       }
-
-       /* should never happen */
-       WARN_ON(1);
-
-       return NULL;
-}
-
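The ordering enforced by skl_next_plane_to_commit() reduces to a simple rule: only program a plane once its new allocation no longer overlaps anything that is still programmed with old values. A standalone toy of that rule, with 1D regions standing in for the real DDB entries (purely illustrative, not driver code):

#include <stdio.h>
#include <stdbool.h>

#define N 3

struct region { int start, end; };      /* half-open [start, end) */

static bool overlap(struct region a, struct region b)
{
        return a.start < b.end && b.start < a.end;
}

int main(void)
{
        /* currently programmed regions vs. the targets we want to commit */
        struct region cur[N] = { { 0, 4 }, { 4, 8 }, { 8, 12 } };
        struct region tgt[N] = { { 0, 6 }, { 6, 8 }, { 8, 12 } };
        bool pending[N] = { true, true, true };
        int remaining = N;

        while (remaining) {
                int picked = -1, i, j;

                for (i = 0; i < N && picked < 0; i++) {
                        bool safe = pending[i];

                        for (j = 0; safe && j < N; j++)
                                if (j != i && overlap(tgt[i], cur[j]))
                                        safe = false;
                        if (safe)
                                picked = i;
                }

                if (picked < 0) {       /* no conflict-free candidate left */
                        puts("no safe ordering");
                        break;
                }

                printf("commit plane %d\n", picked);
                cur[picked] = tgt[picked];
                pending[picked] = false;
                remaining--;
        }
        return 0;
}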
-void intel_update_plane(struct intel_plane *plane,
-                       const struct intel_crtc_state *crtc_state,
-                       const struct intel_plane_state *plane_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       trace_intel_update_plane(&plane->base, crtc);
-       plane->update_plane(plane, crtc_state, plane_state);
-}
-
-void intel_update_slave(struct intel_plane *plane,
-                       const struct intel_crtc_state *crtc_state,
-                       const struct intel_plane_state *plane_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       trace_intel_update_plane(&plane->base, crtc);
-       plane->update_slave(plane, crtc_state, plane_state);
-}
-
-void intel_disable_plane(struct intel_plane *plane,
-                        const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       trace_intel_disable_plane(&plane->base, crtc);
-       plane->disable_plane(plane, crtc_state);
-}
-
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
-                              struct intel_crtc *crtc)
-{
-       struct intel_crtc_state *old_crtc_state =
-               intel_atomic_get_old_crtc_state(state, crtc);
-       struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       struct skl_ddb_entry entries_y[I915_MAX_PLANES];
-       struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
-       u32 update_mask = new_crtc_state->update_planes;
-       struct intel_plane *plane;
-
-       memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
-              sizeof(old_crtc_state->wm.skl.plane_ddb_y));
-       memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
-              sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
-
-       while ((plane = skl_next_plane_to_commit(state, crtc,
-                                                entries_y, entries_uv,
-                                                &update_mask))) {
-               struct intel_plane_state *new_plane_state =
-                       intel_atomic_get_new_plane_state(state, plane);
-
-               if (new_plane_state->base.visible) {
-                       intel_update_plane(plane, new_crtc_state, new_plane_state);
-               } else if (new_plane_state->slave) {
-                       struct intel_plane *master =
-                               new_plane_state->linked_plane;
-
-                       /*
-                        * We update the slave plane from this function because
-                        * programming it from the master plane's update_plane
-                        * callback runs into issues when the Y plane is
-                        * reassigned, disabled or used by a different plane.
-                        *
-                        * The slave plane is updated with the master plane's
-                        * plane_state.
-                        */
-                       new_plane_state =
-                               intel_atomic_get_new_plane_state(state, master);
-
-                       intel_update_slave(plane, new_crtc_state, new_plane_state);
-               } else {
-                       intel_disable_plane(plane, new_crtc_state);
-               }
-       }
-}
-
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
-                               struct intel_crtc *crtc)
-{
-       struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       u32 update_mask = new_crtc_state->update_planes;
-       struct intel_plane_state *new_plane_state;
-       struct intel_plane *plane;
-       int i;
-
-       for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
-               if (crtc->pipe != plane->pipe ||
-                   !(update_mask & BIT(plane->id)))
-                       continue;
-
-               if (new_plane_state->base.visible)
-                       intel_update_plane(plane, new_crtc_state, new_plane_state);
-               else
-                       intel_disable_plane(plane, new_crtc_state);
-       }
-}
-
-const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
-       .prepare_fb = intel_prepare_plane_fb,
-       .cleanup_fb = intel_cleanup_plane_fb,
-       .atomic_check = intel_plane_atomic_check,
-};
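For reference, a helper vtable like the one above is attached when a plane is created, via drm_plane_helper_add(). A minimal hedged sketch (the wrapper function and its plane argument are hypothetical):

/* Hypothetical init-time helper: 'plane' is an already-created drm_plane. */
static void example_attach_plane_helpers(struct drm_plane *plane)
{
        drm_plane_helper_add(plane, &intel_plane_helper_funcs);
}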
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/intel_atomic_plane.h
deleted file mode 100644 (file)
index 1437a87..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_ATOMIC_PLANE_H__
-#define __INTEL_ATOMIC_PLANE_H__
-
-#include <linux/types.h>
-
-struct drm_crtc_state;
-struct drm_plane;
-struct drm_property;
-struct intel_atomic_state;
-struct intel_crtc;
-struct intel_crtc_state;
-struct intel_plane;
-struct intel_plane_state;
-
-extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-
-unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
-                                  const struct intel_plane_state *plane_state);
-void intel_update_plane(struct intel_plane *plane,
-                       const struct intel_crtc_state *crtc_state,
-                       const struct intel_plane_state *plane_state);
-void intel_update_slave(struct intel_plane *plane,
-                       const struct intel_crtc_state *crtc_state,
-                       const struct intel_plane_state *plane_state);
-void intel_disable_plane(struct intel_plane *plane,
-                        const struct intel_crtc_state *crtc_state);
-struct intel_plane *intel_plane_alloc(void);
-void intel_plane_free(struct intel_plane *plane);
-struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
-void intel_plane_destroy_state(struct drm_plane *plane,
-                              struct drm_plane_state *state);
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
-                              struct intel_crtc *crtc);
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
-                               struct intel_crtc *crtc);
-int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
-                                       struct intel_crtc_state *crtc_state,
-                                       const struct intel_plane_state *old_plane_state,
-                                       struct intel_plane_state *intel_state);
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
-                                   struct drm_crtc_state *crtc_state,
-                                   const struct intel_plane_state *old_plane_state,
-                                   struct drm_plane_state *plane_state);
-
-#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
deleted file mode 100644 (file)
index 840daff..0000000
+++ /dev/null
@@ -1,1104 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/component.h>
-#include <linux/kernel.h>
-
-#include <drm/drm_edid.h>
-#include <drm/i915_component.h>
-
-#include "i915_drv.h"
-#include "intel_audio.h"
-#include "intel_drv.h"
-#include "intel_lpe_audio.h"
-
-/**
- * DOC: High Definition Audio over HDMI and Display Port
- *
- * The graphics and audio drivers together support High Definition Audio over
- * HDMI and Display Port. The audio programming sequences are divided into audio
- * codec and controller enable and disable sequences. The graphics driver
- * handles the audio codec sequences, while the audio driver handles the audio
- * controller sequences.
- *
- * The disable sequences must be performed before disabling the transcoder or
- * port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training. Therefore the audio
- * enable/disable sequences are part of the modeset sequence.
- *
- * The codec and controller sequences could be done either in parallel or in
- * series, but generally the ELDV/PD change in the codec sequence indicates to
- * the audio driver that the controller sequence should start. Indeed, most of
- * the co-operation between the graphics and audio drivers is handled via
- * audio-related registers. (The notable exception is power management, which
- * is not covered here.)
- *
- * The struct &i915_audio_component is used to interact between the graphics
- * and audio drivers. The struct &i915_audio_component_ops @ops in it is
- * defined in the graphics driver and called by the audio driver. The
- * struct &i915_audio_component_audio_ops @audio_ops is called from the i915
- * driver.
- */
-
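The ordering described above (codec enable only after the transcoder and port are up and link training has completed; codec disable before any of those are torn down) can be reduced to a standalone toy sequence. The stub names below are illustrative placeholders, not driver entry points:

#include <stdio.h>

static void enable_transcoder(void)   { puts("enable transcoder"); }
static void enable_port(void)         { puts("enable port"); }
static void link_training(void)       { puts("link training"); }
static void audio_codec_enable(void)  { puts("audio codec enable"); }
static void audio_codec_disable(void) { puts("audio codec disable"); }
static void disable_port(void)        { puts("disable port"); }
static void disable_transcoder(void)  { puts("disable transcoder"); }

int main(void)
{
        /* Enable path: audio strictly last. */
        enable_transcoder();
        enable_port();
        link_training();
        audio_codec_enable();

        /* Disable path: audio strictly first. */
        audio_codec_disable();
        disable_port();
        disable_transcoder();
        return 0;
}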
-/* DP N/M table */
-#define LC_810M        810000
-#define LC_540M        540000
-#define LC_270M        270000
-#define LC_162M        162000
-
-struct dp_aud_n_m {
-       int sample_rate;
-       int clock;
-       u16 m;
-       u16 n;
-};
-
-/* Values according to DP 1.4 Table 2-104 */
-static const struct dp_aud_n_m dp_aud_n_m[] = {
-       { 32000, LC_162M, 1024, 10125 },
-       { 44100, LC_162M, 784, 5625 },
-       { 48000, LC_162M, 512, 3375 },
-       { 64000, LC_162M, 2048, 10125 },
-       { 88200, LC_162M, 1568, 5625 },
-       { 96000, LC_162M, 1024, 3375 },
-       { 128000, LC_162M, 4096, 10125 },
-       { 176400, LC_162M, 3136, 5625 },
-       { 192000, LC_162M, 2048, 3375 },
-       { 32000, LC_270M, 1024, 16875 },
-       { 44100, LC_270M, 784, 9375 },
-       { 48000, LC_270M, 512, 5625 },
-       { 64000, LC_270M, 2048, 16875 },
-       { 88200, LC_270M, 1568, 9375 },
-       { 96000, LC_270M, 1024, 5625 },
-       { 128000, LC_270M, 4096, 16875 },
-       { 176400, LC_270M, 3136, 9375 },
-       { 192000, LC_270M, 2048, 5625 },
-       { 32000, LC_540M, 1024, 33750 },
-       { 44100, LC_540M, 784, 18750 },
-       { 48000, LC_540M, 512, 11250 },
-       { 64000, LC_540M, 2048, 33750 },
-       { 88200, LC_540M, 1568, 18750 },
-       { 96000, LC_540M, 1024, 11250 },
-       { 128000, LC_540M, 4096, 33750 },
-       { 176400, LC_540M, 3136, 18750 },
-       { 192000, LC_540M, 2048, 11250 },
-       { 32000, LC_810M, 1024, 50625 },
-       { 44100, LC_810M, 784, 28125 },
-       { 48000, LC_810M, 512, 16875 },
-       { 64000, LC_810M, 2048, 50625 },
-       { 88200, LC_810M, 1568, 28125 },
-       { 96000, LC_810M, 1024, 16875 },
-       { 128000, LC_810M, 4096, 50625 },
-       { 176400, LC_810M, 3136, 28125 },
-       { 192000, LC_810M, 2048, 16875 },
-};
-
-static const struct dp_aud_n_m *
-audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
-               if (rate == dp_aud_n_m[i].sample_rate &&
-                   crtc_state->port_clock == dp_aud_n_m[i].clock)
-                       return &dp_aud_n_m[i];
-       }
-
-       return NULL;
-}
-
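As a sanity check, the rows above are consistent with the DP audio timestamp relation Maud / Naud = 512 * fs / f_LS_Clk (stated here as an assumption about how the table was derived). A standalone check of the 48 kHz / 1.62 GHz row:

#include <stdio.h>

int main(void)
{
        const long long fs = 48000;       /* Hz */
        const long long ls_clk = 162000;  /* kHz, i.e. LC_162M */
        const long long m = 512, n = 3375;

        /* 512 * fs / (ls_clk * 1000) == m / n, cross-multiplied */
        printf("%s\n", 512 * fs * n == m * ls_clk * 1000 ? "match" : "mismatch");
        return 0;
}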
-static const struct {
-       int clock;
-       u32 config;
-} hdmi_audio_clock[] = {
-       { 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
-       { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
-       { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
-       { 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
-       { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
-       { 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
-       { 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
-       { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
-       { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
-       { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
-};
-
-/* HDMI N/CTS table */
-#define TMDS_297M 297000
-#define TMDS_296M 296703
-#define TMDS_594M 594000
-#define TMDS_593M 593407
-
-static const struct {
-       int sample_rate;
-       int clock;
-       int n;
-       int cts;
-} hdmi_aud_ncts[] = {
-       { 32000, TMDS_296M, 5824, 421875 },
-       { 32000, TMDS_297M, 3072, 222750 },
-       { 32000, TMDS_593M, 5824, 843750 },
-       { 32000, TMDS_594M, 3072, 445500 },
-       { 44100, TMDS_296M, 4459, 234375 },
-       { 44100, TMDS_297M, 4704, 247500 },
-       { 44100, TMDS_593M, 8918, 937500 },
-       { 44100, TMDS_594M, 9408, 990000 },
-       { 88200, TMDS_296M, 8918, 234375 },
-       { 88200, TMDS_297M, 9408, 247500 },
-       { 88200, TMDS_593M, 17836, 937500 },
-       { 88200, TMDS_594M, 18816, 990000 },
-       { 176400, TMDS_296M, 17836, 234375 },
-       { 176400, TMDS_297M, 18816, 247500 },
-       { 176400, TMDS_593M, 35672, 937500 },
-       { 176400, TMDS_594M, 37632, 990000 },
-       { 48000, TMDS_296M, 5824, 281250 },
-       { 48000, TMDS_297M, 5120, 247500 },
-       { 48000, TMDS_593M, 5824, 562500 },
-       { 48000, TMDS_594M, 6144, 594000 },
-       { 96000, TMDS_296M, 11648, 281250 },
-       { 96000, TMDS_297M, 10240, 247500 },
-       { 96000, TMDS_593M, 11648, 562500 },
-       { 96000, TMDS_594M, 12288, 594000 },
-       { 192000, TMDS_296M, 23296, 281250 },
-       { 192000, TMDS_297M, 20480, 247500 },
-       { 192000, TMDS_593M, 23296, 562500 },
-       { 192000, TMDS_594M, 24576, 594000 },
-};
-
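Likewise, the N/CTS rows above are consistent with the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS (again an assumption about the table's derivation). A standalone check of the 48 kHz / 297 MHz row:

#include <stdio.h>

int main(void)
{
        const long long fs = 48000;      /* Hz */
        const long long tmds = 297000;   /* kHz, i.e. TMDS_297M */
        const long long n = 5120, cts = 247500;

        /* 128 * fs == (tmds * 1000) * n / cts, cross-multiplied */
        printf("%s\n", 128 * fs * cts == tmds * 1000 * n ? "match" : "mismatch");
        return 0;
}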
-/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
-{
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->base.adjusted_mode;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
-               if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
-                       break;
-       }
-
-       if (i == ARRAY_SIZE(hdmi_audio_clock)) {
-               DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
-                             adjusted_mode->crtc_clock);
-               i = 1;
-       }
-
-       DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
-                     hdmi_audio_clock[i].clock,
-                     hdmi_audio_clock[i].config);
-
-       return hdmi_audio_clock[i].config;
-}
-
-static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
-                                  int rate)
-{
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->base.adjusted_mode;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
-               if (rate == hdmi_aud_ncts[i].sample_rate &&
-                   adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
-                       return hdmi_aud_ncts[i].n;
-               }
-       }
-       return 0;
-}
-
-static bool intel_eld_uptodate(struct drm_connector *connector,
-                              i915_reg_t reg_eldv, u32 bits_eldv,
-                              i915_reg_t reg_elda, u32 bits_elda,
-                              i915_reg_t reg_edid)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       const u8 *eld = connector->eld;
-       u32 tmp;
-       int i;
-
-       tmp = I915_READ(reg_eldv);
-       tmp &= bits_eldv;
-
-       if (!tmp)
-               return false;
-
-       tmp = I915_READ(reg_elda);
-       tmp &= ~bits_elda;
-       I915_WRITE(reg_elda, tmp);
-
-       for (i = 0; i < drm_eld_size(eld) / 4; i++)
-               if (I915_READ(reg_edid) != *((const u32 *)eld + i))
-                       return false;
-
-       return true;
-}
-
-static void g4x_audio_codec_disable(struct intel_encoder *encoder,
-                                   const struct intel_crtc_state *old_crtc_state,
-                                   const struct drm_connector_state *old_conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 eldv, tmp;
-
-       DRM_DEBUG_KMS("Disable audio codec\n");
-
-       tmp = I915_READ(G4X_AUD_VID_DID);
-       if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
-               eldv = G4X_ELDV_DEVCL_DEVBLC;
-       else
-               eldv = G4X_ELDV_DEVCTG;
-
-       /* Invalidate ELD */
-       tmp = I915_READ(G4X_AUD_CNTL_ST);
-       tmp &= ~eldv;
-       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
-}
-
-static void g4x_audio_codec_enable(struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *crtc_state,
-                                  const struct drm_connector_state *conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct drm_connector *connector = conn_state->connector;
-       const u8 *eld = connector->eld;
-       u32 eldv;
-       u32 tmp;
-       int len, i;
-
-       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
-
-       tmp = I915_READ(G4X_AUD_VID_DID);
-       if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
-               eldv = G4X_ELDV_DEVCL_DEVBLC;
-       else
-               eldv = G4X_ELDV_DEVCTG;
-
-       if (intel_eld_uptodate(connector,
-                              G4X_AUD_CNTL_ST, eldv,
-                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
-                              G4X_HDMIW_HDMIEDID))
-               return;
-
-       tmp = I915_READ(G4X_AUD_CNTL_ST);
-       tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
-       len = (tmp >> 9) & 0x1f;                /* ELD buffer size */
-       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
-
-       len = min(drm_eld_size(eld) / 4, len);
-       DRM_DEBUG_DRIVER("ELD size %d\n", len);
-       for (i = 0; i < len; i++)
-               I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
-
-       tmp = I915_READ(G4X_AUD_CNTL_ST);
-       tmp |= eldv;
-       I915_WRITE(G4X_AUD_CNTL_ST, tmp);
-}
-
-static void
-hsw_dp_audio_config_update(struct intel_encoder *encoder,
-                          const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct i915_audio_component *acomp = dev_priv->audio_component;
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       enum port port = encoder->port;
-       const struct dp_aud_n_m *nm;
-       int rate;
-       u32 tmp;
-
-       rate = acomp ? acomp->aud_sample_rate[port] : 0;
-       nm = audio_config_dp_get_n_m(crtc_state, rate);
-       if (nm)
-               DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
-       else
-               DRM_DEBUG_KMS("using automatic Maud, Naud\n");
-
-       tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
-       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
-       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
-       tmp |= AUD_CONFIG_N_VALUE_INDEX;
-
-       if (nm) {
-               tmp &= ~AUD_CONFIG_N_MASK;
-               tmp |= AUD_CONFIG_N(nm->n);
-               tmp |= AUD_CONFIG_N_PROG_ENABLE;
-       }
-
-       I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
-
-       tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
-       tmp &= ~AUD_CONFIG_M_MASK;
-       tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
-       tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
-
-       if (nm) {
-               tmp |= nm->m;
-               tmp |= AUD_M_CTS_M_VALUE_INDEX;
-               tmp |= AUD_M_CTS_M_PROG_ENABLE;
-       }
-
-       I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
-}
-
-static void
-hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
-                            const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct i915_audio_component *acomp = dev_priv->audio_component;
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       enum port port = encoder->port;
-       int n, rate;
-       u32 tmp;
-
-       rate = acomp ? acomp->aud_sample_rate[port] : 0;
-
-       tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
-       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
-       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
-       tmp |= audio_config_hdmi_pixel_clock(crtc_state);
-
-       n = audio_config_hdmi_get_n(crtc_state, rate);
-       if (n != 0) {
-               DRM_DEBUG_KMS("using N %d\n", n);
-
-               tmp &= ~AUD_CONFIG_N_MASK;
-               tmp |= AUD_CONFIG_N(n);
-               tmp |= AUD_CONFIG_N_PROG_ENABLE;
-       } else {
-               DRM_DEBUG_KMS("using automatic N\n");
-       }
-
-       I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
-
-       /*
-        * Let's disable "Enable CTS or M Prog bit"
-        * and let HW calculate the value
-        */
-       tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
-       tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
-       tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
-       I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
-}
-
-static void
-hsw_audio_config_update(struct intel_encoder *encoder,
-                       const struct intel_crtc_state *crtc_state)
-{
-       if (intel_crtc_has_dp_encoder(crtc_state))
-               hsw_dp_audio_config_update(encoder, crtc_state);
-       else
-               hsw_hdmi_audio_config_update(encoder, crtc_state);
-}
-
-static void hsw_audio_codec_disable(struct intel_encoder *encoder,
-                                   const struct intel_crtc_state *old_crtc_state,
-                                   const struct drm_connector_state *old_conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-       u32 tmp;
-
-       DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
-                     transcoder_name(cpu_transcoder));
-
-       mutex_lock(&dev_priv->av_mutex);
-
-       /* Disable timestamps */
-       tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
-       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
-       tmp |= AUD_CONFIG_N_PROG_ENABLE;
-       tmp &= ~AUD_CONFIG_UPPER_N_MASK;
-       tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_crtc_has_dp_encoder(old_crtc_state))
-               tmp |= AUD_CONFIG_N_VALUE_INDEX;
-       I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
-
-       /* Invalidate ELD */
-       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-       tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
-       tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
-       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
-
-       mutex_unlock(&dev_priv->av_mutex);
-}
-
-static void hsw_audio_codec_enable(struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *crtc_state,
-                                  const struct drm_connector_state *conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct drm_connector *connector = conn_state->connector;
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       const u8 *eld = connector->eld;
-       u32 tmp;
-       int len, i;
-
-       DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
-                     transcoder_name(cpu_transcoder), drm_eld_size(eld));
-
-       mutex_lock(&dev_priv->av_mutex);
-
-       /* Enable audio presence detect, invalidate ELD */
-       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-       tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
-       tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
-       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
-
-       /*
-        * FIXME: We're supposed to wait for vblank here, but we have vblanks
-        * disabled during the mode set. The proper fix would be to push the
-        * rest of the setup into a vblank work item, queued here, but the
-        * infrastructure is not there yet.
-        */
-
-       /* Reset ELD write address */
-       tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
-       tmp &= ~IBX_ELD_ADDRESS_MASK;
-       I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
-
-       /* Up to 84 bytes of hw ELD buffer */
-       len = min(drm_eld_size(eld), 84);
-       for (i = 0; i < len / 4; i++)
-               I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
-
-       /* ELD valid */
-       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-       tmp |= AUDIO_ELD_VALID(cpu_transcoder);
-       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
-
-       /* Enable timestamps */
-       hsw_audio_config_update(encoder, crtc_state);
-
-       mutex_unlock(&dev_priv->av_mutex);
-}
-
-static void ilk_audio_codec_disable(struct intel_encoder *encoder,
-                                   const struct intel_crtc_state *old_crtc_state,
-                                   const struct drm_connector_state *old_conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       enum pipe pipe = crtc->pipe;
-       enum port port = encoder->port;
-       u32 tmp, eldv;
-       i915_reg_t aud_config, aud_cntrl_st2;
-
-       DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
-                     port_name(port), pipe_name(pipe));
-
-       if (WARN_ON(port == PORT_A))
-               return;
-
-       if (HAS_PCH_IBX(dev_priv)) {
-               aud_config = IBX_AUD_CFG(pipe);
-               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               aud_config = VLV_AUD_CFG(pipe);
-               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
-       } else {
-               aud_config = CPT_AUD_CFG(pipe);
-               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
-       }
-
-       /* Disable timestamps */
-       tmp = I915_READ(aud_config);
-       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
-       tmp |= AUD_CONFIG_N_PROG_ENABLE;
-       tmp &= ~AUD_CONFIG_UPPER_N_MASK;
-       tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_crtc_has_dp_encoder(old_crtc_state))
-               tmp |= AUD_CONFIG_N_VALUE_INDEX;
-       I915_WRITE(aud_config, tmp);
-
-       eldv = IBX_ELD_VALID(port);
-
-       /* Invalidate ELD */
-       tmp = I915_READ(aud_cntrl_st2);
-       tmp &= ~eldv;
-       I915_WRITE(aud_cntrl_st2, tmp);
-}
-
-static void ilk_audio_codec_enable(struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *crtc_state,
-                                  const struct drm_connector_state *conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_connector *connector = conn_state->connector;
-       enum pipe pipe = crtc->pipe;
-       enum port port = encoder->port;
-       const u8 *eld = connector->eld;
-       u32 tmp, eldv;
-       int len, i;
-       i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
-
-       DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
-                     port_name(port), pipe_name(pipe), drm_eld_size(eld));
-
-       if (WARN_ON(port == PORT_A))
-               return;
-
-       /*
-        * FIXME: We're supposed to wait for vblank here, but we have vblanks
-        * disabled during the mode set. The proper fix would be to push the
-        * rest of the setup into a vblank work item, queued here, but the
-        * infrastructure is not there yet.
-        */
-
-       if (HAS_PCH_IBX(dev_priv)) {
-               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
-               aud_config = IBX_AUD_CFG(pipe);
-               aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
-               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-       } else if (IS_VALLEYVIEW(dev_priv) ||
-                  IS_CHERRYVIEW(dev_priv)) {
-               hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
-               aud_config = VLV_AUD_CFG(pipe);
-               aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
-               aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
-       } else {
-               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
-               aud_config = CPT_AUD_CFG(pipe);
-               aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
-               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
-       }
-
-       eldv = IBX_ELD_VALID(port);
-
-       /* Invalidate ELD */
-       tmp = I915_READ(aud_cntrl_st2);
-       tmp &= ~eldv;
-       I915_WRITE(aud_cntrl_st2, tmp);
-
-       /* Reset ELD write address */
-       tmp = I915_READ(aud_cntl_st);
-       tmp &= ~IBX_ELD_ADDRESS_MASK;
-       I915_WRITE(aud_cntl_st, tmp);
-
-       /* Up to 84 bytes of hw ELD buffer */
-       len = min(drm_eld_size(eld), 84);
-       for (i = 0; i < len / 4; i++)
-               I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
-
-       /* ELD valid */
-       tmp = I915_READ(aud_cntrl_st2);
-       tmp |= eldv;
-       I915_WRITE(aud_cntrl_st2, tmp);
-
-       /* Enable timestamps */
-       tmp = I915_READ(aud_config);
-       tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
-       tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
-       tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_crtc_has_dp_encoder(crtc_state))
-               tmp |= AUD_CONFIG_N_VALUE_INDEX;
-       else
-               tmp |= audio_config_hdmi_pixel_clock(crtc_state);
-       I915_WRITE(aud_config, tmp);
-}
-
-/**
- * intel_audio_codec_enable - Enable the audio codec for HD audio
- * @encoder: encoder on which to enable audio
- * @crtc_state: pointer to the current crtc state.
- * @conn_state: pointer to the current connector state.
- *
- * The enable sequences may only be performed after enabling the transcoder and
- * port, and after completed link training.
- */
-void intel_audio_codec_enable(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *crtc_state,
-                             const struct drm_connector_state *conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct i915_audio_component *acomp = dev_priv->audio_component;
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_connector *connector = conn_state->connector;
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->base.adjusted_mode;
-       enum port port = encoder->port;
-       enum pipe pipe = crtc->pipe;
-
-       /* FIXME precompute the ELD in .compute_config() */
-       if (!connector->eld[0])
-               DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
-                             connector->base.id, connector->name);
-
-       DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-                        connector->base.id,
-                        connector->name,
-                        connector->encoder->base.id,
-                        connector->encoder->name);
-
-       connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
-
-       if (dev_priv->display.audio_codec_enable)
-               dev_priv->display.audio_codec_enable(encoder,
-                                                    crtc_state,
-                                                    conn_state);
-
-       mutex_lock(&dev_priv->av_mutex);
-       encoder->audio_connector = connector;
-
-       /* referenced in audio callbacks */
-       dev_priv->av_enc_map[pipe] = encoder;
-       mutex_unlock(&dev_priv->av_mutex);
-
-       if (acomp && acomp->base.audio_ops &&
-           acomp->base.audio_ops->pin_eld_notify) {
-               /* audio drivers expect pipe = -1 to indicate Non-MST cases */
-               if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
-                       pipe = -1;
-               acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
-                                                (int) port, (int) pipe);
-       }
-
-       intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
-                              crtc_state->port_clock,
-                              intel_crtc_has_dp_encoder(crtc_state));
-}
-
-/**
- * intel_audio_codec_disable - Disable the audio codec for HD audio
- * @encoder: encoder on which to disable audio
- * @old_crtc_state: pointer to the old crtc state.
- * @old_conn_state: pointer to the old connector state.
- *
- * The disable sequences must be performed before disabling the transcoder or
- * port.
- */
-void intel_audio_codec_disable(struct intel_encoder *encoder,
-                              const struct intel_crtc_state *old_crtc_state,
-                              const struct drm_connector_state *old_conn_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct i915_audio_component *acomp = dev_priv->audio_component;
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       enum port port = encoder->port;
-       enum pipe pipe = crtc->pipe;
-
-       if (dev_priv->display.audio_codec_disable)
-               dev_priv->display.audio_codec_disable(encoder,
-                                                     old_crtc_state,
-                                                     old_conn_state);
-
-       mutex_lock(&dev_priv->av_mutex);
-       encoder->audio_connector = NULL;
-       dev_priv->av_enc_map[pipe] = NULL;
-       mutex_unlock(&dev_priv->av_mutex);
-
-       if (acomp && acomp->base.audio_ops &&
-           acomp->base.audio_ops->pin_eld_notify) {
-               /* audio drivers expect pipe = -1 to indicate Non-MST cases */
-               if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
-                       pipe = -1;
-               acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
-                                                (int) port, (int) pipe);
-       }
-
-       intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
-}
-
-/**
- * intel_init_audio_hooks - Set up chip specific audio hooks
- * @dev_priv: device private
- */
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
-{
-       if (IS_G4X(dev_priv)) {
-               dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
-               dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
-               dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
-       } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
-               dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
-               dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
-       } else if (HAS_PCH_SPLIT(dev_priv)) {
-               dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
-               dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
-       }
-}
-
-static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
-                                 bool enable)
-{
-       struct drm_modeset_acquire_ctx ctx;
-       struct drm_atomic_state *state;
-       int ret;
-
-       drm_modeset_acquire_init(&ctx, 0);
-       state = drm_atomic_state_alloc(&dev_priv->drm);
-       if (WARN_ON(!state))
-               return;
-
-       state->acquire_ctx = &ctx;
-
-retry:
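-       /* force_min_cdclk is in kHz; 2 * 96000 kHz is twice the 96 MHz BCLK */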
-       to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
-       to_intel_atomic_state(state)->cdclk.force_min_cdclk =
-               enable ? 2 * 96000 : 0;
-
-       /*
-        * Protects dev_priv->cdclk.force_min_cdclk
-        * Need to lock this here in case we have no active pipes
-        * and thus wouldn't lock it during the commit otherwise.
-        */
-       ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
-                              &ctx);
-       if (!ret)
-               ret = drm_atomic_commit(state);
-
-       if (ret == -EDEADLK) {
-               drm_atomic_state_clear(state);
-               drm_modeset_backoff(&ctx);
-               goto retry;
-       }
-
-       WARN_ON(ret);
-
-       drm_atomic_state_put(state);
-
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-}
-
-static unsigned long i915_audio_component_get_power(struct device *kdev)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-       intel_wakeref_t ret;
-
-       /* Catch potential impedance mismatches before they occur! */
-       BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
-
-       ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-
-       /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
-       if (dev_priv->audio_power_refcount++ == 0)
-               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-                       glk_force_audio_cdclk(dev_priv, true);
-
-       return ret;
-}
-
-static void i915_audio_component_put_power(struct device *kdev,
-                                          unsigned long cookie)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-
-       /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
-       if (--dev_priv->audio_power_refcount == 0)
-               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-                       glk_force_audio_cdclk(dev_priv, false);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
-}
-
-static void i915_audio_component_codec_wake_override(struct device *kdev,
-                                                    bool enable)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-       unsigned long cookie;
-       u32 tmp;
-
-       if (!IS_GEN(dev_priv, 9))
-               return;
-
-       cookie = i915_audio_component_get_power(kdev);
-
-       /*
-        * Enable/disable generating the codec wake signal, overriding the
-        * internal logic to generate the codec wake to controller.
-        */
-       tmp = I915_READ(HSW_AUD_CHICKENBIT);
-       tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
-       I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
-       usleep_range(1000, 1500);
-
-       if (enable) {
-               tmp = I915_READ(HSW_AUD_CHICKENBIT);
-               tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
-               I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
-               usleep_range(1000, 1500);
-       }
-
-       i915_audio_component_put_power(kdev, cookie);
-}
-
-/* Get CDCLK in kHz  */
-static int i915_audio_component_get_cdclk_freq(struct device *kdev)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-
-       if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
-               return -ENODEV;
-
-       return dev_priv->cdclk.hw.cdclk;
-}
-
-/*
- * Look up the intel_encoder for the given port and pipe. Encoders are
- * saved in av_enc_map[], indexed by pipe.
- * MST & (pipe >= 0): return av_enc_map[pipe] if its port matches
- * MST & (pipe < 0): invalid
- * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
- *   returns the intel_encoder with a matching port
- * Non-MST & (pipe < 0): return the intel_encoder with a matching port
- */
-static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
-                                              int port, int pipe)
-{
-       struct intel_encoder *encoder;
-
-       /* MST */
-       if (pipe >= 0) {
-               if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
-                       return NULL;
-
-               encoder = dev_priv->av_enc_map[pipe];
-               /*
-                * At boot the audio driver may not yet know whether the
-                * output is MST or not, so it polls all the port & pipe
-                * combinations.
-                */
-               if (encoder != NULL && encoder->port == port &&
-                   encoder->type == INTEL_OUTPUT_DP_MST)
-                       return encoder;
-       }
-
-       /* Non-MST */
-       if (pipe > 0)
-               return NULL;
-
-       for_each_pipe(dev_priv, pipe) {
-               encoder = dev_priv->av_enc_map[pipe];
-               if (encoder == NULL)
-                       continue;
-
-               if (encoder->type == INTEL_OUTPUT_DP_MST)
-                       continue;
-
-               if (port == encoder->port)
-                       return encoder;
-       }
-
-       return NULL;
-}
-
-static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
-                                               int pipe, int rate)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-       struct i915_audio_component *acomp = dev_priv->audio_component;
-       struct intel_encoder *encoder;
-       struct intel_crtc *crtc;
-       unsigned long cookie;
-       int err = 0;
-
-       if (!HAS_DDI(dev_priv))
-               return 0;
-
-       cookie = i915_audio_component_get_power(kdev);
-       mutex_lock(&dev_priv->av_mutex);
-
-       /* 1. get the pipe */
-       encoder = get_saved_enc(dev_priv, port, pipe);
-       if (!encoder || !encoder->base.crtc) {
-               DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
-               err = -ENODEV;
-               goto unlock;
-       }
-
-       crtc = to_intel_crtc(encoder->base.crtc);
-
-       /* port must be valid now, otherwise the pipe will be invalid */
-       acomp->aud_sample_rate[port] = rate;
-
-       hsw_audio_config_update(encoder, crtc->config);
-
- unlock:
-       mutex_unlock(&dev_priv->av_mutex);
-       i915_audio_component_put_power(kdev, cookie);
-       return err;
-}
-
-static int i915_audio_component_get_eld(struct device *kdev, int port,
-                                       int pipe, bool *enabled,
-                                       unsigned char *buf, int max_bytes)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
-       struct intel_encoder *intel_encoder;
-       const u8 *eld;
-       int ret = -EINVAL;
-
-       mutex_lock(&dev_priv->av_mutex);
-
-       intel_encoder = get_saved_enc(dev_priv, port, pipe);
-       if (!intel_encoder) {
-               DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
-               mutex_unlock(&dev_priv->av_mutex);
-               return ret;
-       }
-
-       ret = 0;
-       *enabled = intel_encoder->audio_connector != NULL;
-       if (*enabled) {
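-               /* return the full ELD size; copy at most max_bytes to the caller */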
-               eld = intel_encoder->audio_connector->eld;
-               ret = drm_eld_size(eld);
-               memcpy(buf, eld, min(max_bytes, ret));
-       }
-
-       mutex_unlock(&dev_priv->av_mutex);
-       return ret;
-}
-
-static const struct drm_audio_component_ops i915_audio_component_ops = {
-       .owner          = THIS_MODULE,
-       .get_power      = i915_audio_component_get_power,
-       .put_power      = i915_audio_component_put_power,
-       .codec_wake_override = i915_audio_component_codec_wake_override,
-       .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
-       .sync_audio_rate = i915_audio_component_sync_audio_rate,
-       .get_eld        = i915_audio_component_get_eld,
-};
-
-static int i915_audio_component_bind(struct device *i915_kdev,
-                                    struct device *hda_kdev, void *data)
-{
-       struct i915_audio_component *acomp = data;
-       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
-       int i;
-
-       if (WARN_ON(acomp->base.ops || acomp->base.dev))
-               return -EEXIST;
-
-       if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
-               return -ENOMEM;
-
-       drm_modeset_lock_all(&dev_priv->drm);
-       acomp->base.ops = &i915_audio_component_ops;
-       acomp->base.dev = i915_kdev;
-       BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
-       for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
-               acomp->aud_sample_rate[i] = 0;
-       dev_priv->audio_component = acomp;
-       drm_modeset_unlock_all(&dev_priv->drm);
-
-       return 0;
-}
-
-static void i915_audio_component_unbind(struct device *i915_kdev,
-                                       struct device *hda_kdev, void *data)
-{
-       struct i915_audio_component *acomp = data;
-       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
-
-       drm_modeset_lock_all(&dev_priv->drm);
-       acomp->base.ops = NULL;
-       acomp->base.dev = NULL;
-       dev_priv->audio_component = NULL;
-       drm_modeset_unlock_all(&dev_priv->drm);
-
-       device_link_remove(hda_kdev, i915_kdev);
-}
-
-static const struct component_ops i915_audio_component_bind_ops = {
-       .bind   = i915_audio_component_bind,
-       .unbind = i915_audio_component_unbind,
-};
-
-/**
- * i915_audio_component_init - initialize and register the audio component
- * @dev_priv: i915 device instance
- *
- * This will register with the component framework a child component which
- * will bind dynamically to the snd_hda_intel driver's corresponding master
- * component when the latter is registered. During binding the child
- * initializes an instance of struct i915_audio_component which it receives
- * from the master. The master can then start to use the interface defined by
- * this struct. Each side can break the binding at any point by deregistering
- * its own component after which each side's component unbind callback is
- * called.
- *
- * We ignore any error during registration and continue with reduced
- * functionality (i.e. without HDMI audio).
- */
-static void i915_audio_component_init(struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       ret = component_add_typed(dev_priv->drm.dev,
-                                 &i915_audio_component_bind_ops,
-                                 I915_COMPONENT_AUDIO);
-       if (ret < 0) {
-               DRM_ERROR("failed to add audio component (%d)\n", ret);
-               /* continue with reduced functionality */
-               return;
-       }
-
-       dev_priv->audio_component_registered = true;
-}
-
-/**
- * i915_audio_component_cleanup - deregister the audio component
- * @dev_priv: i915 device instance
- *
- * Deregisters the audio component, breaking any existing binding to the
- * corresponding snd_hda_intel driver's master component.
- */
-static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
-{
-       if (!dev_priv->audio_component_registered)
-               return;
-
-       component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
-       dev_priv->audio_component_registered = false;
-}
-
-/**
- * intel_audio_init() - Initialize the audio driver using either the
- * component framework or the LPE audio bridge
- * @dev_priv: the i915 drm device private data
- *
- */
-void intel_audio_init(struct drm_i915_private *dev_priv)
-{
-       if (intel_lpe_audio_init(dev_priv) < 0)
-               i915_audio_component_init(dev_priv);
-}
-
-/**
- * intel_audio_deinit() - deinitialize the audio driver
- * @dev_priv: the i915 drm device private data
- *
- */
-void intel_audio_deinit(struct drm_i915_private *dev_priv)
-{
-       if (dev_priv->lpe_audio.platdev != NULL)
-               intel_lpe_audio_teardown(dev_priv);
-       else
-               i915_audio_component_cleanup(dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/intel_audio.h
deleted file mode 100644 (file)
index a3657c7..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_AUDIO_H__
-#define __INTEL_AUDIO_H__
-
-struct drm_connector_state;
-struct drm_i915_private;
-struct intel_crtc_state;
-struct intel_encoder;
-
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *crtc_state,
-                             const struct drm_connector_state *conn_state);
-void intel_audio_codec_disable(struct intel_encoder *encoder,
-                              const struct intel_crtc_state *old_crtc_state,
-                              const struct drm_connector_state *old_conn_state);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
deleted file mode 100644 (file)
index 270719f..0000000
+++ /dev/null
@@ -1,2253 +0,0 @@
-/*
- * Copyright © 2006 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <eric@anholt.net>
- *
- */
-
-#include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
-
-#include "display/intel_gmbus.h"
-
-#include "i915_drv.h"
-
-#define _INTEL_BIOS_PRIVATE
-#include "intel_vbt_defs.h"
-
-/**
- * DOC: Video BIOS Table (VBT)
- *
- * The Video BIOS Table, or VBT, provides platform and board specific
- * configuration information to the driver that is not discoverable or available
- * through other means. The configuration is mostly related to display
- * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
- * the PCI ROM.
- *
- * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
- * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
- * contain the actual configuration information. The VBT Header, and thus the
- * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
- * BDB Header. The data blocks are concatenated after the BDB Header. The data
- * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
- * data. (Block 53, the MIPI Sequence Block is an exception.)
- *
- * The driver parses the VBT during load. The relevant information is stored in
- * driver private data for ease of use, and the actual VBT is not read after
- * that.
- */
-
-#define        SLAVE_ADDR1     0x70
-#define        SLAVE_ADDR2     0x72
-
-/* Get BDB block size given a pointer to Block ID. */
-static u32 _get_blocksize(const u8 *block_base)
-{
-       /* The MIPI Sequence Block v3+ has a separate size field. */
-       if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
-               return *((const u32 *)(block_base + 4));
-       else
-               return *((const u16 *)(block_base + 1));
-}
-
-/* Get BDB block size given a pointer to data after Block ID and Block Size. */
-static u32 get_blocksize(const void *block_data)
-{
-       return _get_blocksize(block_data - 3);
-}
-
-static const void *
-find_section(const void *_bdb, enum bdb_block_id section_id)
-{
-       const struct bdb_header *bdb = _bdb;
-       const u8 *base = _bdb;
-       int index = 0;
-       u32 total, current_size;
-       enum bdb_block_id current_id;
-
-       /* skip to first section */
-       index += bdb->header_size;
-       total = bdb->bdb_size;
-
-       /* walk the sections looking for section_id */
-       while (index + 3 < total) {
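-               /* each block: 1-byte Block ID, 2-byte Block Size, then data */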
-               current_id = *(base + index);
-               current_size = _get_blocksize(base + index);
-               index += 3;
-
-               if (index + current_size > total)
-                       return NULL;
-
-               if (current_id == section_id)
-                       return base + index;
-
-               index += current_size;
-       }
-
-       return NULL;
-}
-
-static void
-fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
-                       const struct lvds_dvo_timing *dvo_timing)
-{
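-       /* the VBT stores each timing value split across hi/lo byte fields */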
-       panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
-               dvo_timing->hactive_lo;
-       panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
-               ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
-       panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
-               ((dvo_timing->hsync_pulse_width_hi << 8) |
-                       dvo_timing->hsync_pulse_width_lo);
-       panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
-               ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
-
-       panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
-               dvo_timing->vactive_lo;
-       panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
-               ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo);
-       panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
-               ((dvo_timing->vsync_pulse_width_hi << 4) |
-                       dvo_timing->vsync_pulse_width_lo);
-       panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
-               ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
-       panel_fixed_mode->clock = dvo_timing->clock * 10;
-       panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
-
-       if (dvo_timing->hsync_positive)
-               panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
-       else
-               panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
-
-       if (dvo_timing->vsync_positive)
-               panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
-       else
-               panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
-
-       panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
-               dvo_timing->himage_lo;
-       panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
-               dvo_timing->vimage_lo;
-
-       /* Some VBTs have bogus h/vtotal values */
-       if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
-               panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
-       if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
-               panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
-
-       drm_mode_set_name(panel_fixed_mode);
-}
-
-static const struct lvds_dvo_timing *
-get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
-                   const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
-                   int index)
-{
-       /*
-        * The size of fp_timing varies between platforms, so compute the
-        * DVO timing offset relative to the LVDS data entry to locate the
-        * DVO timing entry.
-        */
-
-       int lfp_data_size =
-               lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
-               lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
-       int dvo_timing_offset =
-               lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
-               lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
-       char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
-
-       return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
-}
-
-/* Get the lvds_fp_timing entry.
- * Returns NULL if the corresponding entry is invalid.
- */
-static const struct lvds_fp_timing *
-get_lvds_fp_timing(const struct bdb_header *bdb,
-                  const struct bdb_lvds_lfp_data *data,
-                  const struct bdb_lvds_lfp_data_ptrs *ptrs,
-                  int index)
-{
-       size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
-       u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
-       size_t ofs;
-
-       if (index >= ARRAY_SIZE(ptrs->ptr))
-               return NULL;
-       ofs = ptrs->ptr[index].fp_timing_offset;
-       if (ofs < data_ofs ||
-           ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
-               return NULL;
-       return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
-}
-
-/* Try to find integrated panel data */
-static void
-parse_lfp_panel_data(struct drm_i915_private *dev_priv,
-                    const struct bdb_header *bdb)
-{
-       const struct bdb_lvds_options *lvds_options;
-       const struct bdb_lvds_lfp_data *lvds_lfp_data;
-       const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
-       const struct lvds_dvo_timing *panel_dvo_timing;
-       const struct lvds_fp_timing *fp_timing;
-       struct drm_display_mode *panel_fixed_mode;
-       int panel_type;
-       int drrs_mode;
-       int ret;
-
-       lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
-       if (!lvds_options)
-               return;
-
-       dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
-
-       ret = intel_opregion_get_panel_type(dev_priv);
-       if (ret >= 0) {
-               WARN_ON(ret > 0xf);
-               panel_type = ret;
-               DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
-       } else {
-               if (lvds_options->panel_type > 0xf) {
-                       DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
-                                     lvds_options->panel_type);
-                       return;
-               }
-               panel_type = lvds_options->panel_type;
-               DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
-       }
-
-       dev_priv->vbt.panel_type = panel_type;
-
-       drrs_mode = (lvds_options->dps_panel_type_bits
-                               >> (panel_type * 2)) & MODE_MASK;
-       /*
-        * The VBT encodes static DRRS as 0 and seamless DRRS as 2;
-        * map that onto enum drrs_support_type for vbt.drrs_type.
-        */
-       switch (drrs_mode) {
-       case 0:
-               dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
-               DRM_DEBUG_KMS("DRRS supported mode is static\n");
-               break;
-       case 2:
-               dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
-               DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
-               break;
-       default:
-               dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
-               DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
-               break;
-       }
-
-       lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
-       if (!lvds_lfp_data)
-               return;
-
-       lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
-       if (!lvds_lfp_data_ptrs)
-               return;
-
-       panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
-                                              lvds_lfp_data_ptrs,
-                                              panel_type);
-
-       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-       if (!panel_fixed_mode)
-               return;
-
-       fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
-
-       dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
-
-       DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
-       drm_mode_debug_printmodeline(panel_fixed_mode);
-
-       fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
-                                      lvds_lfp_data_ptrs,
-                                      panel_type);
-       if (fp_timing) {
-               /* check the resolution, just to be sure */
-               if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
-                   fp_timing->y_res == panel_fixed_mode->vdisplay) {
-                       dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
-                       DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
-                                     dev_priv->vbt.bios_lvds_val);
-               }
-       }
-}
-
-static void
-parse_lfp_backlight(struct drm_i915_private *dev_priv,
-                   const struct bdb_header *bdb)
-{
-       const struct bdb_lfp_backlight_data *backlight_data;
-       const struct lfp_backlight_data_entry *entry;
-       int panel_type = dev_priv->vbt.panel_type;
-
-       backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
-       if (!backlight_data)
-               return;
-
-       if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
-               DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
-                             backlight_data->entry_size);
-               return;
-       }
-
-       entry = &backlight_data->data[panel_type];
-
-       dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
-       if (!dev_priv->vbt.backlight.present) {
-               DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
-                             entry->type);
-               return;
-       }
-
-       dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
-       if (bdb->version >= 191 &&
-           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
-               const struct lfp_backlight_control_method *method;
-
-               method = &backlight_data->backlight_control[panel_type];
-               dev_priv->vbt.backlight.type = method->type;
-               dev_priv->vbt.backlight.controller = method->controller;
-       }
-
-       dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
-       dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
-       dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
-       DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
-                     "active %s, min brightness %u, level %u, controller %u\n",
-                     dev_priv->vbt.backlight.pwm_freq_hz,
-                     dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
-                     dev_priv->vbt.backlight.min_brightness,
-                     backlight_data->level[panel_type],
-                     dev_priv->vbt.backlight.controller);
-}
-
-/* Try to find sdvo panel data */
-static void
-parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
-                     const struct bdb_header *bdb)
-{
-       const struct bdb_sdvo_panel_dtds *dtds;
-       struct drm_display_mode *panel_fixed_mode;
-       int index;
-
-       index = i915_modparams.vbt_sdvo_panel_type;
-       if (index == -2) {
-               DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
-               return;
-       }
-
-       if (index == -1) {
-               const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
-
-               sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
-               if (!sdvo_lvds_options)
-                       return;
-
-               index = sdvo_lvds_options->panel_type;
-       }
-
-       dtds = find_section(bdb, BDB_SDVO_PANEL_DTDS);
-       if (!dtds)
-               return;
-
-       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-       if (!panel_fixed_mode)
-               return;
-
-       fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
-
-       dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
-
-       DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
-       drm_mode_debug_printmodeline(panel_fixed_mode);
-}
-
-static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
-                                   bool alternate)
-{
-       switch (INTEL_GEN(dev_priv)) {
-       case 2:
-               return alternate ? 66667 : 48000;
-       case 3:
-       case 4:
-               return alternate ? 100000 : 96000;
-       default:
-               return alternate ? 100000 : 120000;
-       }
-}
-
-static void
-parse_general_features(struct drm_i915_private *dev_priv,
-                      const struct bdb_header *bdb)
-{
-       const struct bdb_general_features *general;
-
-       general = find_section(bdb, BDB_GENERAL_FEATURES);
-       if (!general)
-               return;
-
-       dev_priv->vbt.int_tv_support = general->int_tv_support;
-       /* int_crt_support can't be trusted on earlier platforms */
-       if (bdb->version >= 155 &&
-           (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
-               dev_priv->vbt.int_crt_support = general->int_crt_support;
-       dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
-       dev_priv->vbt.lvds_ssc_freq =
-               intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
-       dev_priv->vbt.display_clock_mode = general->display_clock_mode;
-       dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
-       if (bdb->version >= 181) {
-               dev_priv->vbt.orientation = general->rotate_180 ?
-                       DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
-                       DRM_MODE_PANEL_ORIENTATION_NORMAL;
-       } else {
-               dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-       }
-       DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
-                     dev_priv->vbt.int_tv_support,
-                     dev_priv->vbt.int_crt_support,
-                     dev_priv->vbt.lvds_use_ssc,
-                     dev_priv->vbt.lvds_ssc_freq,
-                     dev_priv->vbt.display_clock_mode,
-                     dev_priv->vbt.fdi_rx_polarity_inverted);
-}
-
-static const struct child_device_config *
-child_device_ptr(const struct bdb_general_definitions *defs, int i)
-{
-       return (const void *) &defs->devices[i * defs->child_dev_size];
-}
-
-static void
-parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
-{
-       struct sdvo_device_mapping *mapping;
-       const struct child_device_config *child;
-       int i, count = 0;
-
-       /*
-        * Only parse SDVO mappings on gens that could have SDVO. This isn't
-        * accurate and doesn't have to be, as long as it's not too strict.
-        */
-       if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
-               DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
-               return;
-       }
-
-       for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               if (child->slave_addr != SLAVE_ADDR1 &&
-                   child->slave_addr != SLAVE_ADDR2) {
-                       /*
-                        * If the slave address is neither 0x70 nor 0x72,
-                        * it is not an SDVO device. Skip it.
-                        */
-                       continue;
-               }
-               if (child->dvo_port != DEVICE_PORT_DVOB &&
-                   child->dvo_port != DEVICE_PORT_DVOC) {
-                       /* skip the incorrect SDVO port */
-                       DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
-                       continue;
-               }
-               DRM_DEBUG_KMS("Found SDVO device with slave addr %2x on"
-                             " %s port\n",
-                             child->slave_addr,
-                             (child->dvo_port == DEVICE_PORT_DVOB) ?
-                             "SDVOB" : "SDVOC");
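-               /* dvo_port is DVOB or DVOC here; map it onto sdvo_mappings[0]/[1] */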
-               mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
-               if (!mapping->initialized) {
-                       mapping->dvo_port = child->dvo_port;
-                       mapping->slave_addr = child->slave_addr;
-                       mapping->dvo_wiring = child->dvo_wiring;
-                       mapping->ddc_pin = child->ddc_pin;
-                       mapping->i2c_pin = child->i2c_pin;
-                       mapping->initialized = 1;
-                       DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
-                                     mapping->dvo_port,
-                                     mapping->slave_addr,
-                                     mapping->dvo_wiring,
-                                     mapping->ddc_pin,
-                                     mapping->i2c_pin);
-               } else {
-                       DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
-                                     "two SDVO devices.\n");
-               }
-               if (child->slave2_addr) {
-                       /*
-                        * Maybe an SDVO device with multiple inputs;
-                        * the mapping info is not added for it.
-                        */
-                       DRM_DEBUG_KMS("slave2_addr is set; this may be an SDVO"
-                                     " device with multiple inputs.\n");
-               }
-               count++;
-       }
-
-       if (!count) {
-               /* No SDVO device info found */
-               DRM_DEBUG_KMS("No SDVO device info found in VBT\n");
-       }
-}
-
-static void
-parse_driver_features(struct drm_i915_private *dev_priv,
-                     const struct bdb_header *bdb)
-{
-       const struct bdb_driver_features *driver;
-
-       driver = find_section(bdb, BDB_DRIVER_FEATURES);
-       if (!driver)
-               return;
-
-       if (INTEL_GEN(dev_priv) >= 5) {
-               /*
-                * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
-                * to mean "eDP". The VBT spec doesn't agree with that
-                * interpretation, but real world VBTs seem to.
-                */
-               if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
-                       dev_priv->vbt.int_lvds_support = 0;
-       } else {
-               /*
-                * FIXME it's not clear which BDB version has the LVDS config
-                * bits defined. Revision history in the VBT spec says:
-                * "0.92 | Add two definitions for VBT value of LVDS Active
-                *  Config (00b and 11b values defined) | 06/13/2005"
-                * but does not specify the BDB version.
-                *
-                * So far version 134 (on i945gm) is the oldest VBT observed
-                * in the wild with the bits correctly populated. Version
-                * 108 (on i85x) does not have the bits correctly populated.
-                */
-               if (bdb->version >= 134 &&
-                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
-                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
-                       dev_priv->vbt.int_lvds_support = 0;
-       }
-
-       DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
-       /*
-        * If DRRS is not supported, drrs_type has to be set to
-        * DRRS_NOT_SUPPORTED. In the VBT the value 0 means static DRRS,
-        * so "not supported" is signalled by driver->drrs_enabled being
-        * false rather than by the DRRS mode bits.
-        */
-       if (!driver->drrs_enabled)
-               dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
-       dev_priv->vbt.psr.enable = driver->psr_enabled;
-}
-
-static void
-parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
-{
-       const struct bdb_edp *edp;
-       const struct edp_power_seq *edp_pps;
-       const struct edp_fast_link_params *edp_link_params;
-       int panel_type = dev_priv->vbt.panel_type;
-
-       edp = find_section(bdb, BDB_EDP);
-       if (!edp)
-               return;
-
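-       /* two bits of color depth per panel, indexed by panel_type */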
-       switch ((edp->color_depth >> (panel_type * 2)) & 3) {
-       case EDP_18BPP:
-               dev_priv->vbt.edp.bpp = 18;
-               break;
-       case EDP_24BPP:
-               dev_priv->vbt.edp.bpp = 24;
-               break;
-       case EDP_30BPP:
-               dev_priv->vbt.edp.bpp = 30;
-               break;
-       }
-
-       /* Get the eDP sequencing and link info */
-       edp_pps = &edp->power_seqs[panel_type];
-       edp_link_params = &edp->fast_link_params[panel_type];
-
-       dev_priv->vbt.edp.pps = *edp_pps;
-
-       switch (edp_link_params->rate) {
-       case EDP_RATE_1_62:
-               dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
-               break;
-       case EDP_RATE_2_7:
-               dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
-               break;
-       default:
-               DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
-                             edp_link_params->rate);
-               break;
-       }
-
-       switch (edp_link_params->lanes) {
-       case EDP_LANE_1:
-               dev_priv->vbt.edp.lanes = 1;
-               break;
-       case EDP_LANE_2:
-               dev_priv->vbt.edp.lanes = 2;
-               break;
-       case EDP_LANE_4:
-               dev_priv->vbt.edp.lanes = 4;
-               break;
-       default:
-               DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
-                             edp_link_params->lanes);
-               break;
-       }
-
-       switch (edp_link_params->preemphasis) {
-       case EDP_PREEMPHASIS_NONE:
-               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
-               break;
-       case EDP_PREEMPHASIS_3_5dB:
-               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
-               break;
-       case EDP_PREEMPHASIS_6dB:
-               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
-               break;
-       case EDP_PREEMPHASIS_9_5dB:
-               dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
-               break;
-       default:
-               DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
-                             edp_link_params->preemphasis);
-               break;
-       }
-
-       switch (edp_link_params->vswing) {
-       case EDP_VSWING_0_4V:
-               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-               break;
-       case EDP_VSWING_0_6V:
-               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
-               break;
-       case EDP_VSWING_0_8V:
-               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
-               break;
-       case EDP_VSWING_1_2V:
-               dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-               break;
-       default:
-               DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
-                             edp_link_params->vswing);
-               break;
-       }
-
-       if (bdb->version >= 173) {
-               u8 vswing;
-
-               /* Don't read from VBT if the module parameter has a valid value */
-               if (i915_modparams.edp_vswing) {
-                       dev_priv->vbt.edp.low_vswing =
-                               i915_modparams.edp_vswing == 1;
-               } else {
-                       vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
-                       dev_priv->vbt.edp.low_vswing = vswing == 0;
-               }
-       }
-}
-
-static void
-parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
-{
-       const struct bdb_psr *psr;
-       const struct psr_table *psr_table;
-       int panel_type = dev_priv->vbt.panel_type;
-
-       psr = find_section(bdb, BDB_PSR);
-       if (!psr) {
-               DRM_DEBUG_KMS("No PSR BDB found.\n");
-               return;
-       }
-
-       psr_table = &psr->psr_table[panel_type];
-
-       dev_priv->vbt.psr.full_link = psr_table->full_link;
-       dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
-
-       /* Allowed VBT values go from 0 to 15 */
-       dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
-               psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
-
-       switch (psr_table->lines_to_wait) {
-       case 0:
-               dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
-               break;
-       case 1:
-               dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
-               break;
-       case 2:
-               dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
-               break;
-       case 3:
-               dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
-               break;
-       default:
-               DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
-                             psr_table->lines_to_wait);
-               break;
-       }
-
-       /*
-        * New PSR options: 0=500us, 1=100us, 2=2500us, 3=0us.
-        * Older VBTs store the wakeup time as a decimal value in
-        * multiples of 100 us.
-        */
-       if (bdb->version >= 205 &&
-           (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
-            INTEL_GEN(dev_priv) >= 10)) {
-               switch (psr_table->tp1_wakeup_time) {
-               case 0:
-                       dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
-                       break;
-               case 1:
-                       dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
-                       break;
-               case 3:
-                       dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
-                       break;
-               default:
-                       DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
-                                       psr_table->tp1_wakeup_time);
-                       /* fallthrough */
-               case 2:
-                       dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
-                       break;
-               }
-
-               switch (psr_table->tp2_tp3_wakeup_time) {
-               case 0:
-                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
-                       break;
-               case 1:
-                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
-                       break;
-               case 3:
-                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
-                       break;
-               default:
-                       DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
-                                       psr_table->tp2_tp3_wakeup_time);
-                       /* fallthrough */
-               case 2:
-                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
-                       break;
-               }
-       } else {
-               dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
-               dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
-       }
-
-       if (bdb->version >= 226) {
-               u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
-
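-               /* two bits of PSR2 wakeup time per panel, indexed by panel_type */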
-               wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
-               switch (wakeup_time) {
-               case 0:
-                       wakeup_time = 500;
-                       break;
-               case 1:
-                       wakeup_time = 100;
-                       break;
-               case 3:
-                       wakeup_time = 50;
-                       break;
-               default:
-               case 2:
-                       wakeup_time = 2500;
-                       break;
-               }
-               dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
-       } else {
-               /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
-               dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
-       }
-}
-
-static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
-                                     u16 version, enum port port)
-{
-       if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
-               dev_priv->vbt.dsi.bl_ports = BIT(port);
-               if (dev_priv->vbt.dsi.config->cabc_supported)
-                       dev_priv->vbt.dsi.cabc_ports = BIT(port);
-
-               return;
-       }
-
-       switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
-       case DL_DCS_PORT_A:
-               dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
-               break;
-       case DL_DCS_PORT_C:
-               dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
-               break;
-       default:
-       case DL_DCS_PORT_A_AND_C:
-               dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
-               break;
-       }
-
-       if (!dev_priv->vbt.dsi.config->cabc_supported)
-               return;
-
-       switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
-       case DL_DCS_PORT_A:
-               dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
-               break;
-       case DL_DCS_PORT_C:
-               dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
-               break;
-       default:
-       case DL_DCS_PORT_A_AND_C:
-               dev_priv->vbt.dsi.cabc_ports =
-                                       BIT(PORT_A) | BIT(PORT_C);
-               break;
-       }
-}
-
-static void
-parse_mipi_config(struct drm_i915_private *dev_priv,
-                 const struct bdb_header *bdb)
-{
-       const struct bdb_mipi_config *start;
-       const struct mipi_config *config;
-       const struct mipi_pps_data *pps;
-       int panel_type = dev_priv->vbt.panel_type;
-       enum port port;
-
-       /* parse MIPI blocks only if LFP type is MIPI */
-       if (!intel_bios_is_dsi_present(dev_priv, &port))
-               return;
-
-       /* Initialize this to undefined indicating no generic MIPI support */
-       dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
-
-       /*
-        * Block #40 is already parsed and panel_fixed_mode is stored in
-        * dev_priv->vbt.lfp_lvds_vbt_mode; reuse it when needed.
-        */
-
-       /*
-        * Parse block #52, indexed by the panel_type parsed above.
-        */
-       start = find_section(bdb, BDB_MIPI_CONFIG);
-       if (!start) {
-               DRM_DEBUG_KMS("No MIPI config BDB found");
-               return;
-       }
-
-       DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
-                                                               panel_type);
-
-       /*
-        * Get the configuration block and PPS data for this panel, using
-        * panel_type as the index.
-        */
-       config = &start->config[panel_type];
-       pps = &start->pps[panel_type];
-
-       /* For now store the full data; trim later if not all of it is needed */
-       dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
-       if (!dev_priv->vbt.dsi.config)
-               return;
-
-       dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
-       if (!dev_priv->vbt.dsi.pps) {
-               kfree(dev_priv->vbt.dsi.config);
-               return;
-       }
-
-       parse_dsi_backlight_ports(dev_priv, bdb->version, port);
-
-       /* FIXME is the 90 vs. 270 correct? */
-       switch (config->rotation) {
-       case ENABLE_ROTATION_0:
-               /*
-                * Most (all?) VBTs claim 0 degrees despite having
-                * an upside down panel, thus we do not trust this.
-                */
-               dev_priv->vbt.dsi.orientation =
-                       DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-               break;
-       case ENABLE_ROTATION_90:
-               dev_priv->vbt.dsi.orientation =
-                       DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
-               break;
-       case ENABLE_ROTATION_180:
-               dev_priv->vbt.dsi.orientation =
-                       DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
-               break;
-       case ENABLE_ROTATION_270:
-               dev_priv->vbt.dsi.orientation =
-                       DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
-               break;
-       }
-
-       /* The mandatory MIPI config blocks are present; initialize as a generic panel */
-       dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
-}
-
-/* Find the sequence block and size for the given panel. */
-static const u8 *
-find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
-                         u16 panel_id, u32 *seq_size)
-{
-       u32 total = get_blocksize(sequence);
-       const u8 *data = &sequence->data[0];
-       u8 current_id;
-       u32 current_size;
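-       /* per-panel header: 1-byte panel ID plus a 4-byte (v3+) or 2-byte size */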
-       int header_size = sequence->version >= 3 ? 5 : 3;
-       int index = 0;
-       int i;
-
-       /* v3+ has an additional 4-byte block size field; skip it */
-       if (sequence->version >= 3)
-               data += 4;
-
-       for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
-               if (index + header_size > total) {
-                       DRM_ERROR("Invalid sequence block (header)\n");
-                       return NULL;
-               }
-
-               current_id = *(data + index);
-               if (sequence->version >= 3)
-                       current_size = *((const u32 *)(data + index + 1));
-               else
-                       current_size = *((const u16 *)(data + index + 1));
-
-               index += header_size;
-
-               if (index + current_size > total) {
-                       DRM_ERROR("Invalid sequence block\n");
-                       return NULL;
-               }
-
-               if (current_id == panel_id) {
-                       *seq_size = current_size;
-                       return data + index;
-               }
-
-               index += current_size;
-       }
-
-       DRM_ERROR("Sequence block detected but no valid configuration\n");
-
-       return NULL;
-}
-
-static int goto_next_sequence(const u8 *data, int index, int total)
-{
-       u16 len;
-
-       /* Skip Sequence Byte. */
-       for (index = index + 1; index < total; index += len) {
-               u8 operation_byte = *(data + index);
-               index++;
-
-               switch (operation_byte) {
-               case MIPI_SEQ_ELEM_END:
-                       return index;
-               case MIPI_SEQ_ELEM_SEND_PKT:
-                       if (index + 4 > total)
-                               return 0;
-
-                       len = *((const u16 *)(data + index + 2)) + 4;
-                       break;
-               case MIPI_SEQ_ELEM_DELAY:
-                       len = 4;
-                       break;
-               case MIPI_SEQ_ELEM_GPIO:
-                       len = 2;
-                       break;
-               case MIPI_SEQ_ELEM_I2C:
-                       if (index + 7 > total)
-                               return 0;
-                       len = *(data + index + 6) + 7;
-                       break;
-               default:
-                       DRM_ERROR("Unknown operation byte\n");
-                       return 0;
-               }
-       }
-
-       return 0;
-}
-
-static int goto_next_sequence_v3(const u8 *data, int index, int total)
-{
-       int seq_end;
-       u16 len;
-       u32 size_of_sequence;
-
-       /*
-        * Could skip sequence based on Size of Sequence alone, but also do some
-        * checking on the structure.
-        */
-       if (total < 5) {
-               DRM_ERROR("Too small sequence size\n");
-               return 0;
-       }
-
-       /* Skip Sequence Byte. */
-       index++;
-
-       /*
-        * Size of Sequence. Excludes the Sequence Byte and the size itself,
-        * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
-        * byte.
-        */
-       size_of_sequence = *((const u32 *)(data + index));
-       index += 4;
-
-       seq_end = index + size_of_sequence;
-       if (seq_end > total) {
-               DRM_ERROR("Invalid sequence size\n");
-               return 0;
-       }
-
-       for (; index < total; index += len) {
-               u8 operation_byte = *(data + index);
-               index++;
-
-               if (operation_byte == MIPI_SEQ_ELEM_END) {
-                       if (index != seq_end) {
-                               DRM_ERROR("Invalid element structure\n");
-                               return 0;
-                       }
-                       return index;
-               }
-
-               len = *(data + index);
-               index++;
-
-               /*
-                * FIXME: Would be nice to check elements like for v1/v2 in
-                * goto_next_sequence() above.
-                */
-               switch (operation_byte) {
-               case MIPI_SEQ_ELEM_SEND_PKT:
-               case MIPI_SEQ_ELEM_DELAY:
-               case MIPI_SEQ_ELEM_GPIO:
-               case MIPI_SEQ_ELEM_I2C:
-               case MIPI_SEQ_ELEM_SPI:
-               case MIPI_SEQ_ELEM_PMIC:
-                       break;
-               default:
-                       DRM_ERROR("Unknown operation byte %u\n",
-                                 operation_byte);
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
- * skip all delay + gpio operands and stop at the first DSI packet op.
- */
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
-{
-       const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-       int index, len;
-
-       if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
-               return 0;
-
-       /* index = 1 to skip sequence byte */
-       for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
-               switch (data[index]) {
-               case MIPI_SEQ_ELEM_SEND_PKT:
-                       return index == 1 ? 0 : index;
-               case MIPI_SEQ_ELEM_DELAY:
-                       len = 5; /* 1 byte for operand + uint32 */
-                       break;
-               case MIPI_SEQ_ELEM_GPIO:
-                       len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
-                       break;
-               default:
-                       return 0;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
- * The deassert must be done before calling intel_dsi_device_ready, so for
- * these devices we split the init OTP sequence into a deassert sequence and
- * the actual init OTP part.
- */
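-/*
- * Rough sketch of the resulting split (layout illustrative only):
- *
- *   before: INIT_OTP -> [seq byte][DELAY/GPIO elements][first SEND_PKT ...]
- *   after:  DEASSERT_RESET -> copy of the DELAY/GPIO part, terminated with
- *                             MIPI_SEQ_ELEM_END
- *           INIT_OTP       -> the original data, starting one byte before
- *                             the first SEND_PKT element, with that byte
- *                             rewritten to the MIPI_SEQ_INIT_OTP seq byte
- */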
-static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
-{
-       u8 *init_otp;
-       int len;
-
-       /* Limit this to VLV for now. */
-       if (!IS_VALLEYVIEW(dev_priv))
-               return;
-
-       /* Limit this to v1 vid-mode sequences */
-       if (dev_priv->vbt.dsi.config->is_cmd_mode ||
-           dev_priv->vbt.dsi.seq_version != 1)
-               return;
-
-       /* Only do this if there are otp and assert seqs and no deassert seq */
-       if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
-           !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
-           dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
-               return;
-
-       /* The deassert-sequence ends at the first DSI packet */
-       len = get_init_otp_deassert_fragment_len(dev_priv);
-       if (!len)
-               return;
-
-       DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
-
-       /* Copy the fragment, update seq byte and terminate it */
-       init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-       dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
-       if (!dev_priv->vbt.dsi.deassert_seq)
-               return;
-       dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
-       dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
-       /* Use the copy for deassert */
-       dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
-               dev_priv->vbt.dsi.deassert_seq;
-       /* Replace the last byte of the fragment with init OTP seq byte */
-       init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
-       /* And make MIPI_SEQ_INIT_OTP point to it */
-       dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
-}
-
-static void
-parse_mipi_sequence(struct drm_i915_private *dev_priv,
-                   const struct bdb_header *bdb)
-{
-       int panel_type = dev_priv->vbt.panel_type;
-       const struct bdb_mipi_sequence *sequence;
-       const u8 *seq_data;
-       u32 seq_size;
-       u8 *data;
-       int index = 0;
-
-       /* Only our generic panel driver uses the sequence block. */
-       if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
-               return;
-
-       sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
-       if (!sequence) {
-               DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
-               return;
-       }
-
-       /* Fail gracefully for forward incompatible sequence block. */
-       if (sequence->version >= 4) {
-               DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
-                         sequence->version);
-               return;
-       }
-
-       DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
-
-       seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
-       if (!seq_data)
-               return;
-
-       data = kmemdup(seq_data, seq_size, GFP_KERNEL);
-       if (!data)
-               return;
-
-       /* Parse the sequences, store pointers to each sequence. */
-       for (;;) {
-               u8 seq_id = *(data + index);
-               if (seq_id == MIPI_SEQ_END)
-                       break;
-
-               if (seq_id >= MIPI_SEQ_MAX) {
-                       DRM_ERROR("Unknown sequence %u\n", seq_id);
-                       goto err;
-               }
-
-               /* Log about presence of sequences we won't run. */
-               if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
-                       DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
-
-               dev_priv->vbt.dsi.sequence[seq_id] = data + index;
-
-               if (sequence->version >= 3)
-                       index = goto_next_sequence_v3(data, index, seq_size);
-               else
-                       index = goto_next_sequence(data, index, seq_size);
-               if (!index) {
-                       DRM_ERROR("Invalid sequence %u\n", seq_id);
-                       goto err;
-               }
-       }
-
-       dev_priv->vbt.dsi.data = data;
-       dev_priv->vbt.dsi.size = seq_size;
-       dev_priv->vbt.dsi.seq_version = sequence->version;
-
-       fixup_mipi_sequences(dev_priv);
-
-       DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
-       return;
-
-err:
-       kfree(data);
-       memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
-}
-
-static u8 translate_iboost(u8 val)
-{
-       static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
-
-       if (val >= ARRAY_SIZE(mapping)) {
-               DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
-               return 0;
-       }
-       return mapping[val];
-}
-
-static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
-{
-       const struct ddi_vbt_port_info *info;
-       enum port port;
-
-       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
-               info = &i915->vbt.ddi_port_info[port];
-
-               if (info->child && ddc_pin == info->alternate_ddc_pin)
-                       return port;
-       }
-
-       return PORT_NONE;
-}
-
-static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
-                            enum port port)
-{
-       struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
-       enum port p;
-
-       if (!info->alternate_ddc_pin)
-               return;
-
-       p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
-       if (p != PORT_NONE) {
-               DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
-                             "disabling port %c DVI/HDMI support\n",
-                             port_name(port), info->alternate_ddc_pin,
-                             port_name(p), port_name(port));
-
-               /*
-                * If multiple ports supposedly share the same DDC pin,
-                * then DVI/HDMI can't actually exist on the shared
-                * port; otherwise they would share the same DDC pin
-                * and the system couldn't communicate with them
-                * separately.
-                *
-                * Give priority to child device order: first come,
-                * first served.
-                */
-               info->supports_dvi = false;
-               info->supports_hdmi = false;
-               info->alternate_ddc_pin = 0;
-       }
-}
-
-static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
-{
-       const struct ddi_vbt_port_info *info;
-       enum port port;
-
-       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
-               info = &i915->vbt.ddi_port_info[port];
-
-               if (info->child && aux_ch == info->alternate_aux_channel)
-                       return port;
-       }
-
-       return PORT_NONE;
-}
-
-static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
-                           enum port port)
-{
-       struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
-       enum port p;
-
-       if (!info->alternate_aux_channel)
-               return;
-
-       p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
-       if (p != PORT_NONE) {
-               DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
-                             "disabling port %c DP support\n",
-                             port_name(port), info->alternate_aux_channel,
-                             port_name(p), port_name(port));
-
-               /*
-                * If multiple ports supposedly share the same AUX
-                * channel, then DP can't actually exist on the shared
-                * port; otherwise they would share the same AUX channel
-                * and the system couldn't communicate with them
-                * separately.
-                *
-                * Give priority to child device order: first come,
-                * first served.
-                */
-               info->supports_dp = false;
-               info->alternate_aux_channel = 0;
-       }
-}
-
-static const u8 cnp_ddc_pin_map[] = {
-       [0] = 0, /* N/A */
-       [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
-       [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
-       [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
-       [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
-};
-
-static const u8 icp_ddc_pin_map[] = {
-       [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
-       [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
-       [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
-       [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
-       [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
-       [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
-};
-
-static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
-{
-       const u8 *ddc_pin_map;
-       int n_entries;
-
-       if (HAS_PCH_ICP(dev_priv)) {
-               ddc_pin_map = icp_ddc_pin_map;
-               n_entries = ARRAY_SIZE(icp_ddc_pin_map);
-       } else if (HAS_PCH_CNP(dev_priv)) {
-               ddc_pin_map = cnp_ddc_pin_map;
-               n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
-       } else {
-               /* Assuming direct map */
-               return vbt_pin;
-       }
-
-       if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
-               return ddc_pin_map[vbt_pin];
-
-       DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
-                     vbt_pin);
-       return 0;
-}
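-
-/*
- * Worked example for map_ddc_pin() (illustrative): with an ICP PCH, a VBT
- * value of ICL_DDC_BUS_PORT_1 maps to GMBUS_PIN_9_TC1_ICP, while a value
- * outside the table makes the function return 0, which the caller is
- * expected to reject via intel_gmbus_is_valid_pin().
- */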
-
-static enum port dvo_port_to_port(u8 dvo_port)
-{
-       /*
-        * Each DDI port can have more than one value on the "DVO Port" field,
-        * so look for all the possible values for each port.
-        */
-       static const int dvo_ports[][3] = {
-               [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
-               [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
-               [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
-               [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
-               [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
-               [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
-       };
-       enum port port;
-       int i;
-
-       for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
-               for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
-                       if (dvo_ports[port][i] == -1)
-                               break;
-
-                       if (dvo_port == dvo_ports[port][i])
-                               return port;
-               }
-       }
-
-       return PORT_NONE;
-}
-
-static void parse_ddi_port(struct drm_i915_private *dev_priv,
-                          const struct child_device_config *child,
-                          u8 bdb_version)
-{
-       struct ddi_vbt_port_info *info;
-       bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-       enum port port;
-
-       port = dvo_port_to_port(child->dvo_port);
-       if (port == PORT_NONE)
-               return;
-
-       info = &dev_priv->vbt.ddi_port_info[port];
-
-       if (info->child) {
-               DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
-                             port_name(port));
-               return;
-       }
-
-       is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
-       is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
-       is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
-       is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
-       is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
-
-       if (port == PORT_A && is_dvi) {
-               DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
-                             is_hdmi ? "/HDMI" : "");
-               is_dvi = false;
-               is_hdmi = false;
-       }
-
-       info->supports_dvi = is_dvi;
-       info->supports_hdmi = is_hdmi;
-       info->supports_dp = is_dp;
-       info->supports_edp = is_edp;
-
-       if (bdb_version >= 195)
-               info->supports_typec_usb = child->dp_usb_type_c;
-
-       if (bdb_version >= 209)
-               info->supports_tbt = child->tbt;
-
-       DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
-                     port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
-                     HAS_LSPCON(dev_priv) && child->lspcon,
-                     info->supports_typec_usb, info->supports_tbt);
-
-       if (is_edp && is_dvi)
-               DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
-                             port_name(port));
-       if (is_crt && port != PORT_E)
-               DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
-       if (is_crt && (is_dvi || is_dp))
-               DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
-                             port_name(port));
-       if (is_dvi && (port == PORT_A || port == PORT_E))
-               DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
-       if (!is_dvi && !is_dp && !is_crt)
-               DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
-                             port_name(port));
-       if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
-               DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
-
-       if (is_dvi) {
-               u8 ddc_pin;
-
-               ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
-               if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
-                       info->alternate_ddc_pin = ddc_pin;
-                       sanitize_ddc_pin(dev_priv, port);
-               } else {
-                       DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
-                                     "sticking to defaults\n",
-                                     port_name(port), ddc_pin);
-               }
-       }
-
-       if (is_dp) {
-               info->alternate_aux_channel = child->aux_channel;
-
-               sanitize_aux_ch(dev_priv, port);
-       }
-
-       if (bdb_version >= 158) {
-               /* The VBT HDMI level shift values match the table we have. */
-               u8 hdmi_level_shift = child->hdmi_level_shifter_value;
-               DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
-                             port_name(port),
-                             hdmi_level_shift);
-               info->hdmi_level_shift = hdmi_level_shift;
-       }
-
-       if (bdb_version >= 204) {
-               int max_tmds_clock;
-
-               switch (child->hdmi_max_data_rate) {
-               default:
-                       MISSING_CASE(child->hdmi_max_data_rate);
-                       /* fall through */
-               case HDMI_MAX_DATA_RATE_PLATFORM:
-                       max_tmds_clock = 0;
-                       break;
-               case HDMI_MAX_DATA_RATE_297:
-                       max_tmds_clock = 297000;
-                       break;
-               case HDMI_MAX_DATA_RATE_165:
-                       max_tmds_clock = 165000;
-                       break;
-               }
-
-               if (max_tmds_clock)
-                       DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
-                                     port_name(port), max_tmds_clock);
-               info->max_tmds_clock = max_tmds_clock;
-       }
-
-       /* Parse the I_boost config for SKL and above */
-       if (bdb_version >= 196 && child->iboost) {
-               info->dp_boost_level = translate_iboost(child->dp_iboost_level);
-               DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
-                             port_name(port), info->dp_boost_level);
-               info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
-               DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
-                             port_name(port), info->hdmi_boost_level);
-       }
-
-       /* DP max link rate for CNL+ */
-       if (bdb_version >= 216) {
-               switch (child->dp_max_link_rate) {
-               default:
-               case VBT_DP_MAX_LINK_RATE_HBR3:
-                       info->dp_max_link_rate = 810000;
-                       break;
-               case VBT_DP_MAX_LINK_RATE_HBR2:
-                       info->dp_max_link_rate = 540000;
-                       break;
-               case VBT_DP_MAX_LINK_RATE_HBR:
-                       info->dp_max_link_rate = 270000;
-                       break;
-               case VBT_DP_MAX_LINK_RATE_LBR:
-                       info->dp_max_link_rate = 162000;
-                       break;
-               }
-               DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
-                             port_name(port), info->dp_max_link_rate);
-       }
-
-       info->child = child;
-}
-
-static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
-{
-       const struct child_device_config *child;
-       int i;
-
-       if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
-               return;
-
-       if (bdb_version < 155)
-               return;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               parse_ddi_port(dev_priv, child, bdb_version);
-       }
-}
-
-static void
-parse_general_definitions(struct drm_i915_private *dev_priv,
-                         const struct bdb_header *bdb)
-{
-       const struct bdb_general_definitions *defs;
-       const struct child_device_config *child;
-       int i, child_device_num, count;
-       u8 expected_size;
-       u16 block_size;
-       int bus_pin;
-
-       defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
-       if (!defs) {
-               DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
-               return;
-       }
-
-       block_size = get_blocksize(defs);
-       if (block_size < sizeof(*defs)) {
-               DRM_DEBUG_KMS("General definitions block too small (%u)\n",
-                             block_size);
-               return;
-       }
-
-       bus_pin = defs->crt_ddc_gmbus_pin;
-       DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-       if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
-               dev_priv->vbt.crt_ddc_pin = bus_pin;
-
-       if (bdb->version < 106) {
-               expected_size = 22;
-       } else if (bdb->version < 111) {
-               expected_size = 27;
-       } else if (bdb->version < 195) {
-               expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
-       } else if (bdb->version == 195) {
-               expected_size = 37;
-       } else if (bdb->version <= 215) {
-               expected_size = 38;
-       } else if (bdb->version <= 216) {
-               expected_size = 39;
-       } else {
-               expected_size = sizeof(*child);
-               BUILD_BUG_ON(sizeof(*child) < 39);
-               DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
-                                bdb->version, expected_size);
-       }
-
-       /* Flag an error for unexpected size, but continue anyway. */
-       if (defs->child_dev_size != expected_size)
-               DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
-                         defs->child_dev_size, expected_size, bdb->version);
-
-       /* The legacy sized child device config is the minimum we need. */
-       if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
-               DRM_DEBUG_KMS("Child device config size %u is too small.\n",
-                             defs->child_dev_size);
-               return;
-       }
-
-       /* get the number of child devices */
-       child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
-       count = 0;
-       /* count the child devices that are actually present */
-       for (i = 0; i < child_device_num; i++) {
-               child = child_device_ptr(defs, i);
-               if (!child->device_type)
-                       continue;
-               count++;
-       }
-       if (!count) {
-               DRM_DEBUG_KMS("no child devices parsed from VBT\n");
-               return;
-       }
-       dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
-       if (!dev_priv->vbt.child_dev) {
-               DRM_DEBUG_KMS("No memory space for child device\n");
-               return;
-       }
-
-       dev_priv->vbt.child_dev_num = count;
-       count = 0;
-       for (i = 0; i < child_device_num; i++) {
-               child = child_device_ptr(defs, i);
-               if (!child->device_type)
-                       continue;
-
-               /*
-                * Copy as much as we know (sizeof) and is available
-                * (child_dev_size) of the child device. Accessing the data must
-                * depend on VBT version.
-                */
-               memcpy(dev_priv->vbt.child_dev + count, child,
-                      min_t(size_t, defs->child_dev_size, sizeof(*child)));
-               count++;
-       }
-}
-
-/* Common defaults which may be overridden by VBT. */
-static void
-init_vbt_defaults(struct drm_i915_private *dev_priv)
-{
-       enum port port;
-
-       dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
-
-       /* Default to having backlight */
-       dev_priv->vbt.backlight.present = true;
-
-       /* LFP panel data */
-       dev_priv->vbt.lvds_dither = 1;
-
-       /* SDVO panel data */
-       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-
-       /* general features */
-       dev_priv->vbt.int_tv_support = 1;
-       dev_priv->vbt.int_crt_support = 1;
-
-       /* driver features */
-       dev_priv->vbt.int_lvds_support = 1;
-
-       /* Default to using SSC */
-       dev_priv->vbt.lvds_use_ssc = 1;
-       /*
-        * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
-        * clock for LVDS.
-        */
-       dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
-                       !HAS_PCH_SPLIT(dev_priv));
-       DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
-
-       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
-               struct ddi_vbt_port_info *info =
-                       &dev_priv->vbt.ddi_port_info[port];
-
-               info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
-       }
-}
-
-/* Defaults to initialize only if there is no VBT. */
-static void
-init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
-{
-       enum port port;
-
-       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
-               struct ddi_vbt_port_info *info =
-                       &dev_priv->vbt.ddi_port_info[port];
-
-               /*
-                * The VBT has the Type-C mode (native, TBT/USB) and we
-                * don't want to guess it here, so skip Type-C ports.
-                */
-               if (intel_port_is_tc(dev_priv, port))
-                       continue;
-
-               info->supports_dvi = (port != PORT_A && port != PORT_E);
-               info->supports_hdmi = info->supports_dvi;
-               info->supports_dp = (port != PORT_E);
-               info->supports_edp = (port == PORT_A);
-       }
-}
-
-static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
-{
-       const void *_vbt = vbt;
-
-       return _vbt + vbt->bdb_offset;
-}
-
-/**
- * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
- * @buf:       pointer to a buffer to validate
- * @size:      size of the buffer
- *
- * Returns true on valid VBT.
- */
-bool intel_bios_is_valid_vbt(const void *buf, size_t size)
-{
-       const struct vbt_header *vbt = buf;
-       const struct bdb_header *bdb;
-
-       if (!vbt)
-               return false;
-
-       if (sizeof(struct vbt_header) > size) {
-               DRM_DEBUG_DRIVER("VBT header incomplete\n");
-               return false;
-       }
-
-       if (memcmp(vbt->signature, "$VBT", 4)) {
-               DRM_DEBUG_DRIVER("VBT invalid signature\n");
-               return false;
-       }
-
-       if (range_overflows_t(size_t,
-                             vbt->bdb_offset,
-                             sizeof(struct bdb_header),
-                             size)) {
-               DRM_DEBUG_DRIVER("BDB header incomplete\n");
-               return false;
-       }
-
-       bdb = get_bdb_header(vbt);
-       if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
-               DRM_DEBUG_DRIVER("BDB incomplete\n");
-               return false;
-       }
-
-       return true;
-}
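-
-/*
- * Layout being validated (offsets illustrative, not from a real BIOS):
- *
- *   buf + 0x00: struct vbt_header, signature "$VBT", bdb_offset = 0x30
- *   buf + 0x30: struct bdb_header, bdb_size covering all BDB blocks
- *
- * Both the VBT header and bdb_offset + bdb_size must fit within size for
- * the buffer to be accepted.
- */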
-
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
-{
-       size_t i;
-
-       /* Scour memory looking for the VBT signature. */
-       for (i = 0; i + 4 < size; i++) {
-               void *vbt;
-
-               if (ioread32(bios + i) != *((const u32 *) "$VBT"))
-                       continue;
-
-               /*
-                * This is the one place where we explicitly discard the address
-                * space (__iomem) of the BIOS/VBT.
-                */
-               vbt = (void __force *) bios + i;
-               if (intel_bios_is_valid_vbt(vbt, size - i))
-                       return vbt;
-
-               break;
-       }
-
-       return NULL;
-}
-
-/**
- * intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev_priv: i915 device instance
- *
- * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
- * is not found in the ACPI OpRegion, try to find it in the PCI ROM. Also
- * initialize some defaults if the VBT is not present at all.
- */
-void intel_bios_init(struct drm_i915_private *dev_priv)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       const struct vbt_header *vbt = dev_priv->opregion.vbt;
-       const struct bdb_header *bdb;
-       u8 __iomem *bios = NULL;
-
-       if (!HAS_DISPLAY(dev_priv)) {
-               DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
-               return;
-       }
-
-       init_vbt_defaults(dev_priv);
-
-       /* If the OpRegion does not have VBT, look in PCI ROM. */
-       if (!vbt) {
-               size_t size;
-
-               bios = pci_map_rom(pdev, &size);
-               if (!bios)
-                       goto out;
-
-               vbt = find_vbt(bios, size);
-               if (!vbt)
-                       goto out;
-
-               DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
-       }
-
-       bdb = get_bdb_header(vbt);
-
-       DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
-                     (int)sizeof(vbt->signature), vbt->signature, bdb->version);
-
-       /* Grab useful general definitions */
-       parse_general_features(dev_priv, bdb);
-       parse_general_definitions(dev_priv, bdb);
-       parse_lfp_panel_data(dev_priv, bdb);
-       parse_lfp_backlight(dev_priv, bdb);
-       parse_sdvo_panel_data(dev_priv, bdb);
-       parse_driver_features(dev_priv, bdb);
-       parse_edp(dev_priv, bdb);
-       parse_psr(dev_priv, bdb);
-       parse_mipi_config(dev_priv, bdb);
-       parse_mipi_sequence(dev_priv, bdb);
-
-       /* Further processing on pre-parsed data */
-       parse_sdvo_device_mapping(dev_priv, bdb->version);
-       parse_ddi_ports(dev_priv, bdb->version);
-
-out:
-       if (!vbt) {
-               DRM_INFO("Failed to find VBIOS tables (VBT)\n");
-               init_vbt_missing_defaults(dev_priv);
-       }
-
-       if (bios)
-               pci_unmap_rom(pdev, bios);
-}
-
-/**
- * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
- * @dev_priv: i915 device instance
- */
-void intel_bios_cleanup(struct drm_i915_private *dev_priv)
-{
-       kfree(dev_priv->vbt.child_dev);
-       dev_priv->vbt.child_dev = NULL;
-       dev_priv->vbt.child_dev_num = 0;
-       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
-       kfree(dev_priv->vbt.dsi.data);
-       dev_priv->vbt.dsi.data = NULL;
-       kfree(dev_priv->vbt.dsi.pps);
-       dev_priv->vbt.dsi.pps = NULL;
-       kfree(dev_priv->vbt.dsi.config);
-       dev_priv->vbt.dsi.config = NULL;
-       kfree(dev_priv->vbt.dsi.deassert_seq);
-       dev_priv->vbt.dsi.deassert_seq = NULL;
-}
-
-/**
- * intel_bios_is_tv_present - is integrated TV present in VBT
- * @dev_priv:  i915 device instance
- *
- * Return true if TV is present. If no child devices were parsed from VBT,
- * assume TV is present.
- */
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
-{
-       const struct child_device_config *child;
-       int i;
-
-       if (!dev_priv->vbt.int_tv_support)
-               return false;
-
-       if (!dev_priv->vbt.child_dev_num)
-               return true;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-               /*
-                * If the device type is not TV, continue.
-                */
-               switch (child->device_type) {
-               case DEVICE_TYPE_INT_TV:
-               case DEVICE_TYPE_TV:
-               case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
-                       break;
-               default:
-                       continue;
-               }
-               /* Only regard the device as present when addin_offset
-                * is non-zero.
-                */
-               if (child->addin_offset)
-                       return true;
-       }
-
-       return false;
-}
-
-/**
- * intel_bios_is_lvds_present - is LVDS present in VBT
- * @dev_priv:  i915 device instance
- * @i2c_pin:   i2c pin for LVDS if present
- *
- * Return true if LVDS is present. If no child devices were parsed from VBT,
- * assume LVDS is present.
- */
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
-{
-       const struct child_device_config *child;
-       int i;
-
-       if (!dev_priv->vbt.child_dev_num)
-               return true;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               /* If the device type is not LFP, continue.
-                * We have to check both the new and the old identifiers
-                * for compatibility with some BIOSes.
-                */
-               if (child->device_type != DEVICE_TYPE_INT_LFP &&
-                   child->device_type != DEVICE_TYPE_LFP)
-                       continue;
-
-               if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
-                       *i2c_pin = child->i2c_pin;
-
-               /* However, we cannot trust the BIOS writers to populate
-                * the VBT correctly.  Since LVDS requires additional
-                * information from AIM blocks, a non-zero addin offset is
-                * a good indicator that the LVDS is actually present.
-                */
-               if (child->addin_offset)
-                       return true;
-
-               /* But even then some BIOS writers perform some black magic
-                * and instantiate the device without reference to any
-                * additional data.  Trust that if the VBT was written into
-                * the OpRegion then they have validated the LVDS's existence.
-                */
-               if (dev_priv->opregion.vbt)
-                       return true;
-       }
-
-       return false;
-}
-
-/**
- * intel_bios_is_port_present - is the specified digital port present
- * @dev_priv:  i915 device instance
- * @port:      port to check
- *
- * Return true if the device in %port is present.
- */
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
-{
-       const struct child_device_config *child;
-       static const struct {
-               u16 dp, hdmi;
-       } port_mapping[] = {
-               [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
-               [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
-               [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
-               [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
-               [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
-       };
-       int i;
-
-       if (HAS_DDI(dev_priv)) {
-               const struct ddi_vbt_port_info *port_info =
-                       &dev_priv->vbt.ddi_port_info[port];
-
-               return port_info->supports_dp ||
-                      port_info->supports_dvi ||
-                      port_info->supports_hdmi;
-       }
-
-       /* FIXME maybe deal with port A as well? */
-       if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
-               return false;
-
-       if (!dev_priv->vbt.child_dev_num)
-               return false;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               if ((child->dvo_port == port_mapping[port].dp ||
-                    child->dvo_port == port_mapping[port].hdmi) &&
-                   (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
-                                          DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
-                       return true;
-       }
-
-       return false;
-}
-
-/**
- * intel_bios_is_port_edp - is the device in given port eDP
- * @dev_priv:  i915 device instance
- * @port:      port to check
- *
- * Return true if the device in %port is eDP.
- */
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
-{
-       const struct child_device_config *child;
-       static const short port_mapping[] = {
-               [PORT_B] = DVO_PORT_DPB,
-               [PORT_C] = DVO_PORT_DPC,
-               [PORT_D] = DVO_PORT_DPD,
-               [PORT_E] = DVO_PORT_DPE,
-               [PORT_F] = DVO_PORT_DPF,
-       };
-       int i;
-
-       if (HAS_DDI(dev_priv))
-               return dev_priv->vbt.ddi_port_info[port].supports_edp;
-
-       if (!dev_priv->vbt.child_dev_num)
-               return false;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               if (child->dvo_port == port_mapping[port] &&
-                   (child->device_type & DEVICE_TYPE_eDP_BITS) ==
-                   (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
-                       return true;
-       }
-
-       return false;
-}
-
-static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
-                                     enum port port)
-{
-       static const struct {
-               u16 dp, hdmi;
-       } port_mapping[] = {
-               /*
-                * Buggy VBTs may declare DP ports as having
-                * HDMI type dvo_port :( So let's check both.
-                */
-               [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
-               [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
-               [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
-               [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
-               [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
-       };
-
-       if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
-               return false;
-
-       if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
-           (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
-               return false;
-
-       if (child->dvo_port == port_mapping[port].dp)
-               return true;
-
-       /* Only accept an HDMI dvo_port as DP++ if it has an AUX channel */
-       if (child->dvo_port == port_mapping[port].hdmi &&
-           child->aux_channel != 0)
-               return true;
-
-       return false;
-}
-
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
-                                    enum port port)
-{
-       const struct child_device_config *child;
-       int i;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               if (child_dev_is_dp_dual_mode(child, port))
-                       return true;
-       }
-
-       return false;
-}
-
-/**
- * intel_bios_is_dsi_present - is DSI present in VBT
- * @dev_priv:  i915 device instance
- * @port:      port for DSI if present
- *
- * Return true if DSI is present, and return the port in %port.
- */
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
-                              enum port *port)
-{
-       const struct child_device_config *child;
-       u8 dvo_port;
-       int i;
-
-       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               child = dev_priv->vbt.child_dev + i;
-
-               if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
-                       continue;
-
-               dvo_port = child->dvo_port;
-
-               if (dvo_port == DVO_PORT_MIPIA ||
-                   (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
-                   (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
-                       if (port)
-                               *port = dvo_port - DVO_PORT_MIPIA;
-                       return true;
-               } else if (dvo_port == DVO_PORT_MIPIB ||
-                          dvo_port == DVO_PORT_MIPIC ||
-                          dvo_port == DVO_PORT_MIPID) {
-                       DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
-                                     port_name(dvo_port - DVO_PORT_MIPIA));
-               }
-       }
-
-       return false;
-}
-
-/**
- * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @i915:      i915 device instance
- * @port:      port to check
- *
- * Return true if HPD should be inverted for %port.
- */
-bool
-intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
-                               enum port port)
-{
-       const struct child_device_config *child =
-               i915->vbt.ddi_port_info[port].child;
-
-       if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
-               return false;
-
-       return child && child->hpd_invert;
-}
-
-/**
- * intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @i915:      i915 device instance
- * @port:      port to check
- *
- * Return true if LSPCON is present on this port
- */
-bool
-intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
-                            enum port port)
-{
-       const struct child_device_config *child =
-               i915->vbt.ddi_port_info[port].child;
-
-       return HAS_LSPCON(i915) && child && child->lspcon;
-}
-
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
-                                  enum port port)
-{
-       const struct ddi_vbt_port_info *info =
-               &dev_priv->vbt.ddi_port_info[port];
-       enum aux_ch aux_ch;
-
-       if (!info->alternate_aux_channel) {
-               aux_ch = (enum aux_ch)port;
-
-               DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
-                             aux_ch_name(aux_ch), port_name(port));
-               return aux_ch;
-       }
-
-       switch (info->alternate_aux_channel) {
-       case DP_AUX_A:
-               aux_ch = AUX_CH_A;
-               break;
-       case DP_AUX_B:
-               aux_ch = AUX_CH_B;
-               break;
-       case DP_AUX_C:
-               aux_ch = AUX_CH_C;
-               break;
-       case DP_AUX_D:
-               aux_ch = AUX_CH_D;
-               break;
-       case DP_AUX_E:
-               aux_ch = AUX_CH_E;
-               break;
-       case DP_AUX_F:
-               aux_ch = AUX_CH_F;
-               break;
-       default:
-               MISSING_CASE(info->alternate_aux_channel);
-               aux_ch = AUX_CH_A;
-               break;
-       }
-
-       DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
-                     aux_ch_name(aux_ch), port_name(port));
-
-       return aux_ch;
-}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
deleted file mode 100644 (file)
index 4e42cfa..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
- * the VBT from the rest of the driver. Add the parsed, clean data to struct
- * intel_vbt_data within struct drm_i915_private.
- */
-
-#ifndef _INTEL_BIOS_H_
-#define _INTEL_BIOS_H_
-
-#include <linux/types.h>
-
-#include <drm/i915_drm.h>
-
-struct drm_i915_private;
-
-enum intel_backlight_type {
-       INTEL_BACKLIGHT_PMIC,
-       INTEL_BACKLIGHT_LPSS,
-       INTEL_BACKLIGHT_DISPLAY_DDI,
-       INTEL_BACKLIGHT_DSI_DCS,
-       INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
-};
-
-struct edp_power_seq {
-       u16 t1_t3;
-       u16 t8;
-       u16 t9;
-       u16 t10;
-       u16 t11_t12;
-} __packed;
-
-/*
- * MIPI Sequence Block definitions
- *
- * Note the VBT spec has AssertReset / DeassertReset swapped from their
- * usual naming; we use the proper names here to avoid confusion when
- * reading the code.
- */
-enum mipi_seq {
-       MIPI_SEQ_END = 0,
-       MIPI_SEQ_DEASSERT_RESET,        /* Spec says MipiAssertResetPin */
-       MIPI_SEQ_INIT_OTP,
-       MIPI_SEQ_DISPLAY_ON,
-       MIPI_SEQ_DISPLAY_OFF,
-       MIPI_SEQ_ASSERT_RESET,          /* Spec says MipiDeassertResetPin */
-       MIPI_SEQ_BACKLIGHT_ON,          /* sequence block v2+ */
-       MIPI_SEQ_BACKLIGHT_OFF,         /* sequence block v2+ */
-       MIPI_SEQ_TEAR_ON,               /* sequence block v2+ */
-       MIPI_SEQ_TEAR_OFF,              /* sequence block v3+ */
-       MIPI_SEQ_POWER_ON,              /* sequence block v3+ */
-       MIPI_SEQ_POWER_OFF,             /* sequence block v3+ */
-       MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
-       MIPI_SEQ_ELEM_END = 0,
-       MIPI_SEQ_ELEM_SEND_PKT,
-       MIPI_SEQ_ELEM_DELAY,
-       MIPI_SEQ_ELEM_GPIO,
-       MIPI_SEQ_ELEM_I2C,              /* sequence block v2+ */
-       MIPI_SEQ_ELEM_SPI,              /* sequence block v3+ */
-       MIPI_SEQ_ELEM_PMIC,             /* sequence block v3+ */
-       MIPI_SEQ_ELEM_MAX
-};
-
-#define MIPI_DSI_UNDEFINED_PANEL_ID    0
-#define MIPI_DSI_GENERIC_PANEL_ID      1
-
-struct mipi_config {
-       u16 panel_id;
-
-       /* General Params */
-       u32 enable_dithering:1;
-       u32 rsvd1:1;
-       u32 is_bridge:1;
-
-       u32 panel_arch_type:2;
-       u32 is_cmd_mode:1;
-
-#define NON_BURST_SYNC_PULSE   0x1
-#define NON_BURST_SYNC_EVENTS  0x2
-#define BURST_MODE             0x3
-       u32 video_transfer_mode:2;
-
-       u32 cabc_supported:1;
-#define PPS_BLC_PMIC   0
-#define PPS_BLC_SOC    1
-       u32 pwm_blc:1;
-
-       /* Bit 13:10 */
-#define PIXEL_FORMAT_RGB565                    0x1
-#define PIXEL_FORMAT_RGB666                    0x2
-#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED     0x3
-#define PIXEL_FORMAT_RGB888                    0x4
-       u32 videomode_color_format:4;
-
-       /* Bit 15:14 */
-#define ENABLE_ROTATION_0      0x0
-#define ENABLE_ROTATION_90     0x1
-#define ENABLE_ROTATION_180    0x2
-#define ENABLE_ROTATION_270    0x3
-       u32 rotation:2;
-       u32 bta_enabled:1;
-       u32 rsvd2:15;
-
-       /* 2 byte Port Description */
-#define DUAL_LINK_NOT_SUPPORTED        0
-#define DUAL_LINK_FRONT_BACK   1
-#define DUAL_LINK_PIXEL_ALT    2
-       u16 dual_link:2;
-       u16 lane_cnt:2;
-       u16 pixel_overlap:3;
-       u16 rgb_flip:1;
-#define DL_DCS_PORT_A                  0x00
-#define DL_DCS_PORT_C                  0x01
-#define DL_DCS_PORT_A_AND_C            0x02
-       u16 dl_dcs_cabc_ports:2;
-       u16 dl_dcs_backlight_ports:2;
-       u16 rsvd3:4;
-
-       u16 rsvd4;
-
-       u8 rsvd5;
-       u32 target_burst_mode_freq;
-       u32 dsi_ddr_clk;
-       u32 bridge_ref_clk;
-
-#define  BYTE_CLK_SEL_20MHZ            0
-#define  BYTE_CLK_SEL_10MHZ            1
-#define  BYTE_CLK_SEL_5MHZ             2
-       u8 byte_clk_sel:2;
-
-       u8 rsvd6:6;
-
-       /* DPHY Flags */
-       u16 dphy_param_valid:1;
-       u16 eot_pkt_disabled:1;
-       u16 enable_clk_stop:1;
-       u16 rsvd7:13;
-
-       u32 hs_tx_timeout;
-       u32 lp_rx_timeout;
-       u32 turn_around_timeout;
-       u32 device_reset_timer;
-       u32 master_init_timer;
-       u32 dbi_bw_timer;
-       u32 lp_byte_clk_val;
-
-       /*  4 byte Dphy Params */
-       u32 prepare_cnt:6;
-       u32 rsvd8:2;
-       u32 clk_zero_cnt:8;
-       u32 trail_cnt:5;
-       u32 rsvd9:3;
-       u32 exit_zero_cnt:6;
-       u32 rsvd10:2;
-
-       u32 clk_lane_switch_cnt;
-       u32 hl_switch_cnt;
-
-       u32 rsvd11[6];
-
-       /* timings based on dphy spec */
-       u8 tclk_miss;
-       u8 tclk_post;
-       u8 rsvd12;
-       u8 tclk_pre;
-       u8 tclk_prepare;
-       u8 tclk_settle;
-       u8 tclk_term_enable;
-       u8 tclk_trail;
-       u16 tclk_prepare_clkzero;
-       u8 rsvd13;
-       u8 td_term_enable;
-       u8 teot;
-       u8 ths_exit;
-       u8 ths_prepare;
-       u16 ths_prepare_hszero;
-       u8 rsvd14;
-       u8 ths_settle;
-       u8 ths_skip;
-       u8 ths_trail;
-       u8 tinit;
-       u8 tlpx;
-       u8 rsvd15[3];
-
-       /* GPIOs */
-       u8 panel_enable;
-       u8 bl_enable;
-       u8 pwm_enable;
-       u8 reset_r_n;
-       u8 pwr_down_r;
-       u8 stdby_r_n;
-
-} __packed;
-
-/* all delays have a unit of 100us */
-struct mipi_pps_data {
-       u16 panel_on_delay;
-       u16 bl_enable_delay;
-       u16 bl_disable_delay;
-       u16 panel_off_delay;
-       u16 panel_power_cycle_delay;
-} __packed;
-
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
-                                    enum port port);
-bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
-                                 enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
-
-#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_bw.c b/drivers/gpu/drm/i915/intel_bw.c
deleted file mode 100644 (file)
index 753ac31..0000000
+++ /dev/null
@@ -1,421 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <drm/drm_atomic_state_helper.h>
-
-#include "intel_bw.h"
-#include "intel_drv.h"
-#include "intel_sideband.h"
-
-/* Parameters for Qclk Geyserville (QGV) */
-struct intel_qgv_point {
-       u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
-};
-
-struct intel_qgv_info {
-       struct intel_qgv_point points[3];
-       u8 num_points;
-       u8 num_channels;
-       u8 t_bl;
-       enum intel_dram_type dram_type;
-};
-
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
-                                         struct intel_qgv_info *qi)
-{
-       u32 val = 0;
-       int ret;
-
-       ret = sandybridge_pcode_read(dev_priv,
-                                    ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
-                                    ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
-                                    &val, NULL);
-       if (ret)
-               return ret;
-
-       switch (val & 0xf) {
-       case 0:
-               qi->dram_type = INTEL_DRAM_DDR4;
-               break;
-       case 1:
-               qi->dram_type = INTEL_DRAM_DDR3;
-               break;
-       case 2:
-               qi->dram_type = INTEL_DRAM_LPDDR3;
-               break;
-       case 3:
-               qi->dram_type = INTEL_DRAM_LPDDR4;
-               break;
-       default:
-               MISSING_CASE(val & 0xf);
-               break;
-       }
-
-       qi->num_channels = (val & 0xf0) >> 4;
-       qi->num_points = (val & 0xf00) >> 8;
-
-       qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
-
-       return 0;
-}
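-
-/*
- * Worked example for icl_pcode_read_mem_global_info() (hypothetical value):
- * val == 0x320 decodes as dram_type 0 (DDR4), num_channels 2 and
- * num_points 3, giving t_bl = 4.
- */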
-
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
-                                        struct intel_qgv_point *sp,
-                                        int point)
-{
-       u32 val = 0, val2;
-       int ret;
-
-       ret = sandybridge_pcode_read(dev_priv,
-                                    ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
-                                    ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
-                                    &val, &val2);
-       if (ret)
-               return ret;
-
-       sp->dclk = val & 0xffff;
-       sp->t_rp = (val & 0xff0000) >> 16;
-       sp->t_rcd = (val & 0xff000000) >> 24;
-
-       sp->t_rdpre = val2 & 0xff;
-       sp->t_ras = (val2 & 0xff00) >> 8;
-
-       sp->t_rc = sp->t_rp + sp->t_ras;
-
-       return 0;
-}
-
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
-                             struct intel_qgv_info *qi)
-{
-       int i, ret;
-
-       ret = icl_pcode_read_mem_global_info(dev_priv, qi);
-       if (ret)
-               return ret;
-
-       if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
-               qi->num_points = ARRAY_SIZE(qi->points);
-
-       for (i = 0; i < qi->num_points; i++) {
-               struct intel_qgv_point *sp = &qi->points[i];
-
-               ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
-               if (ret)
-                       return ret;
-
-               DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
-                             i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
-                             sp->t_rcd, sp->t_rc);
-       }
-
-       return 0;
-}
-
-static int icl_calc_bw(int dclk, int num, int den)
-{
-       /* multiples of 16.666MHz (100/6) */
-       return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
-}
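-
-/*
- * Worked example for icl_calc_bw() (hypothetical numbers): a QGV point
- * reporting dclk == 72 corresponds to 72 * 100 / 6 = 1200 MHz, so
- * icl_calc_bw(72, 16, 1) = DIV_ROUND_CLOSEST(16 * 72 * 100, 6) = 19200,
- * i.e. roughly 19.2 GB/s when 16 bytes are transferred per dclk cycle.
- */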
-
-static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
-{
-       u16 dclk = 0;
-       int i;
-
-       for (i = 0; i < qi->num_points; i++)
-               dclk = max(dclk, qi->points[i].dclk);
-
-       return dclk;
-}
-
-struct intel_sa_info {
-       u8 deburst, mpagesize, deprogbwlimit, displayrtids;
-};
-
-static const struct intel_sa_info icl_sa_info = {
-       .deburst = 8,
-       .mpagesize = 16,
-       .deprogbwlimit = 25, /* GB/s */
-       .displayrtids = 128,
-};
-
-static int icl_get_bw_info(struct drm_i915_private *dev_priv)
-{
-       struct intel_qgv_info qi = {};
-       const struct intel_sa_info *sa = &icl_sa_info;
-       bool is_y_tile = true; /* assume y tile may be used */
-       int num_channels;
-       int deinterleave;
-       int ipqdepth, ipqdepthpch;
-       int dclk_max;
-       int maxdebw;
-       int i, ret;
-
-       ret = icl_get_qgv_points(dev_priv, &qi);
-       if (ret) {
-               DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits\n");
-               return ret;
-       }
-       num_channels = qi.num_channels;
-
-       deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
-       dclk_max = icl_sagv_max_dclk(&qi);
-
-       ipqdepthpch = 16;
-
-       maxdebw = min(sa->deprogbwlimit * 1000,
-                     icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
-       ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
-
-       for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
-               struct intel_bw_info *bi = &dev_priv->max_bw[i];
-               int clpchgroup;
-               int j;
-
-               clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
-               bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
-
-               for (j = 0; j < qi.num_points; j++) {
-                       const struct intel_qgv_point *sp = &qi.points[j];
-                       int ct, bw;
-
-                       /*
-                        * Max row cycle time
-                        *
-                        * FIXME what is the logic behind the
-                        * assumed burst length?
-                        */
-                       ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
-                                  (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
-                       bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
-
-                       bi->deratedbw[j] = min(maxdebw,
-                                              bw * 9 / 10); /* 90% */
-
-                       DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
-                                     i, j, bi->num_planes, bi->deratedbw[j]);
-               }
-
-               if (bi->num_planes == 1)
-                       break;
-       }
-
-       return 0;
-}
-
-static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
-                              int num_planes, int qgv_point)
-{
-       int i;
-
-       /* Did we initialize the bw limits successfully? */
-       if (dev_priv->max_bw[0].num_planes == 0)
-               return UINT_MAX;
-
-       for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
-               const struct intel_bw_info *bi =
-                       &dev_priv->max_bw[i];
-
-               if (num_planes >= bi->num_planes)
-                       return bi->deratedbw[qgv_point];
-       }
-
-       return 0;
-}
-
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
-{
-       if (IS_GEN(dev_priv, 11))
-               icl_get_bw_info(dev_priv);
-}
-
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
-                                       int num_planes)
-{
-       if (IS_GEN(dev_priv, 11))
-               /*
-                * FIXME with SAGV disabled maybe we can assume
-                * point 1 will always be used? Seems to match
-                * the behaviour observed in the wild.
-                */
-               return min3(icl_max_bw(dev_priv, num_planes, 0),
-                           icl_max_bw(dev_priv, num_planes, 1),
-                           icl_max_bw(dev_priv, num_planes, 2));
-       else
-               return UINT_MAX;
-}
-
-static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
-{
-       /*
-        * We assume cursors are small enough
-        * not to cause bandwidth problems.
-        */
-       return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
-}
-
-static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       unsigned int data_rate = 0;
-       enum plane_id plane_id;
-
-       for_each_plane_id_on_crtc(crtc, plane_id) {
-               /*
-                * We assume cursors are small enough
-                * to not cause bandwidth problems.
-                */
-               if (plane_id == PLANE_CURSOR)
-                       continue;
-
-               data_rate += crtc_state->data_rate[plane_id];
-       }
-
-       return data_rate;
-}
-
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
-                         const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       bw_state->data_rate[crtc->pipe] =
-               intel_bw_crtc_data_rate(crtc_state);
-       bw_state->num_active_planes[crtc->pipe] =
-               intel_bw_crtc_num_active_planes(crtc_state);
-
-       DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
-                     pipe_name(crtc->pipe),
-                     bw_state->data_rate[crtc->pipe],
-                     bw_state->num_active_planes[crtc->pipe]);
-}
-
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
-                                              const struct intel_bw_state *bw_state)
-{
-       unsigned int num_active_planes = 0;
-       enum pipe pipe;
-
-       for_each_pipe(dev_priv, pipe)
-               num_active_planes += bw_state->num_active_planes[pipe];
-
-       return num_active_planes;
-}
-
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
-                                      const struct intel_bw_state *bw_state)
-{
-       unsigned int data_rate = 0;
-       enum pipe pipe;
-
-       for_each_pipe(dev_priv, pipe)
-               data_rate += bw_state->data_rate[pipe];
-
-       return data_rate;
-}
-
-int intel_bw_atomic_check(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc_state *new_crtc_state, *old_crtc_state;
-       struct intel_bw_state *bw_state = NULL;
-       unsigned int data_rate, max_data_rate;
-       unsigned int num_active_planes;
-       struct intel_crtc *crtc;
-       int i;
-
-       /* FIXME earlier gens need some checks too */
-       if (INTEL_GEN(dev_priv) < 11)
-               return 0;
-
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
-               unsigned int old_data_rate =
-                       intel_bw_crtc_data_rate(old_crtc_state);
-               unsigned int new_data_rate =
-                       intel_bw_crtc_data_rate(new_crtc_state);
-               unsigned int old_active_planes =
-                       intel_bw_crtc_num_active_planes(old_crtc_state);
-               unsigned int new_active_planes =
-                       intel_bw_crtc_num_active_planes(new_crtc_state);
-
-               /*
-                * Avoid locking the bw state when
-                * nothing significant has changed.
-                */
-               if (old_data_rate == new_data_rate &&
-                   old_active_planes == new_active_planes)
-                       continue;
-
-               bw_state  = intel_atomic_get_bw_state(state);
-               if (IS_ERR(bw_state))
-                       return PTR_ERR(bw_state);
-
-               bw_state->data_rate[crtc->pipe] = new_data_rate;
-               bw_state->num_active_planes[crtc->pipe] = new_active_planes;
-
-               DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
-                             pipe_name(crtc->pipe),
-                             bw_state->data_rate[crtc->pipe],
-                             bw_state->num_active_planes[crtc->pipe]);
-       }
-
-       if (!bw_state)
-               return 0;
-
-       data_rate = intel_bw_data_rate(dev_priv, bw_state);
-       num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
-
-       max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
-
-       data_rate = DIV_ROUND_UP(data_rate, 1000);
-
-       if (data_rate > max_data_rate) {
-               DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
-                             data_rate, max_data_rate, num_active_planes);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
-{
-       struct intel_bw_state *state;
-
-       state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
-       if (!state)
-               return NULL;
-
-       __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
-       return &state->base;
-}
-
-static void intel_bw_destroy_state(struct drm_private_obj *obj,
-                                  struct drm_private_state *state)
-{
-       kfree(state);
-}
-
-static const struct drm_private_state_funcs intel_bw_funcs = {
-       .atomic_duplicate_state = intel_bw_duplicate_state,
-       .atomic_destroy_state = intel_bw_destroy_state,
-};
-
-int intel_bw_init(struct drm_i915_private *dev_priv)
-{
-       struct intel_bw_state *state;
-
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
-               return -ENOMEM;
-
-       drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
-                                   &state->base, &intel_bw_funcs);
-
-       return 0;
-}
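For reference, the final check in intel_bw_atomic_check() above reduces to rounding the aggregate plane data rate up by a factor of 1000 (kB/s to MB/s, going by the debug message) and rejecting the state if it exceeds the derated limit returned by intel_max_data_rate(). A minimal standalone sketch of just that arithmetic, with illustrative names that are not part of the driver:

    #include <errno.h>

    /* Sketch only: mirrors the final data-rate vs. derated-bandwidth comparison. */
    static int bw_check_sketch(unsigned int data_rate_kbps,
                               unsigned int max_data_rate_mbps)
    {
            /* DIV_ROUND_UP(data_rate, 1000): round up when converting to MB/s */
            unsigned int data_rate_mbps = (data_rate_kbps + 999) / 1000;

            return data_rate_mbps > max_data_rate_mbps ? -EINVAL : 0;
    }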
diff --git a/drivers/gpu/drm/i915/intel_bw.h b/drivers/gpu/drm/i915/intel_bw.h
deleted file mode 100644 (file)
index e9d9c6d..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_BW_H__
-#define __INTEL_BW_H__
-
-#include <drm/drm_atomic.h>
-
-#include "i915_drv.h"
-#include "intel_display.h"
-
-struct drm_i915_private;
-struct intel_atomic_state;
-struct intel_crtc_state;
-
-struct intel_bw_state {
-       struct drm_private_state base;
-
-       unsigned int data_rate[I915_MAX_PIPES];
-       u8 num_active_planes[I915_MAX_PIPES];
-};
-
-#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
-
-static inline struct intel_bw_state *
-intel_atomic_get_bw_state(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct drm_private_state *bw_state;
-
-       bw_state = drm_atomic_get_private_obj_state(&state->base,
-                                                   &dev_priv->bw_obj);
-       if (IS_ERR(bw_state))
-               return ERR_CAST(bw_state);
-
-       return to_intel_bw_state(bw_state);
-}
-
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
-int intel_bw_atomic_check(struct intel_atomic_state *state);
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
-                         const struct intel_crtc_state *crtc_state);
-
-#endif /* __INTEL_BW_H__ */
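The to_intel_bw_state() macro and intel_atomic_get_bw_state() helper above use the standard DRM private-object pattern: the driver-specific state embeds a struct drm_private_state and is recovered from it with container_of(). A tiny self-contained sketch of that pattern, with made-up names used purely for illustration:

    #include <stddef.h>

    /* Same pointer arithmetic as the kernel's container_of() macro. */
    #define container_of_sketch(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_state { int placeholder; };

    struct wrapped_state {
            struct base_state base;   /* embedded base object */
            unsigned int data_rate;
    };

    /* Recover the wrapper from a pointer to its embedded base member. */
    static struct wrapped_state *to_wrapped_state(struct base_state *b)
    {
            return container_of_sketch(b, struct wrapped_state, base);
    }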
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
deleted file mode 100644 (file)
index 8993ab2..0000000
+++ /dev/null
@@ -1,2853 +0,0 @@
-/*
- * Copyright © 2006-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "intel_cdclk.h"
-#include "intel_drv.h"
-#include "intel_sideband.h"
-
-/**
- * DOC: CDCLK / RAWCLK
- *
- * The display engine uses several different clocks to do its work. There
- * are two main clocks involved that aren't directly related to the actual
- * pixel clock or any symbol/bit clock of the actual output port. These
- * are the core display clock (CDCLK) and RAWCLK.
- *
- * CDCLK clocks most of the display pipe logic, and thus its frequency
- * must be high enough to support the rate at which pixels are flowing
- * through the pipes. Downscaling must also be accounted as that increases
- * the effective pixel rate.
- *
- * On several platforms the CDCLK frequency can be changed dynamically
- * to minimize power consumption for a given display configuration.
- * Typically changes to the CDCLK frequency require all the display pipes
- * to be shut down while the frequency is being changed.
- *
- * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
- * DMC will not change the active CDCLK frequency however, so that part
- * will still be performed by the driver directly.
- *
- * RAWCLK is a fixed frequency clock, often used by various auxiliary
- * blocks such as AUX CH or backlight PWM. Hence the only thing we
- * really need to know about RAWCLK is its frequency so that various
- * dividers can be programmed correctly.
- */
-
-static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
-                                  struct intel_cdclk_state *cdclk_state)
-{
-       cdclk_state->cdclk = 133333;
-}
-
-static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
-                                  struct intel_cdclk_state *cdclk_state)
-{
-       cdclk_state->cdclk = 200000;
-}
-
-static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
-                                  struct intel_cdclk_state *cdclk_state)
-{
-       cdclk_state->cdclk = 266667;
-}
-
-static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
-                                  struct intel_cdclk_state *cdclk_state)
-{
-       cdclk_state->cdclk = 333333;
-}
-
-static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
-                                  struct intel_cdclk_state *cdclk_state)
-{
-       cdclk_state->cdclk = 400000;
-}
-
-static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
-                                  struct intel_cdclk_state *cdclk_state)
-{
-       cdclk_state->cdclk = 450000;
-}
-
-static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
-                          struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 hpllcc = 0;
-
-       /*
-        * 852GM/852GMV only supports 133 MHz and the HPLLCC
-        * encoding is different :(
-        * FIXME is this the right way to detect 852GM/852GMV?
-        */
-       if (pdev->revision == 0x1) {
-               cdclk_state->cdclk = 133333;
-               return;
-       }
-
-       pci_bus_read_config_word(pdev->bus,
-                                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
-
-       /* Assume that the hardware is in the high speed state.  This
-        * should be the default.
-        */
-       switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
-       case GC_CLOCK_133_200:
-       case GC_CLOCK_133_200_2:
-       case GC_CLOCK_100_200:
-               cdclk_state->cdclk = 200000;
-               break;
-       case GC_CLOCK_166_250:
-               cdclk_state->cdclk = 250000;
-               break;
-       case GC_CLOCK_100_133:
-               cdclk_state->cdclk = 133333;
-               break;
-       case GC_CLOCK_133_266:
-       case GC_CLOCK_133_266_2:
-       case GC_CLOCK_166_266:
-               cdclk_state->cdclk = 266667;
-               break;
-       }
-}
-
-static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
-                            struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 gcfgc = 0;
-
-       pci_read_config_word(pdev, GCFGC, &gcfgc);
-
-       if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
-               cdclk_state->cdclk = 133333;
-               return;
-       }
-
-       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
-       case GC_DISPLAY_CLOCK_333_320_MHZ:
-               cdclk_state->cdclk = 333333;
-               break;
-       default:
-       case GC_DISPLAY_CLOCK_190_200_MHZ:
-               cdclk_state->cdclk = 190000;
-               break;
-       }
-}
-
-static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
-                            struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 gcfgc = 0;
-
-       pci_read_config_word(pdev, GCFGC, &gcfgc);
-
-       if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
-               cdclk_state->cdclk = 133333;
-               return;
-       }
-
-       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
-       case GC_DISPLAY_CLOCK_333_320_MHZ:
-               cdclk_state->cdclk = 320000;
-               break;
-       default:
-       case GC_DISPLAY_CLOCK_190_200_MHZ:
-               cdclk_state->cdclk = 200000;
-               break;
-       }
-}
-
-static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
-{
-       static const unsigned int blb_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 4800000,
-               [4] = 6400000,
-       };
-       static const unsigned int pnv_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 4800000,
-               [4] = 2666667,
-       };
-       static const unsigned int cl_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 6400000,
-               [4] = 3333333,
-               [5] = 3566667,
-               [6] = 4266667,
-       };
-       static const unsigned int elk_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 4800000,
-       };
-       static const unsigned int ctg_vco[8] = {
-               [0] = 3200000,
-               [1] = 4000000,
-               [2] = 5333333,
-               [3] = 6400000,
-               [4] = 2666667,
-               [5] = 4266667,
-       };
-       const unsigned int *vco_table;
-       unsigned int vco;
-       u8 tmp = 0;
-
-       /* FIXME other chipsets? */
-       if (IS_GM45(dev_priv))
-               vco_table = ctg_vco;
-       else if (IS_G45(dev_priv))
-               vco_table = elk_vco;
-       else if (IS_I965GM(dev_priv))
-               vco_table = cl_vco;
-       else if (IS_PINEVIEW(dev_priv))
-               vco_table = pnv_vco;
-       else if (IS_G33(dev_priv))
-               vco_table = blb_vco;
-       else
-               return 0;
-
-       tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
-                       HPLLVCO_MOBILE : HPLLVCO);
-
-       vco = vco_table[tmp & 0x7];
-       if (vco == 0)
-               DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
-       else
-               DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
-
-       return vco;
-}
-
-static void g33_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       static const u8 div_3200[] = { 12, 10,  8,  7, 5, 16 };
-       static const u8 div_4000[] = { 14, 12, 10,  8, 6, 20 };
-       static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
-       static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
-       const u8 *div_table;
-       unsigned int cdclk_sel;
-       u16 tmp = 0;
-
-       cdclk_state->vco = intel_hpll_vco(dev_priv);
-
-       pci_read_config_word(pdev, GCFGC, &tmp);
-
-       cdclk_sel = (tmp >> 4) & 0x7;
-
-       if (cdclk_sel >= ARRAY_SIZE(div_3200))
-               goto fail;
-
-       switch (cdclk_state->vco) {
-       case 3200000:
-               div_table = div_3200;
-               break;
-       case 4000000:
-               div_table = div_4000;
-               break;
-       case 4800000:
-               div_table = div_4800;
-               break;
-       case 5333333:
-               div_table = div_5333;
-               break;
-       default:
-               goto fail;
-       }
-
-       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
-                                              div_table[cdclk_sel]);
-       return;
-
-fail:
-       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
-                 cdclk_state->vco, tmp);
-       cdclk_state->cdclk = 190476;
-}
-
-static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u16 gcfgc = 0;
-
-       pci_read_config_word(pdev, GCFGC, &gcfgc);
-
-       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
-       case GC_DISPLAY_CLOCK_267_MHZ_PNV:
-               cdclk_state->cdclk = 266667;
-               break;
-       case GC_DISPLAY_CLOCK_333_MHZ_PNV:
-               cdclk_state->cdclk = 333333;
-               break;
-       case GC_DISPLAY_CLOCK_444_MHZ_PNV:
-               cdclk_state->cdclk = 444444;
-               break;
-       case GC_DISPLAY_CLOCK_200_MHZ_PNV:
-               cdclk_state->cdclk = 200000;
-               break;
-       default:
-               DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
-               /* fall through */
-       case GC_DISPLAY_CLOCK_133_MHZ_PNV:
-               cdclk_state->cdclk = 133333;
-               break;
-       case GC_DISPLAY_CLOCK_167_MHZ_PNV:
-               cdclk_state->cdclk = 166667;
-               break;
-       }
-}
-
-static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
-                            struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       static const u8 div_3200[] = { 16, 10,  8 };
-       static const u8 div_4000[] = { 20, 12, 10 };
-       static const u8 div_5333[] = { 24, 16, 14 };
-       const u8 *div_table;
-       unsigned int cdclk_sel;
-       u16 tmp = 0;
-
-       cdclk_state->vco = intel_hpll_vco(dev_priv);
-
-       pci_read_config_word(pdev, GCFGC, &tmp);
-
-       cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
-
-       if (cdclk_sel >= ARRAY_SIZE(div_3200))
-               goto fail;
-
-       switch (cdclk_state->vco) {
-       case 3200000:
-               div_table = div_3200;
-               break;
-       case 4000000:
-               div_table = div_4000;
-               break;
-       case 5333333:
-               div_table = div_5333;
-               break;
-       default:
-               goto fail;
-       }
-
-       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
-                                              div_table[cdclk_sel]);
-       return;
-
-fail:
-       DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
-                 cdclk_state->vco, tmp);
-       cdclk_state->cdclk = 200000;
-}
-
-static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
-                          struct intel_cdclk_state *cdclk_state)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       unsigned int cdclk_sel;
-       u16 tmp = 0;
-
-       cdclk_state->vco = intel_hpll_vco(dev_priv);
-
-       pci_read_config_word(pdev, GCFGC, &tmp);
-
-       cdclk_sel = (tmp >> 12) & 0x1;
-
-       switch (cdclk_state->vco) {
-       case 2666667:
-       case 4000000:
-       case 5333333:
-               cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
-               break;
-       case 3200000:
-               cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
-               break;
-       default:
-               DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
-                         cdclk_state->vco, tmp);
-               cdclk_state->cdclk = 222222;
-               break;
-       }
-}
-
-static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       u32 lcpll = I915_READ(LCPLL_CTL);
-       u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
-
-       if (lcpll & LCPLL_CD_SOURCE_FCLK)
-               cdclk_state->cdclk = 800000;
-       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-               cdclk_state->cdclk = 450000;
-       else if (freq == LCPLL_CLK_FREQ_450)
-               cdclk_state->cdclk = 450000;
-       else if (IS_HSW_ULT(dev_priv))
-               cdclk_state->cdclk = 337500;
-       else
-               cdclk_state->cdclk = 540000;
-}
-
-static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
-{
-       int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ?
-               333333 : 320000;
-
-       /*
-        * We seem to get an unstable or solid color picture at 200MHz.
-        * Not sure what's wrong. For now use 200MHz only when all pipes
-        * are off.
-        */
-       if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
-               return 400000;
-       else if (min_cdclk > 266667)
-               return freq_320;
-       else if (min_cdclk > 0)
-               return 266667;
-       else
-               return 200000;
-}
-
-static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
-{
-       if (IS_VALLEYVIEW(dev_priv)) {
-               if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
-                       return 2;
-               else if (cdclk >= 266667)
-                       return 1;
-               else
-                       return 0;
-       } else {
-               /*
-                * Specs are full of misinformation, but testing on actual
-                * hardware has shown that we just need to write the desired
-                * CCK divider into the Punit register.
-                */
-               return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-       }
-}
-
-static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       u32 val;
-
-       vlv_iosf_sb_get(dev_priv,
-                       BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-
-       cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
-       cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
-                                              CCK_DISPLAY_CLOCK_CONTROL,
-                                              cdclk_state->vco);
-
-       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
-
-       vlv_iosf_sb_put(dev_priv,
-                       BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-
-       if (IS_VALLEYVIEW(dev_priv))
-               cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
-                       DSPFREQGUAR_SHIFT;
-       else
-               cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
-                       DSPFREQGUAR_SHIFT_CHV;
-}
-
-static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
-{
-       unsigned int credits, default_credits;
-
-       if (IS_CHERRYVIEW(dev_priv))
-               default_credits = PFI_CREDIT(12);
-       else
-               default_credits = PFI_CREDIT(8);
-
-       if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
-               /* CHV suggested value is 31 or 63 */
-               if (IS_CHERRYVIEW(dev_priv))
-                       credits = PFI_CREDIT_63;
-               else
-                       credits = PFI_CREDIT(15);
-       } else {
-               credits = default_credits;
-       }
-
-       /*
-        * WA - write default credits before re-programming
-        * FIXME: should we also set the resend bit here?
-        */
-       I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
-                  default_credits);
-
-       I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
-                  credits | PFI_CREDIT_RESEND);
-
-       /*
-        * FIXME is this guaranteed to clear
-        * immediately or should we poll for it?
-        */
-       WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
-}
-
-static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       int cdclk = cdclk_state->cdclk;
-       u32 val, cmd = cdclk_state->voltage_level;
-       intel_wakeref_t wakeref;
-
-       switch (cdclk) {
-       case 400000:
-       case 333333:
-       case 320000:
-       case 266667:
-       case 200000:
-               break;
-       default:
-               MISSING_CASE(cdclk);
-               return;
-       }
-
-       /* There are cases where we can end up here with power domains
-        * off and a CDCLK frequency other than the minimum, like when
-        * issuing a modeset without actually changing any display after
-        * a system suspend.  So grab the PIPE-A domain, which covers
-        * the HW blocks needed for the following programming.
-        */
-       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
-
-       vlv_iosf_sb_get(dev_priv,
-                       BIT(VLV_IOSF_SB_CCK) |
-                       BIT(VLV_IOSF_SB_BUNIT) |
-                       BIT(VLV_IOSF_SB_PUNIT));
-
-       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
-       val &= ~DSPFREQGUAR_MASK;
-       val |= (cmd << DSPFREQGUAR_SHIFT);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
-                     DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
-                    50)) {
-               DRM_ERROR("timed out waiting for CDclk change\n");
-       }
-
-       if (cdclk == 400000) {
-               u32 divider;
-
-               divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
-                                           cdclk) - 1;
-
-               /* adjust cdclk divider */
-               val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-               val &= ~CCK_FREQUENCY_VALUES;
-               val |= divider;
-               vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
-
-               if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
-                             CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
-                            50))
-                       DRM_ERROR("timed out waiting for CDclk change\n");
-       }
-
-       /* adjust self-refresh exit latency value */
-       val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
-       val &= ~0x7f;
-
-       /*
-        * For high bandwidth configs, we set a higher latency in the bunit
-        * so that the core display fetch happens in time to avoid underruns.
-        */
-       if (cdclk == 400000)
-               val |= 4500 / 250; /* 4.5 usec */
-       else
-               val |= 3000 / 250; /* 3.0 usec */
-       vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
-
-       vlv_iosf_sb_put(dev_priv,
-                       BIT(VLV_IOSF_SB_CCK) |
-                       BIT(VLV_IOSF_SB_BUNIT) |
-                       BIT(VLV_IOSF_SB_PUNIT));
-
-       intel_update_cdclk(dev_priv);
-
-       vlv_program_pfi_credits(dev_priv);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
-}
-
-static void chv_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       int cdclk = cdclk_state->cdclk;
-       u32 val, cmd = cdclk_state->voltage_level;
-       intel_wakeref_t wakeref;
-
-       switch (cdclk) {
-       case 333333:
-       case 320000:
-       case 266667:
-       case 200000:
-               break;
-       default:
-               MISSING_CASE(cdclk);
-               return;
-       }
-
-       /* There are cases where we can end up here with power domains
-        * off and a CDCLK frequency other than the minimum, like when
-        * issuing a modeset without actually changing any display after
-        * a system suspend.  So grab the PIPE-A domain, which covers
-        * the HW blocks needed for the following programming.
-        */
-       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
-
-       vlv_punit_get(dev_priv);
-       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
-       val &= ~DSPFREQGUAR_MASK_CHV;
-       val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
-                     DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
-                    50)) {
-               DRM_ERROR("timed out waiting for CDclk change\n");
-       }
-
-       vlv_punit_put(dev_priv);
-
-       intel_update_cdclk(dev_priv);
-
-       vlv_program_pfi_credits(dev_priv);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
-}
-
-static int bdw_calc_cdclk(int min_cdclk)
-{
-       if (min_cdclk > 540000)
-               return 675000;
-       else if (min_cdclk > 450000)
-               return 540000;
-       else if (min_cdclk > 337500)
-               return 450000;
-       else
-               return 337500;
-}
-
-static u8 bdw_calc_voltage_level(int cdclk)
-{
-       switch (cdclk) {
-       default:
-       case 337500:
-               return 2;
-       case 450000:
-               return 0;
-       case 540000:
-               return 1;
-       case 675000:
-               return 3;
-       }
-}
-
-static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       u32 lcpll = I915_READ(LCPLL_CTL);
-       u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
-
-       if (lcpll & LCPLL_CD_SOURCE_FCLK)
-               cdclk_state->cdclk = 800000;
-       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-               cdclk_state->cdclk = 450000;
-       else if (freq == LCPLL_CLK_FREQ_450)
-               cdclk_state->cdclk = 450000;
-       else if (freq == LCPLL_CLK_FREQ_54O_BDW)
-               cdclk_state->cdclk = 540000;
-       else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
-               cdclk_state->cdclk = 337500;
-       else
-               cdclk_state->cdclk = 675000;
-
-       /*
-        * Can't read this out :( Let's assume it's
-        * at least what the CDCLK frequency requires.
-        */
-       cdclk_state->voltage_level =
-               bdw_calc_voltage_level(cdclk_state->cdclk);
-}
-
-static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       int cdclk = cdclk_state->cdclk;
-       u32 val;
-       int ret;
-
-       if (WARN((I915_READ(LCPLL_CTL) &
-                 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
-                  LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
-                  LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
-                  LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
-                "trying to change cdclk frequency with cdclk not enabled\n"))
-               return;
-
-       ret = sandybridge_pcode_write(dev_priv,
-                                     BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
-       if (ret) {
-               DRM_ERROR("failed to inform pcode about cdclk change\n");
-               return;
-       }
-
-       val = I915_READ(LCPLL_CTL);
-       val |= LCPLL_CD_SOURCE_FCLK;
-       I915_WRITE(LCPLL_CTL, val);
-
-       /*
-        * According to the spec, it should be enough to poll for this 1 us.
-        * However, extensive testing shows that this can take longer.
-        */
-       if (wait_for_us(I915_READ(LCPLL_CTL) &
-                       LCPLL_CD_SOURCE_FCLK_DONE, 100))
-               DRM_ERROR("Switching to FCLK failed\n");
-
-       val = I915_READ(LCPLL_CTL);
-       val &= ~LCPLL_CLK_FREQ_MASK;
-
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-               /* fall through */
-       case 337500:
-               val |= LCPLL_CLK_FREQ_337_5_BDW;
-               break;
-       case 450000:
-               val |= LCPLL_CLK_FREQ_450;
-               break;
-       case 540000:
-               val |= LCPLL_CLK_FREQ_54O_BDW;
-               break;
-       case 675000:
-               val |= LCPLL_CLK_FREQ_675_BDW;
-               break;
-       }
-
-       I915_WRITE(LCPLL_CTL, val);
-
-       val = I915_READ(LCPLL_CTL);
-       val &= ~LCPLL_CD_SOURCE_FCLK;
-       I915_WRITE(LCPLL_CTL, val);
-
-       if (wait_for_us((I915_READ(LCPLL_CTL) &
-                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
-               DRM_ERROR("Switching back to LCPLL failed\n");
-
-       sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-                               cdclk_state->voltage_level);
-
-       I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
-
-       intel_update_cdclk(dev_priv);
-}
-
-static int skl_calc_cdclk(int min_cdclk, int vco)
-{
-       if (vco == 8640000) {
-               if (min_cdclk > 540000)
-                       return 617143;
-               else if (min_cdclk > 432000)
-                       return 540000;
-               else if (min_cdclk > 308571)
-                       return 432000;
-               else
-                       return 308571;
-       } else {
-               if (min_cdclk > 540000)
-                       return 675000;
-               else if (min_cdclk > 450000)
-                       return 540000;
-               else if (min_cdclk > 337500)
-                       return 450000;
-               else
-                       return 337500;
-       }
-}
-
-static u8 skl_calc_voltage_level(int cdclk)
-{
-       if (cdclk > 540000)
-               return 3;
-       else if (cdclk > 450000)
-               return 2;
-       else if (cdclk > 337500)
-               return 1;
-       else
-               return 0;
-}
-
-static void skl_dpll0_update(struct drm_i915_private *dev_priv,
-                            struct intel_cdclk_state *cdclk_state)
-{
-       u32 val;
-
-       cdclk_state->ref = 24000;
-       cdclk_state->vco = 0;
-
-       val = I915_READ(LCPLL1_CTL);
-       if ((val & LCPLL_PLL_ENABLE) == 0)
-               return;
-
-       if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
-               return;
-
-       val = I915_READ(DPLL_CTRL1);
-
-       if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
-                           DPLL_CTRL1_SSC(SKL_DPLL0) |
-                           DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
-                   DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
-               return;
-
-       switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
-               cdclk_state->vco = 8100000;
-               break;
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
-       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
-               cdclk_state->vco = 8640000;
-               break;
-       default:
-               MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
-               break;
-       }
-}
-
-static void skl_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       u32 cdctl;
-
-       skl_dpll0_update(dev_priv, cdclk_state);
-
-       cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
-       if (cdclk_state->vco == 0)
-               goto out;
-
-       cdctl = I915_READ(CDCLK_CTL);
-
-       if (cdclk_state->vco == 8640000) {
-               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
-               case CDCLK_FREQ_450_432:
-                       cdclk_state->cdclk = 432000;
-                       break;
-               case CDCLK_FREQ_337_308:
-                       cdclk_state->cdclk = 308571;
-                       break;
-               case CDCLK_FREQ_540:
-                       cdclk_state->cdclk = 540000;
-                       break;
-               case CDCLK_FREQ_675_617:
-                       cdclk_state->cdclk = 617143;
-                       break;
-               default:
-                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
-                       break;
-               }
-       } else {
-               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
-               case CDCLK_FREQ_450_432:
-                       cdclk_state->cdclk = 450000;
-                       break;
-               case CDCLK_FREQ_337_308:
-                       cdclk_state->cdclk = 337500;
-                       break;
-               case CDCLK_FREQ_540:
-                       cdclk_state->cdclk = 540000;
-                       break;
-               case CDCLK_FREQ_675_617:
-                       cdclk_state->cdclk = 675000;
-                       break;
-               default:
-                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
-                       break;
-               }
-       }
-
- out:
-       /*
-        * Can't read this out :( Let's assume it's
-        * at least what the CDCLK frequency requires.
-        */
-       cdclk_state->voltage_level =
-               skl_calc_voltage_level(cdclk_state->cdclk);
-}
-
-/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
-static int skl_cdclk_decimal(int cdclk)
-{
-       return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
-}
-
-static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
-                                       int vco)
-{
-       bool changed = dev_priv->skl_preferred_vco_freq != vco;
-
-       dev_priv->skl_preferred_vco_freq = vco;
-
-       if (changed)
-               intel_update_max_cdclk(dev_priv);
-}
-
-static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
-{
-       u32 val;
-
-       WARN_ON(vco != 8100000 && vco != 8640000);
-
-       /*
-        * We always enable DPLL0 with the lowest link rate possible, but still
-        * taking into account the VCO required to operate the eDP panel at the
-        * desired frequency. The usual DP link rates operate with a VCO of
-        * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
-        * The modeset code is responsible for the selection of the exact link
-        * rate later on, with the constraint of choosing a frequency that
-        * works with vco.
-        */
-       val = I915_READ(DPLL_CTRL1);
-
-       val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
-                DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
-       val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-       if (vco == 8640000)
-               val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
-                                           SKL_DPLL0);
-       else
-               val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
-                                           SKL_DPLL0);
-
-       I915_WRITE(DPLL_CTRL1, val);
-       POSTING_READ(DPLL_CTRL1);
-
-       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
-
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
-                                   5))
-               DRM_ERROR("DPLL0 not locked\n");
-
-       dev_priv->cdclk.hw.vco = vco;
-
-       /* We'll want to keep using the current vco from now on. */
-       skl_set_preferred_cdclk_vco(dev_priv, vco);
-}
-
-static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
-                                   1))
-               DRM_ERROR("Couldn't disable DPLL0\n");
-
-       dev_priv->cdclk.hw.vco = 0;
-}
-
-static void skl_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       int cdclk = cdclk_state->cdclk;
-       int vco = cdclk_state->vco;
-       u32 freq_select, cdclk_ctl;
-       int ret;
-
-       /*
-        * Based on WA#1183, the 308 and 617 MHz CDCLK rates are unsupported
-        * on SKL. In theory this should never happen since only
-        * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
-        * supported on SKL either, see the above WA. WARN whenever trying to
-        * use the corresponding VCO freq as that always leads to using the
-        * minimum 308MHz CDCLK.
-        */
-       WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
-
-       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               SKL_CDCLK_PREPARE_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE, 3);
-       if (ret) {
-               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
-                         ret);
-               return;
-       }
-
-       /* Choose frequency for this cdclk */
-       switch (cdclk) {
-       default:
-               WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
-               WARN_ON(vco != 0);
-               /* fall through */
-       case 308571:
-       case 337500:
-               freq_select = CDCLK_FREQ_337_308;
-               break;
-       case 450000:
-       case 432000:
-               freq_select = CDCLK_FREQ_450_432;
-               break;
-       case 540000:
-               freq_select = CDCLK_FREQ_540;
-               break;
-       case 617143:
-       case 675000:
-               freq_select = CDCLK_FREQ_675_617;
-               break;
-       }
-
-       if (dev_priv->cdclk.hw.vco != 0 &&
-           dev_priv->cdclk.hw.vco != vco)
-               skl_dpll0_disable(dev_priv);
-
-       cdclk_ctl = I915_READ(CDCLK_CTL);
-
-       if (dev_priv->cdclk.hw.vco != vco) {
-               /* Wa Display #1183: skl,kbl,cfl */
-               cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
-               cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
-               I915_WRITE(CDCLK_CTL, cdclk_ctl);
-       }
-
-       /* Wa Display #1183: skl,kbl,cfl */
-       cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
-       I915_WRITE(CDCLK_CTL, cdclk_ctl);
-       POSTING_READ(CDCLK_CTL);
-
-       if (dev_priv->cdclk.hw.vco != vco)
-               skl_dpll0_enable(dev_priv, vco);
-
-       /* Wa Display #1183: skl,kbl,cfl */
-       cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
-       I915_WRITE(CDCLK_CTL, cdclk_ctl);
-
-       cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
-       I915_WRITE(CDCLK_CTL, cdclk_ctl);
-
-       /* Wa Display #1183: skl,kbl,cfl */
-       cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
-       I915_WRITE(CDCLK_CTL, cdclk_ctl);
-       POSTING_READ(CDCLK_CTL);
-
-       /* inform PCU of the change */
-       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               cdclk_state->voltage_level);
-
-       intel_update_cdclk(dev_priv);
-}
-
-static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
-       u32 cdctl, expected;
-
-       /*
-        * Check whether the pre-os initialized the display.
-        * The pre-os sets the SWF18 scratchpad register, which the OS driver
-        * can read to check that status.
-        */
-       if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
-               goto sanitize;
-
-       intel_update_cdclk(dev_priv);
-       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
-       /* Is PLL enabled and locked ? */
-       if (dev_priv->cdclk.hw.vco == 0 ||
-           dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
-               goto sanitize;
-
-       /* DPLL okay; verify the cdclock
-        *
-        * In some instances the frequency selection is correct but the
-        * decimal part is programmed wrong by the BIOS when the pre-os does
-        * not enable the display. Verify that as well.
-        */
-       cdctl = I915_READ(CDCLK_CTL);
-       expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
-               skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-       if (cdctl == expected)
-               /* All well; nothing to sanitize */
-               return;
-
-sanitize:
-       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
-       /* force cdclk programming */
-       dev_priv->cdclk.hw.cdclk = 0;
-       /* force full PLL disable + enable */
-       dev_priv->cdclk.hw.vco = -1;
-}
-
-static void skl_init_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state;
-
-       skl_sanitize_cdclk(dev_priv);
-
-       if (dev_priv->cdclk.hw.cdclk != 0 &&
-           dev_priv->cdclk.hw.vco != 0) {
-               /*
-                * Use the current vco as our initial
-                * guess as to what the preferred vco is.
-                */
-               if (dev_priv->skl_preferred_vco_freq == 0)
-                       skl_set_preferred_cdclk_vco(dev_priv,
-                                                   dev_priv->cdclk.hw.vco);
-               return;
-       }
-
-       cdclk_state = dev_priv->cdclk.hw;
-
-       cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
-       if (cdclk_state.vco == 0)
-               cdclk_state.vco = 8100000;
-       cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
-       cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
-
-       skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
-       cdclk_state.cdclk = cdclk_state.bypass;
-       cdclk_state.vco = 0;
-       cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
-
-       skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static int bxt_calc_cdclk(int min_cdclk)
-{
-       if (min_cdclk > 576000)
-               return 624000;
-       else if (min_cdclk > 384000)
-               return 576000;
-       else if (min_cdclk > 288000)
-               return 384000;
-       else if (min_cdclk > 144000)
-               return 288000;
-       else
-               return 144000;
-}
-
-static int glk_calc_cdclk(int min_cdclk)
-{
-       if (min_cdclk > 158400)
-               return 316800;
-       else if (min_cdclk > 79200)
-               return 158400;
-       else
-               return 79200;
-}
-
-static u8 bxt_calc_voltage_level(int cdclk)
-{
-       return DIV_ROUND_UP(cdclk, 25000);
-}
-
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
-       int ratio;
-
-       if (cdclk == dev_priv->cdclk.hw.bypass)
-               return 0;
-
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-               /* fall through */
-       case 144000:
-       case 288000:
-       case 384000:
-       case 576000:
-               ratio = 60;
-               break;
-       case 624000:
-               ratio = 65;
-               break;
-       }
-
-       return dev_priv->cdclk.hw.ref * ratio;
-}
-
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
-       int ratio;
-
-       if (cdclk == dev_priv->cdclk.hw.bypass)
-               return 0;
-
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-               /* fall through */
-       case  79200:
-       case 158400:
-       case 316800:
-               ratio = 33;
-               break;
-       }
-
-       return dev_priv->cdclk.hw.ref * ratio;
-}
-
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
-                             struct intel_cdclk_state *cdclk_state)
-{
-       u32 val;
-
-       cdclk_state->ref = 19200;
-       cdclk_state->vco = 0;
-
-       val = I915_READ(BXT_DE_PLL_ENABLE);
-       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
-               return;
-
-       if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
-               return;
-
-       val = I915_READ(BXT_DE_PLL_CTL);
-       cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
-}
-
-static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       u32 divider;
-       int div;
-
-       bxt_de_pll_update(dev_priv, cdclk_state);
-
-       cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
-       if (cdclk_state->vco == 0)
-               goto out;
-
-       divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
-       switch (divider) {
-       case BXT_CDCLK_CD2X_DIV_SEL_1:
-               div = 2;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_1_5:
-               WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
-               div = 3;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_2:
-               div = 4;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_4:
-               div = 8;
-               break;
-       default:
-               MISSING_CASE(divider);
-               return;
-       }
-
-       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
-
- out:
-       /*
-        * Can't read this out :( Let's assume it's
-        * at least what the CDCLK frequency requires.
-        */
-       cdclk_state->voltage_level =
-               bxt_calc_voltage_level(cdclk_state->cdclk);
-}
-
-static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(BXT_DE_PLL_ENABLE, 0);
-
-       /* Timeout 200us */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
-                                   1))
-               DRM_ERROR("timeout waiting for DE PLL unlock\n");
-
-       dev_priv->cdclk.hw.vco = 0;
-}
-
-static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
-{
-       int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
-       u32 val;
-
-       val = I915_READ(BXT_DE_PLL_CTL);
-       val &= ~BXT_DE_PLL_RATIO_MASK;
-       val |= BXT_DE_PLL_RATIO(ratio);
-       I915_WRITE(BXT_DE_PLL_CTL, val);
-
-       I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
-
-       /* Timeout 200us */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   BXT_DE_PLL_ENABLE,
-                                   BXT_DE_PLL_LOCK,
-                                   BXT_DE_PLL_LOCK,
-                                   1))
-               DRM_ERROR("timeout waiting for DE PLL lock\n");
-
-       dev_priv->cdclk.hw.vco = vco;
-}
-
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       int cdclk = cdclk_state->cdclk;
-       int vco = cdclk_state->vco;
-       u32 val, divider;
-       int ret;
-
-       /* cdclk = vco / 2 / div{1,1.5,2,4} */
-       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
-       default:
-               WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
-               WARN_ON(vco != 0);
-               /* fall through */
-       case 2:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-               break;
-       case 3:
-               WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
-               break;
-       case 4:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_2;
-               break;
-       case 8:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_4;
-               break;
-       }
-
-       /*
-        * Inform power controller of upcoming frequency change. BSpec
-        * requires us to wait up to 150usec, but that leads to timeouts;
-        * the 2ms used here is based on experiment.
-        */
-       ret = sandybridge_pcode_write_timeout(dev_priv,
-                                             HSW_PCODE_DE_WRITE_FREQ_REQ,
-                                             0x80000000, 150, 2);
-       if (ret) {
-               DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
-                         ret, cdclk);
-               return;
-       }
-
-       if (dev_priv->cdclk.hw.vco != 0 &&
-           dev_priv->cdclk.hw.vco != vco)
-               bxt_de_pll_disable(dev_priv);
-
-       if (dev_priv->cdclk.hw.vco != vco)
-               bxt_de_pll_enable(dev_priv, vco);
-
-       val = divider | skl_cdclk_decimal(cdclk);
-       if (pipe == INVALID_PIPE)
-               val |= BXT_CDCLK_CD2X_PIPE_NONE;
-       else
-               val |= BXT_CDCLK_CD2X_PIPE(pipe);
-       /*
-        * Disable SSA Precharge when CD clock frequency < 500 MHz,
-        * enable otherwise.
-        */
-       if (cdclk >= 500000)
-               val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-       I915_WRITE(CDCLK_CTL, val);
-
-       if (pipe != INVALID_PIPE)
-               intel_wait_for_vblank(dev_priv, pipe);
-
-       /*
-        * The timeout isn't specified, the 2ms used here is based on
-        * experiment.
-        * FIXME: Waiting for the request completion could be delayed until
-        * the next PCODE request based on BSpec.
-        */
-       ret = sandybridge_pcode_write_timeout(dev_priv,
-                                             HSW_PCODE_DE_WRITE_FREQ_REQ,
-                                             cdclk_state->voltage_level, 150, 2);
-       if (ret) {
-               DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-                         ret, cdclk);
-               return;
-       }
-
-       intel_update_cdclk(dev_priv);
-}
-
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
-       u32 cdctl, expected;
-
-       intel_update_cdclk(dev_priv);
-       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
-       if (dev_priv->cdclk.hw.vco == 0 ||
-           dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
-               goto sanitize;
-
-       /* DPLL okay; verify the cdclock
-        *
-        * Some BIOS versions leave an incorrect decimal frequency value and
-        * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
-        * so sanitize this register.
-        */
-       cdctl = I915_READ(CDCLK_CTL);
-       /*
-        * Let's ignore the pipe field, since the BIOS could have configured
-        * the dividers either synching to an active pipe or asynchronously
-        * (PIPE_NONE).
-        */
-       cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
-       expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
-               skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-       /*
-        * Disable SSA Precharge when CD clock frequency < 500 MHz,
-        * enable otherwise.
-        */
-       if (dev_priv->cdclk.hw.cdclk >= 500000)
-               expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
-       if (cdctl == expected)
-               /* All well; nothing to sanitize */
-               return;
-
-sanitize:
-       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
-       /* force cdclk programming */
-       dev_priv->cdclk.hw.cdclk = 0;
-
-       /* force full PLL disable + enable */
-       dev_priv->cdclk.hw.vco = -1;
-}
-
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state;
-
-       bxt_sanitize_cdclk(dev_priv);
-
-       if (dev_priv->cdclk.hw.cdclk != 0 &&
-           dev_priv->cdclk.hw.vco != 0)
-               return;
-
-       cdclk_state = dev_priv->cdclk.hw;
-
-       /*
-        * FIXME:
-        * - The initial CDCLK needs to be read from VBT.
-        *   Need to make this change after VBT has changes for BXT.
-        */
-       if (IS_GEMINILAKE(dev_priv)) {
-               cdclk_state.cdclk = glk_calc_cdclk(0);
-               cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
-       } else {
-               cdclk_state.cdclk = bxt_calc_cdclk(0);
-               cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
-       }
-       cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
-       bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
-       cdclk_state.cdclk = cdclk_state.bypass;
-       cdclk_state.vco = 0;
-       cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
-       bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static int cnl_calc_cdclk(int min_cdclk)
-{
-       if (min_cdclk > 336000)
-               return 528000;
-       else if (min_cdclk > 168000)
-               return 336000;
-       else
-               return 168000;
-}
-
-static u8 cnl_calc_voltage_level(int cdclk)
-{
-       if (cdclk > 336000)
-               return 2;
-       else if (cdclk > 168000)
-               return 1;
-       else
-               return 0;
-}
-
-static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
-                                struct intel_cdclk_state *cdclk_state)
-{
-       u32 val;
-
-       if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
-               cdclk_state->ref = 24000;
-       else
-               cdclk_state->ref = 19200;
-
-       cdclk_state->vco = 0;
-
-       val = I915_READ(BXT_DE_PLL_ENABLE);
-       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
-               return;
-
-       if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
-               return;
-
-       cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
-}
-
-static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
-                        struct intel_cdclk_state *cdclk_state)
-{
-       u32 divider;
-       int div;
-
-       cnl_cdclk_pll_update(dev_priv, cdclk_state);
-
-       cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
-       if (cdclk_state->vco == 0)
-               goto out;
-
-       divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
-       switch (divider) {
-       case BXT_CDCLK_CD2X_DIV_SEL_1:
-               div = 2;
-               break;
-       case BXT_CDCLK_CD2X_DIV_SEL_2:
-               div = 4;
-               break;
-       default:
-               MISSING_CASE(divider);
-               return;
-       }
-
-       cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
-
- out:
-       /*
-        * Can't read this out :( Let's assume it's
-        * at least what the CDCLK frequency requires.
-        */
-       cdclk_state->voltage_level =
-               cnl_calc_voltage_level(cdclk_state->cdclk);
-}
-
-static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = I915_READ(BXT_DE_PLL_ENABLE);
-       val &= ~BXT_DE_PLL_PLL_ENABLE;
-       I915_WRITE(BXT_DE_PLL_ENABLE, val);
-
-       /* Timeout 200us */
-       if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
-               DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
-
-       dev_priv->cdclk.hw.vco = 0;
-}
-
-static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
-{
-       int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
-       u32 val;
-
-       val = CNL_CDCLK_PLL_RATIO(ratio);
-       I915_WRITE(BXT_DE_PLL_ENABLE, val);
-
-       val |= BXT_DE_PLL_PLL_ENABLE;
-       I915_WRITE(BXT_DE_PLL_ENABLE, val);
-
-       /* Timeout 200us */
-       if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
-               DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
-
-       dev_priv->cdclk.hw.vco = vco;
-}
-
-static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       int cdclk = cdclk_state->cdclk;
-       int vco = cdclk_state->vco;
-       u32 val, divider;
-       int ret;
-
-       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               SKL_CDCLK_PREPARE_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE, 3);
-       if (ret) {
-               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
-                         ret);
-               return;
-       }
-
-       /* cdclk = vco / 2 / div{1,2} */
-       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
-       default:
-               WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
-               WARN_ON(vco != 0);
-               /* fall through */
-       case 2:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-               break;
-       case 4:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_2;
-               break;
-       }
-
-       if (dev_priv->cdclk.hw.vco != 0 &&
-           dev_priv->cdclk.hw.vco != vco)
-               cnl_cdclk_pll_disable(dev_priv);
-
-       if (dev_priv->cdclk.hw.vco != vco)
-               cnl_cdclk_pll_enable(dev_priv, vco);
-
-       val = divider | skl_cdclk_decimal(cdclk);
-       if (pipe == INVALID_PIPE)
-               val |= BXT_CDCLK_CD2X_PIPE_NONE;
-       else
-               val |= BXT_CDCLK_CD2X_PIPE(pipe);
-       I915_WRITE(CDCLK_CTL, val);
-
-       if (pipe != INVALID_PIPE)
-               intel_wait_for_vblank(dev_priv, pipe);
-
-       /* inform PCU of the change */
-       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               cdclk_state->voltage_level);
-
-       intel_update_cdclk(dev_priv);
-
-       /*
-        * Can't read out the voltage level :(
-        * Let's just assume everything is as expected.
-        */
-       dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
-
-static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
-       int ratio;
-
-       if (cdclk == dev_priv->cdclk.hw.bypass)
-               return 0;
-
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-               /* fall through */
-       case 168000:
-       case 336000:
-               ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
-               break;
-       case 528000:
-               ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
-               break;
-       }
-
-       return dev_priv->cdclk.hw.ref * ratio;
-}
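
/*
 * An illustrative worked example, assuming the 19.2 MHz reference handled
 * above and the cdclk = vco / 2 / div relation used by cnl_set_cdclk():
 * cdclk 336000 kHz selects ratio 35, so vco = 19200 * 35 = 672000 kHz and
 * 672000 / 2 / 1 = 336000 kHz (CD2X divider 1); cdclk 168000 kHz reuses the
 * same VCO with divider 2: 672000 / 2 / 2 = 168000 kHz.
 */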
-
-static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
-       u32 cdctl, expected;
-
-       intel_update_cdclk(dev_priv);
-       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
-       if (dev_priv->cdclk.hw.vco == 0 ||
-           dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
-               goto sanitize;
-
-       /* DPLL okay; verify the cdclock
-        *
-        * Some BIOS versions leave an incorrect decimal frequency value and
-        * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
-        * so sanitize this register.
-        */
-       cdctl = I915_READ(CDCLK_CTL);
-       /*
-        * Let's ignore the pipe field, since the BIOS could have configured
-        * the divider either synced to an active pipe or asynchronously
-        * (PIPE_NONE).
-        */
-       cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
-       expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
-                  skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-
-       if (cdctl == expected)
-               /* All well; nothing to sanitize */
-               return;
-
-sanitize:
-       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
-       /* force cdclk programming */
-       dev_priv->cdclk.hw.cdclk = 0;
-
-       /* force full PLL disable + enable */
-       dev_priv->cdclk.hw.vco = -1;
-}
-
-static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
-{
-       int ranges_24[] = { 312000, 552000, 648000 };
-       int ranges_19_38[] = { 307200, 556800, 652800 };
-       int *ranges;
-
-       switch (ref) {
-       default:
-               MISSING_CASE(ref);
-               /* fall through */
-       case 24000:
-               ranges = ranges_24;
-               break;
-       case 19200:
-       case 38400:
-               ranges = ranges_19_38;
-               break;
-       }
-
-       if (min_cdclk > ranges[1])
-               return ranges[2];
-       else if (min_cdclk > ranges[0])
-               return ranges[1];
-       else
-               return ranges[0];
-}
-
-static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
-       int ratio;
-
-       if (cdclk == dev_priv->cdclk.hw.bypass)
-               return 0;
-
-       switch (cdclk) {
-       default:
-               MISSING_CASE(cdclk);
-               /* fall through */
-       case 307200:
-       case 556800:
-       case 652800:
-               WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
-                       dev_priv->cdclk.hw.ref != 38400);
-               break;
-       case 312000:
-       case 552000:
-       case 648000:
-               WARN_ON(dev_priv->cdclk.hw.ref != 24000);
-       }
-
-       ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
-
-       return dev_priv->cdclk.hw.ref * ratio;
-}
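
/*
 * An illustrative worked example for the ratio computation above, assuming
 * a 24 MHz reference: cdclk 648000 kHz gives ratio = 648000 / (24000 / 2)
 * = 54, hence vco = 24000 * 54 = 1296000 kHz, and the fixed CD2X divider
 * of 1 on ICL yields cdclk = vco / 2 = 648000 kHz again.
 */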
-
-static void icl_set_cdclk(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state,
-                         enum pipe pipe)
-{
-       unsigned int cdclk = cdclk_state->cdclk;
-       unsigned int vco = cdclk_state->vco;
-       int ret;
-
-       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               SKL_CDCLK_PREPARE_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE,
-                               SKL_CDCLK_READY_FOR_CHANGE, 3);
-       if (ret) {
-               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
-                         ret);
-               return;
-       }
-
-       if (dev_priv->cdclk.hw.vco != 0 &&
-           dev_priv->cdclk.hw.vco != vco)
-               cnl_cdclk_pll_disable(dev_priv);
-
-       if (dev_priv->cdclk.hw.vco != vco)
-               cnl_cdclk_pll_enable(dev_priv, vco);
-
-       /*
-        * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
-        * divider here synchronized to a pipe while CDCLK is on, nor will we
-        * need the corresponding vblank wait.
-        */
-       I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
-                             skl_cdclk_decimal(cdclk));
-
-       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-                               cdclk_state->voltage_level);
-
-       intel_update_cdclk(dev_priv);
-
-       /*
-        * Can't read out the voltage level :(
-        * Let's just assume everything is as expected.
-        */
-       dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
-
-static u8 icl_calc_voltage_level(int cdclk)
-{
-       if (cdclk > 556800)
-               return 2;
-       else if (cdclk > 312000)
-               return 1;
-       else
-               return 0;
-}
-
-static void icl_get_cdclk(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state)
-{
-       u32 val;
-
-       cdclk_state->bypass = 50000;
-
-       val = I915_READ(SKL_DSSM);
-       switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
-       default:
-               MISSING_CASE(val);
-               /* fall through */
-       case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
-               cdclk_state->ref = 24000;
-               break;
-       case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
-               cdclk_state->ref = 19200;
-               break;
-       case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
-               cdclk_state->ref = 38400;
-               break;
-       }
-
-       val = I915_READ(BXT_DE_PLL_ENABLE);
-       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
-           (val & BXT_DE_PLL_LOCK) == 0) {
-               /*
-                * The CDCLK PLL is disabled, so the VCO/ratio doesn't matter;
-                * setting it to zero is a way to signal that the PLL is off.
-                */
-               cdclk_state->vco = 0;
-               cdclk_state->cdclk = cdclk_state->bypass;
-               goto out;
-       }
-
-       cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
-
-       val = I915_READ(CDCLK_CTL);
-       WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
-
-       cdclk_state->cdclk = cdclk_state->vco / 2;
-
-out:
-       /*
-        * Can't read this out :( Let's assume it's
-        * at least what the CDCLK frequency requires.
-        */
-       cdclk_state->voltage_level =
-               icl_calc_voltage_level(cdclk_state->cdclk);
-}
-
-static void icl_init_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state sanitized_state;
-       u32 val;
-
-       /* This sets dev_priv->cdclk.hw. */
-       intel_update_cdclk(dev_priv);
-       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
-       /* This means CDCLK disabled. */
-       if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
-               goto sanitize;
-
-       val = I915_READ(CDCLK_CTL);
-
-       if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
-               goto sanitize;
-
-       if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
-           skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
-               goto sanitize;
-
-       return;
-
-sanitize:
-       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
-       sanitized_state.ref = dev_priv->cdclk.hw.ref;
-       sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
-       sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
-                                                    sanitized_state.cdclk);
-       sanitized_state.voltage_level =
-                               icl_calc_voltage_level(sanitized_state.cdclk);
-
-       icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
-}
-
-static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
-       cdclk_state.cdclk = cdclk_state.bypass;
-       cdclk_state.vco = 0;
-       cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
-
-       icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state;
-
-       cnl_sanitize_cdclk(dev_priv);
-
-       if (dev_priv->cdclk.hw.cdclk != 0 &&
-           dev_priv->cdclk.hw.vco != 0)
-               return;
-
-       cdclk_state = dev_priv->cdclk.hw;
-
-       cdclk_state.cdclk = cnl_calc_cdclk(0);
-       cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
-       cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
-
-       cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
-       cdclk_state.cdclk = cdclk_state.bypass;
-       cdclk_state.vco = 0;
-       cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
-
-       cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-/**
- * intel_cdclk_init - Initialize CDCLK
- * @i915: i915 device
- *
- * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
- * sanitizing the state of the hardware if needed. This is generally done only
- * during the display core initialization sequence, after which the DMC will
- * take care of turning CDCLK off/on as needed.
- */
-void intel_cdclk_init(struct drm_i915_private *i915)
-{
-       if (INTEL_GEN(i915) >= 11)
-               icl_init_cdclk(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_init_cdclk(i915);
-       else if (IS_GEN9_BC(i915))
-               skl_init_cdclk(i915);
-       else if (IS_GEN9_LP(i915))
-               bxt_init_cdclk(i915);
-}
-
-/**
- * intel_cdclk_uninit - Uninitialize CDCLK
- * @i915: i915 device
- *
- * Uninitialize CDCLK. This is done only during the display core
- * uninitialization sequence.
- */
-void intel_cdclk_uninit(struct drm_i915_private *i915)
-{
-       if (INTEL_GEN(i915) >= 11)
-               icl_uninit_cdclk(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_uninit_cdclk(i915);
-       else if (IS_GEN9_BC(i915))
-               skl_uninit_cdclk(i915);
-       else if (IS_GEN9_LP(i915))
-               bxt_uninit_cdclk(i915);
-}
-
-/**
- * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
- * @a: first CDCLK state
- * @b: second CDCLK state
- *
- * Returns:
- * True if the CDCLK states require pipes to be off during reprogramming, false if not.
- */
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
-                              const struct intel_cdclk_state *b)
-{
-       return a->cdclk != b->cdclk ||
-               a->vco != b->vco ||
-               a->ref != b->ref;
-}
-
-/**
- * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
- * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
- * @a: first CDCLK state
- * @b: second CDCLK state
- *
- * Returns:
- * True if the CDCLK states require just a cd2x divider update, false if not.
- */
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
-                                  const struct intel_cdclk_state *a,
-                                  const struct intel_cdclk_state *b)
-{
-       /* Older hw doesn't have the capability */
-       if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
-               return false;
-
-       return a->cdclk != b->cdclk &&
-               a->vco == b->vco &&
-               a->ref == b->ref;
-}
-
-/**
- * intel_cdclk_changed - Determine if two CDCLK states are different
- * @a: first CDCLK state
- * @b: second CDCLK state
- *
- * Returns:
- * True if the CDCLK states don't match, false if they do.
- */
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
-                        const struct intel_cdclk_state *b)
-{
-       return intel_cdclk_needs_modeset(a, b) ||
-               a->voltage_level != b->voltage_level;
-}
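
/*
 * A small illustration of how the three checks above relate, assuming two
 * states that differ only in cdclk while vco and ref match:
 * intel_cdclk_needs_modeset() returns true, intel_cdclk_needs_cd2x_update()
 * returns true as well on BXT/GLK and gen10+ (only the cd2x divider has to
 * change), and intel_cdclk_changed() returns true. If instead only the
 * voltage level differs, only intel_cdclk_changed() returns true.
 */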
-
-/**
- * intel_cdclk_swap_state - make atomic CDCLK configuration effective
- * @state: atomic state
- *
- * This is the CDCLK version of drm_atomic_helper_swap_state() since the
- * helper does not handle driver-specific global state.
- *
- * Similarly to the atomic helpers this function does a complete swap,
- * i.e. it also puts the old state into @state. This is used by the commit
- * code to determine how CDCLK has changed (for instance did it increase or
- * decrease).
- */
-void intel_cdclk_swap_state(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
-       swap(state->cdclk.logical, dev_priv->cdclk.logical);
-       swap(state->cdclk.actual, dev_priv->cdclk.actual);
-}
-
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
-                           const char *context)
-{
-       DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
-                        context, cdclk_state->cdclk, cdclk_state->vco,
-                        cdclk_state->ref, cdclk_state->bypass,
-                        cdclk_state->voltage_level);
-}
-
-/**
- * intel_set_cdclk - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @cdclk_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
- *
- * Program the hardware based on the passed in CDCLK state,
- * if necessary.
- */
-static void intel_set_cdclk(struct drm_i915_private *dev_priv,
-                           const struct intel_cdclk_state *cdclk_state,
-                           enum pipe pipe)
-{
-       if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
-               return;
-
-       if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
-               return;
-
-       intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
-
-       dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
-
-       if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
-                "cdclk state doesn't match!\n")) {
-               intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
-               intel_dump_cdclk_state(cdclk_state, "[sw state]");
-       }
-}
-
-/**
- * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
- *
- * Program the hardware before updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
- */
-void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
-                                const struct intel_cdclk_state *old_state,
-                                const struct intel_cdclk_state *new_state,
-                                enum pipe pipe)
-{
-       if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
-               intel_set_cdclk(dev_priv, new_state, pipe);
-}
-
-/**
- * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
- *
- * Program the hardware after updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
- */
-void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
-                                 const struct intel_cdclk_state *old_state,
-                                 const struct intel_cdclk_state *new_state,
-                                 enum pipe pipe)
-{
-       if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
-               intel_set_cdclk(dev_priv, new_state, pipe);
-}
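
/*
 * A minimal usage sketch (hypothetical commit-path pseudocode) for the two
 * helpers above:
 *
 *        intel_set_cdclk_pre_plane_update(dev_priv, old, new, pipe);
 *        ... reprogram planes/pipes ...
 *        intel_set_cdclk_post_plane_update(dev_priv, old, new, pipe);
 *
 * At most one of the two calls actually reprograms CDCLK: the first when
 * the clock goes up (or pipe == INVALID_PIPE), the second when it goes down.
 */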
-
-static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
-                                    int pixel_rate)
-{
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               return DIV_ROUND_UP(pixel_rate, 2);
-       else if (IS_GEN(dev_priv, 9) ||
-                IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-               return pixel_rate;
-       else if (IS_CHERRYVIEW(dev_priv))
-               return DIV_ROUND_UP(pixel_rate * 100, 95);
-       else
-               return DIV_ROUND_UP(pixel_rate * 100, 90);
-}
-
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(crtc_state->base.crtc->dev);
-       int min_cdclk;
-
-       if (!crtc_state->base.enable)
-               return 0;
-
-       min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
-
-       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-       if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
-               min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
-
-       /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
-        * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
-        * there may be audio corruption or screen corruption." This cdclk
-        * restriction for GLK is 316.8 MHz.
-        */
-       if (intel_crtc_has_dp_encoder(crtc_state) &&
-           crtc_state->has_audio &&
-           crtc_state->port_clock >= 540000 &&
-           crtc_state->lane_count == 4) {
-               if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
-                       /* Display WA #1145: glk,cnl */
-                       min_cdclk = max(316800, min_cdclk);
-               } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
-                       /* Display WA #1144: skl,bxt */
-                       min_cdclk = max(432000, min_cdclk);
-               }
-       }
-
-       /*
-        * According to BSpec, "The CD clock frequency must be at least twice
-        * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
-        */
-       if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
-               min_cdclk = max(2 * 96000, min_cdclk);
-
-       /*
-        * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
-        * than 320000 kHz.
-        */
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
-           IS_VALLEYVIEW(dev_priv))
-               min_cdclk = max(320000, min_cdclk);
-
-       /*
-        * On Geminilake, once the CDCLK gets as low as 79200 kHz the
-        * picture becomes unstable, even though the DSI PLL and DE PLL
-        * values are correct.
-        */
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
-           IS_GEMINILAKE(dev_priv))
-               min_cdclk = max(158400, min_cdclk);
-
-       if (min_cdclk > dev_priv->max_cdclk_freq) {
-               DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
-                             min_cdclk, dev_priv->max_cdclk_freq);
-               return -EINVAL;
-       }
-
-       return min_cdclk;
-}
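
/*
 * A worked example of the limits above, assuming a Geminilake pipe with a
 * 533250 kHz pixel rate, audio enabled and a 4-lane HBR2 DP link: the
 * pixel rate rule gives DIV_ROUND_UP(533250, 2) = 266625 kHz, Display WA
 * #1145 raises that to 316800 kHz, and the Azalia rule (2 * 96000 =
 * 192000 kHz) is already satisfied, so min_cdclk ends up at 316800 kHz.
 */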
-
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc *crtc;
-       struct intel_crtc_state *crtc_state;
-       int min_cdclk, i;
-       enum pipe pipe;
-
-       memcpy(state->min_cdclk, dev_priv->min_cdclk,
-              sizeof(state->min_cdclk));
-
-       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-               min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
-               if (min_cdclk < 0)
-                       return min_cdclk;
-
-               state->min_cdclk[i] = min_cdclk;
-       }
-
-       min_cdclk = state->cdclk.force_min_cdclk;
-       for_each_pipe(dev_priv, pipe)
-               min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
-
-       return min_cdclk;
-}
-
-/*
- * Note that this function assumes that 0 is
- * the lowest voltage value, and higher values
- * correspond to higher voltages.
- *
- * Should that relationship no longer hold on
- * future platforms this code will need to be
- * adjusted.
- */
-static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc *crtc;
-       struct intel_crtc_state *crtc_state;
-       u8 min_voltage_level;
-       int i;
-       enum pipe pipe;
-
-       memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
-              sizeof(state->min_voltage_level));
-
-       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-               if (crtc_state->base.enable)
-                       state->min_voltage_level[i] =
-                               crtc_state->min_voltage_level;
-               else
-                       state->min_voltage_level[i] = 0;
-       }
-
-       min_voltage_level = 0;
-       for_each_pipe(dev_priv, pipe)
-               min_voltage_level = max(state->min_voltage_level[pipe],
-                                       min_voltage_level);
-
-       return min_voltage_level;
-}
-
-static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       int min_cdclk, cdclk;
-
-       min_cdclk = intel_compute_min_cdclk(state);
-       if (min_cdclk < 0)
-               return min_cdclk;
-
-       cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
-
-       state->cdclk.logical.cdclk = cdclk;
-       state->cdclk.logical.voltage_level =
-               vlv_calc_voltage_level(dev_priv, cdclk);
-
-       if (!state->active_crtcs) {
-               cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
-
-               state->cdclk.actual.cdclk = cdclk;
-               state->cdclk.actual.voltage_level =
-                       vlv_calc_voltage_level(dev_priv, cdclk);
-       } else {
-               state->cdclk.actual = state->cdclk.logical;
-       }
-
-       return 0;
-}
-
-static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
-{
-       int min_cdclk, cdclk;
-
-       min_cdclk = intel_compute_min_cdclk(state);
-       if (min_cdclk < 0)
-               return min_cdclk;
-
-       /*
-        * FIXME should also account for plane ratio
-        * once 64bpp pixel formats are supported.
-        */
-       cdclk = bdw_calc_cdclk(min_cdclk);
-
-       state->cdclk.logical.cdclk = cdclk;
-       state->cdclk.logical.voltage_level =
-               bdw_calc_voltage_level(cdclk);
-
-       if (!state->active_crtcs) {
-               cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
-
-               state->cdclk.actual.cdclk = cdclk;
-               state->cdclk.actual.voltage_level =
-                       bdw_calc_voltage_level(cdclk);
-       } else {
-               state->cdclk.actual = state->cdclk.logical;
-       }
-
-       return 0;
-}
-
-static int skl_dpll0_vco(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc *crtc;
-       struct intel_crtc_state *crtc_state;
-       int vco, i;
-
-       vco = state->cdclk.logical.vco;
-       if (!vco)
-               vco = dev_priv->skl_preferred_vco_freq;
-
-       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!crtc_state->base.enable)
-                       continue;
-
-               if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-                       continue;
-
-               /*
-                * DPLL0 VCO may need to be adjusted to get the correct
-                * clock for eDP. This will affect cdclk as well.
-                */
-               switch (crtc_state->port_clock / 2) {
-               case 108000:
-               case 216000:
-                       vco = 8640000;
-                       break;
-               default:
-                       vco = 8100000;
-                       break;
-               }
-       }
-
-       return vco;
-}
-
-static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
-{
-       int min_cdclk, cdclk, vco;
-
-       min_cdclk = intel_compute_min_cdclk(state);
-       if (min_cdclk < 0)
-               return min_cdclk;
-
-       vco = skl_dpll0_vco(state);
-
-       /*
-        * FIXME should also account for plane ratio
-        * once 64bpp pixel formats are supported.
-        */
-       cdclk = skl_calc_cdclk(min_cdclk, vco);
-
-       state->cdclk.logical.vco = vco;
-       state->cdclk.logical.cdclk = cdclk;
-       state->cdclk.logical.voltage_level =
-               skl_calc_voltage_level(cdclk);
-
-       if (!state->active_crtcs) {
-               cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
-
-               state->cdclk.actual.vco = vco;
-               state->cdclk.actual.cdclk = cdclk;
-               state->cdclk.actual.voltage_level =
-                       skl_calc_voltage_level(cdclk);
-       } else {
-               state->cdclk.actual = state->cdclk.logical;
-       }
-
-       return 0;
-}
-
-static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       int min_cdclk, cdclk, vco;
-
-       min_cdclk = intel_compute_min_cdclk(state);
-       if (min_cdclk < 0)
-               return min_cdclk;
-
-       if (IS_GEMINILAKE(dev_priv)) {
-               cdclk = glk_calc_cdclk(min_cdclk);
-               vco = glk_de_pll_vco(dev_priv, cdclk);
-       } else {
-               cdclk = bxt_calc_cdclk(min_cdclk);
-               vco = bxt_de_pll_vco(dev_priv, cdclk);
-       }
-
-       state->cdclk.logical.vco = vco;
-       state->cdclk.logical.cdclk = cdclk;
-       state->cdclk.logical.voltage_level =
-               bxt_calc_voltage_level(cdclk);
-
-       if (!state->active_crtcs) {
-               if (IS_GEMINILAKE(dev_priv)) {
-                       cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
-                       vco = glk_de_pll_vco(dev_priv, cdclk);
-               } else {
-                       cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
-                       vco = bxt_de_pll_vco(dev_priv, cdclk);
-               }
-
-               state->cdclk.actual.vco = vco;
-               state->cdclk.actual.cdclk = cdclk;
-               state->cdclk.actual.voltage_level =
-                       bxt_calc_voltage_level(cdclk);
-       } else {
-               state->cdclk.actual = state->cdclk.logical;
-       }
-
-       return 0;
-}
-
-static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       int min_cdclk, cdclk, vco;
-
-       min_cdclk = intel_compute_min_cdclk(state);
-       if (min_cdclk < 0)
-               return min_cdclk;
-
-       cdclk = cnl_calc_cdclk(min_cdclk);
-       vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
-
-       state->cdclk.logical.vco = vco;
-       state->cdclk.logical.cdclk = cdclk;
-       state->cdclk.logical.voltage_level =
-               max(cnl_calc_voltage_level(cdclk),
-                   cnl_compute_min_voltage_level(state));
-
-       if (!state->active_crtcs) {
-               cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
-               vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
-
-               state->cdclk.actual.vco = vco;
-               state->cdclk.actual.cdclk = cdclk;
-               state->cdclk.actual.voltage_level =
-                       cnl_calc_voltage_level(cdclk);
-       } else {
-               state->cdclk.actual = state->cdclk.logical;
-       }
-
-       return 0;
-}
-
-static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       unsigned int ref = state->cdclk.logical.ref;
-       int min_cdclk, cdclk, vco;
-
-       min_cdclk = intel_compute_min_cdclk(state);
-       if (min_cdclk < 0)
-               return min_cdclk;
-
-       cdclk = icl_calc_cdclk(min_cdclk, ref);
-       vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
-
-       state->cdclk.logical.vco = vco;
-       state->cdclk.logical.cdclk = cdclk;
-       state->cdclk.logical.voltage_level =
-               max(icl_calc_voltage_level(cdclk),
-                   cnl_compute_min_voltage_level(state));
-
-       if (!state->active_crtcs) {
-               cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
-               vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
-
-               state->cdclk.actual.vco = vco;
-               state->cdclk.actual.cdclk = cdclk;
-               state->cdclk.actual.voltage_level =
-                       icl_calc_voltage_level(cdclk);
-       } else {
-               state->cdclk.actual = state->cdclk.logical;
-       }
-
-       return 0;
-}
-
-static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
-{
-       int max_cdclk_freq = dev_priv->max_cdclk_freq;
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               return 2 * max_cdclk_freq;
-       else if (IS_GEN(dev_priv, 9) ||
-                IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-               return max_cdclk_freq;
-       else if (IS_CHERRYVIEW(dev_priv))
-               return max_cdclk_freq*95/100;
-       else if (INTEL_GEN(dev_priv) < 4)
-               return 2*max_cdclk_freq*90/100;
-       else
-               return max_cdclk_freq*90/100;
-}
-
-/**
- * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency
- * @dev_priv: i915 device
- *
- * Determine the maximum CDCLK frequency the platform supports, and also
- * derive the maximum dot clock frequency the maximum CDCLK frequency
- * allows.
- */
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) >= 11) {
-               if (dev_priv->cdclk.hw.ref == 24000)
-                       dev_priv->max_cdclk_freq = 648000;
-               else
-                       dev_priv->max_cdclk_freq = 652800;
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               dev_priv->max_cdclk_freq = 528000;
-       } else if (IS_GEN9_BC(dev_priv)) {
-               u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
-               int max_cdclk, vco;
-
-               vco = dev_priv->skl_preferred_vco_freq;
-               WARN_ON(vco != 8100000 && vco != 8640000);
-
-               /*
-                * Use the lower (vco 8640) cdclk values as a
-                * first guess. skl_calc_cdclk() will correct it
-                * if the preferred vco is 8100 instead.
-                */
-               if (limit == SKL_DFSM_CDCLK_LIMIT_675)
-                       max_cdclk = 617143;
-               else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
-                       max_cdclk = 540000;
-               else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
-                       max_cdclk = 432000;
-               else
-                       max_cdclk = 308571;
-
-               dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
-       } else if (IS_GEMINILAKE(dev_priv)) {
-               dev_priv->max_cdclk_freq = 316800;
-       } else if (IS_BROXTON(dev_priv)) {
-               dev_priv->max_cdclk_freq = 624000;
-       } else if (IS_BROADWELL(dev_priv))  {
-               /*
-                * FIXME with extra cooling we can allow
-                * 540 MHz for ULX and 675 MHz for ULT.
-                * How can we know if extra cooling is
-                * available? PCI ID, VTB, something else?
-                */
-               if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-                       dev_priv->max_cdclk_freq = 450000;
-               else if (IS_BDW_ULX(dev_priv))
-                       dev_priv->max_cdclk_freq = 450000;
-               else if (IS_BDW_ULT(dev_priv))
-                       dev_priv->max_cdclk_freq = 540000;
-               else
-                       dev_priv->max_cdclk_freq = 675000;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->max_cdclk_freq = 320000;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               dev_priv->max_cdclk_freq = 400000;
-       } else {
-               /* otherwise assume cdclk is fixed */
-               dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
-       }
-
-       dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
-
-       DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
-                        dev_priv->max_cdclk_freq);
-
-       DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
-                        dev_priv->max_dotclk_freq);
-}
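
/*
 * A worked example of the "first guess" above, assuming the preferred VCO
 * is 8100000 kHz and the fuse limit is SKL_DFSM_CDCLK_LIMIT_675: the
 * 8640-VCO guess of 617143 kHz is handed to skl_calc_cdclk(), which
 * (assuming the usual 8100 ladder of 337500/450000/540000/675000 kHz)
 * rounds it to 675000 kHz as the real maximum.
 */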
-
-/**
- * intel_update_cdclk - Determine the current CDCLK frequency
- * @dev_priv: i915 device
- *
- * Determine the current CDCLK frequency.
- */
-void intel_update_cdclk(struct drm_i915_private *dev_priv)
-{
-       dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
-
-       /*
-        * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
-        * Programmng [sic] note: bit[9:2] should be programmed to the number
-        * of cdclk that generates 4MHz reference clock freq which is used to
-        * generate GMBus clock. This will vary with the cdclk freq.
-        */
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               I915_WRITE(GMBUSFREQ_VLV,
-                          DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
-}
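
/*
 * A worked example of the GMBUSFREQ programming above: with a 320000 kHz
 * cdclk the value written is DIV_ROUND_UP(320000, 1000) = 320, i.e. the
 * cdclk frequency in MHz, rounded up.
 */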
-
-static int cnp_rawclk(struct drm_i915_private *dev_priv)
-{
-       u32 rawclk;
-       int divider, fraction;
-
-       if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
-               /* 24 MHz */
-               divider = 24000;
-               fraction = 0;
-       } else {
-               /* 19.2 MHz */
-               divider = 19000;
-               fraction = 200;
-       }
-
-       rawclk = CNP_RAWCLK_DIV(divider / 1000);
-       if (fraction) {
-               int numerator = 1;
-
-               rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
-                                                          fraction) - 1);
-               if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-                       rawclk |= ICP_RAWCLK_NUM(numerator);
-       }
-
-       I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
-       return divider + fraction;
-}
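
/*
 * A worked example for the 19.2 MHz fuse setting above: divider = 19000
 * and fraction = 200, so the register gets CNP_RAWCLK_DIV(19) plus
 * CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(1000, 200) - 1) = CNP_RAWCLK_DEN(4)
 * (and ICP_RAWCLK_NUM(1) on ICP+), and the function returns
 * 19000 + 200 = 19200 kHz.
 */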
-
-static int pch_rawclk(struct drm_i915_private *dev_priv)
-{
-       return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
-}
-
-static int vlv_hrawclk(struct drm_i915_private *dev_priv)
-{
-       /* RAWCLK_FREQ_VLV register updated from power well code */
-       return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
-                                     CCK_DISPLAY_REF_CLOCK_CONTROL);
-}
-
-static int g4x_hrawclk(struct drm_i915_private *dev_priv)
-{
-       u32 clkcfg;
-
-       /* hrawclock is 1/4 the FSB frequency */
-       clkcfg = I915_READ(CLKCFG);
-       switch (clkcfg & CLKCFG_FSB_MASK) {
-       case CLKCFG_FSB_400:
-               return 100000;
-       case CLKCFG_FSB_533:
-               return 133333;
-       case CLKCFG_FSB_667:
-               return 166667;
-       case CLKCFG_FSB_800:
-               return 200000;
-       case CLKCFG_FSB_1067:
-       case CLKCFG_FSB_1067_ALT:
-               return 266667;
-       case CLKCFG_FSB_1333:
-       case CLKCFG_FSB_1333_ALT:
-               return 333333;
-       default:
-               return 133333;
-       }
-}
-
-/**
- * intel_update_rawclk - Determine the current RAWCLK frequency
- * @dev_priv: i915 device
- *
- * Determine the current RAWCLK frequency. RAWCLK is a fixed
- * frequency clock so this needs to be done only once.
- */
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
-               dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
-       else if (HAS_PCH_SPLIT(dev_priv))
-               dev_priv->rawclk_freq = pch_rawclk(dev_priv);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
-       else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
-               dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
-       else
-               /* no rawclk on other platforms, or no need to know it */
-               return;
-
-       DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
-}
-
-/**
- * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
- * @dev_priv: i915 device
- */
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) >= 11) {
-               dev_priv->display.set_cdclk = icl_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               dev_priv->display.set_cdclk = cnl_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
-       } else if (IS_GEN9_LP(dev_priv)) {
-               dev_priv->display.set_cdclk = bxt_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
-       } else if (IS_GEN9_BC(dev_priv)) {
-               dev_priv->display.set_cdclk = skl_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
-       } else if (IS_BROADWELL(dev_priv)) {
-               dev_priv->display.set_cdclk = bdw_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->display.set_cdclk = chv_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               dev_priv->display.set_cdclk = vlv_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               dev_priv->display.get_cdclk = icl_get_cdclk;
-       else if (IS_CANNONLAKE(dev_priv))
-               dev_priv->display.get_cdclk = cnl_get_cdclk;
-       else if (IS_GEN9_LP(dev_priv))
-               dev_priv->display.get_cdclk = bxt_get_cdclk;
-       else if (IS_GEN9_BC(dev_priv))
-               dev_priv->display.get_cdclk = skl_get_cdclk;
-       else if (IS_BROADWELL(dev_priv))
-               dev_priv->display.get_cdclk = bdw_get_cdclk;
-       else if (IS_HASWELL(dev_priv))
-               dev_priv->display.get_cdclk = hsw_get_cdclk;
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               dev_priv->display.get_cdclk = vlv_get_cdclk;
-       else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-               dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-       else if (IS_GEN(dev_priv, 5))
-               dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
-       else if (IS_GM45(dev_priv))
-               dev_priv->display.get_cdclk = gm45_get_cdclk;
-       else if (IS_G45(dev_priv))
-               dev_priv->display.get_cdclk = g33_get_cdclk;
-       else if (IS_I965GM(dev_priv))
-               dev_priv->display.get_cdclk = i965gm_get_cdclk;
-       else if (IS_I965G(dev_priv))
-               dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-       else if (IS_PINEVIEW(dev_priv))
-               dev_priv->display.get_cdclk = pnv_get_cdclk;
-       else if (IS_G33(dev_priv))
-               dev_priv->display.get_cdclk = g33_get_cdclk;
-       else if (IS_I945GM(dev_priv))
-               dev_priv->display.get_cdclk = i945gm_get_cdclk;
-       else if (IS_I945G(dev_priv))
-               dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
-       else if (IS_I915GM(dev_priv))
-               dev_priv->display.get_cdclk = i915gm_get_cdclk;
-       else if (IS_I915G(dev_priv))
-               dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk;
-       else if (IS_I865G(dev_priv))
-               dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk;
-       else if (IS_I85X(dev_priv))
-               dev_priv->display.get_cdclk = i85x_get_cdclk;
-       else if (IS_I845G(dev_priv))
-               dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
-       else { /* 830 */
-               WARN(!IS_I830(dev_priv),
-                    "Unknown platform. Assuming 133 MHz CDCLK\n");
-               dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
-       }
-}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/intel_cdclk.h
deleted file mode 100644 (file)
index 4d6f7f5..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_CDCLK_H__
-#define __INTEL_CDCLK_H__
-
-#include <linux/types.h>
-
-#include "intel_display.h"
-
-struct drm_i915_private;
-struct intel_atomic_state;
-struct intel_cdclk_state;
-struct intel_crtc_state;
-
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_cdclk_init(struct drm_i915_private *i915);
-void intel_cdclk_uninit(struct drm_i915_private *i915);
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
-                                  const struct intel_cdclk_state *a,
-                                  const struct intel_cdclk_state *b);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
-                              const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
-                        const struct intel_cdclk_state *b);
-void intel_cdclk_swap_state(struct intel_atomic_state *state);
-void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
-                                const struct intel_cdclk_state *old_state,
-                                const struct intel_cdclk_state *new_state,
-                                enum pipe pipe);
-void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
-                                 const struct intel_cdclk_state *old_state,
-                                 const struct intel_cdclk_state *new_state,
-                                 enum pipe pipe);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
-                           const char *context);
-
-#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
deleted file mode 100644 (file)
index 23a84dd..0000000
+++ /dev/null
@@ -1,1428 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "intel_color.h"
-#include "intel_drv.h"
-
-#define CTM_COEFF_SIGN (1ULL << 63)
-
-#define CTM_COEFF_1_0  (1ULL << 32)
-#define CTM_COEFF_2_0  (CTM_COEFF_1_0 << 1)
-#define CTM_COEFF_4_0  (CTM_COEFF_2_0 << 1)
-#define CTM_COEFF_8_0  (CTM_COEFF_4_0 << 1)
-#define CTM_COEFF_0_5  (CTM_COEFF_1_0 >> 1)
-#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1)
-#define CTM_COEFF_0_125        (CTM_COEFF_0_25 >> 1)
-
-#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
-
-#define CTM_COEFF_NEGATIVE(coeff)      (((coeff) & CTM_COEFF_SIGN) != 0)
-#define CTM_COEFF_ABS(coeff)           ((coeff) & (CTM_COEFF_SIGN - 1))
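
/*
 * A worked example of the sign/magnitude encoding above: -0.5 would be
 * CTM_COEFF_SIGN | CTM_COEFF_0_5 (0x8000000080000000ULL), for which
 * CTM_COEFF_NEGATIVE() is true and CTM_COEFF_ABS() recovers CTM_COEFF_0_5.
 */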
-
-#define LEGACY_LUT_LENGTH              256
-
-/*
- * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
- * format). This macro takes the coefficient we want transformed and the
- * number of fractional bits.
- *
- * We only have a 9-bit precision window which slides depending on the value
- * of the CTM coefficient, and we write the value starting from bit 3. We
- * also round the value.
- */
-#define ILK_CSC_COEFF_FP(coeff, fbits) \
-       (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
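
/*
 * A worked example of the macro above: for a coefficient of 0.5 in U32.32
 * (CTM_COEFF_0_5 = 1ULL << 31) and fbits = 9, the shift is 32 - 9 - 3 = 20,
 * so ((1ULL << 31) >> 20) + 4 = 0x804, which after the clamp and the
 * & 0xff8 mask becomes 0x800: mantissa 256 in bits 11:3, i.e. 256 / 512 = 0.5.
 */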
-
-#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
-#define ILK_CSC_COEFF_1_0 0x7800
-
-#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
-
-static const u16 ilk_csc_off_zero[3] = {};
-
-static const u16 ilk_csc_coeff_identity[9] = {
-       ILK_CSC_COEFF_1_0, 0, 0,
-       0, ILK_CSC_COEFF_1_0, 0,
-       0, 0, ILK_CSC_COEFF_1_0,
-};
-
-static const u16 ilk_csc_postoff_limited_range[3] = {
-       ILK_CSC_POSTOFF_LIMITED_RANGE,
-       ILK_CSC_POSTOFF_LIMITED_RANGE,
-       ILK_CSC_POSTOFF_LIMITED_RANGE,
-};
-
-static const u16 ilk_csc_coeff_limited_range[9] = {
-       ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
-       0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
-       0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
-};
-
-/*
- * These values are direct register values specified in the Bspec
- * for the RGB->YUV conversion matrix (BT709 colorspace).
- */
-static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
-       0x1e08, 0x9cc0, 0xb528,
-       0x2ba8, 0x09d8, 0x37e8,
-       0xbce8, 0x9ad8, 0x1e08,
-};
-
-/* Post offset values for RGB->YCBCR conversion */
-static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
-       0x0800, 0x0100, 0x0800,
-};
-
-static bool lut_is_legacy(const struct drm_property_blob *lut)
-{
-       return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
-}
-
-static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
-{
-       return !crtc_state->base.degamma_lut &&
-               !crtc_state->base.ctm &&
-               crtc_state->base.gamma_lut &&
-               lut_is_legacy(crtc_state->base.gamma_lut);
-}
-
-/*
- * When using limited range, multiply the matrix given by userspace by
- * the matrix that we would use for the limited range.
- */
-static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
-{
-       int i;
-
-       for (i = 0; i < 9; i++) {
-               u64 user_coeff = input[i];
-               u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
-               u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
-                                         CTM_COEFF_4_0 - 1) >> 2;
-
-               /*
-                * By scaling every coefficient with the limited range
-                * (16-235) vs the full range (0-255), the final output is
-                * scaled down to fit in the limited range supported by the
-                * panel.
-                */
-               result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
-               result[i] |= user_coeff & CTM_COEFF_SIGN;
-       }
-
-       return result;
-}
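
/*
 * For example, a 1.0 user coefficient (CTM_COEFF_1_0) is clamped below 4.0
 * and shifted down to 2^30, so (limited_coeff * abs_coeff) >> 30 simply
 * yields CTM_COEFF_LIMITED_RANGE itself: an identity matrix collapses to
 * the plain limited-range (219/255) matrix.
 */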
-
-static void ilk_update_pipe_csc(struct intel_crtc *crtc,
-                               const u16 preoff[3],
-                               const u16 coeff[9],
-                               const u16 postoff[3])
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
-       I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
-       I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
-
-       I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
-       I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
-
-       I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
-       I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
-
-       I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
-       I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
-
-       if (INTEL_GEN(dev_priv) >= 7) {
-               I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
-               I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
-               I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
-       }
-}
-
-static void icl_update_output_csc(struct intel_crtc *crtc,
-                                 const u16 preoff[3],
-                                 const u16 coeff[9],
-                                 const u16 postoff[3])
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
-       I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
-       I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
-
-       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
-       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
-
-       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
-       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
-
-       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
-       I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
-
-       I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
-       I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
-       I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
-}
-
-static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       /*
-        * FIXME if there's a gamma LUT after the CSC, we should
-        * do the range compression using the gamma LUT instead.
-        */
-       return crtc_state->limited_color_range &&
-               (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
-                IS_GEN_RANGE(dev_priv, 9, 10));
-}
-
-static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
-                               u16 coeffs[9])
-{
-       const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
-       const u64 *input;
-       u64 temp[9];
-       int i;
-
-       if (ilk_csc_limited_range(crtc_state))
-               input = ctm_mult_by_limited(temp, ctm->matrix);
-       else
-               input = ctm->matrix;
-
-       /*
-        * Convert fixed point S31.32 input to format supported by the
-        * hardware.
-        */
-       for (i = 0; i < 9; i++) {
-               u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
-
-               /*
-                * Clamp input value to min/max supported by
-                * hardware.
-                */
-               abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
-
-               coeffs[i] = 0;
-
-               /* sign bit */
-               if (CTM_COEFF_NEGATIVE(input[i]))
-                       coeffs[i] |= 1 << 15;
-
-               if (abs_coeff < CTM_COEFF_0_125)
-                       coeffs[i] |= (3 << 12) |
-                               ILK_CSC_COEFF_FP(abs_coeff, 12);
-               else if (abs_coeff < CTM_COEFF_0_25)
-                       coeffs[i] |= (2 << 12) |
-                               ILK_CSC_COEFF_FP(abs_coeff, 11);
-               else if (abs_coeff < CTM_COEFF_0_5)
-                       coeffs[i] |= (1 << 12) |
-                               ILK_CSC_COEFF_FP(abs_coeff, 10);
-               else if (abs_coeff < CTM_COEFF_1_0)
-                       coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
-               else if (abs_coeff < CTM_COEFF_2_0)
-                       coeffs[i] |= (7 << 12) |
-                               ILK_CSC_COEFF_FP(abs_coeff, 8);
-               else
-                       coeffs[i] |= (6 << 12) |
-                               ILK_CSC_COEFF_FP(abs_coeff, 7);
-       }
-}
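
For illustration, a standalone sketch (not from the original file) of the S31.32 sign-magnitude encoding that ilk_csc_convert_ctm() consumes: bit 63 carries the sign and the remaining bits the magnitude with 32 fractional bits, which is what the masking with (1ULL << 63) - 1 and CTM_COEFF_NEGATIVE() rely on. The helper name below is made up.

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a double into the S31.32 sign-magnitude format (hypothetical helper). */
static uint64_t s31_32_from_double(double v)
{
        uint64_t sign = v < 0.0 ? (1ULL << 63) : 0;

        /* Magnitude in the low 63 bits, 32 of them fractional. */
        return sign | (uint64_t)llround(fabs(v) * 4294967296.0);
}

int main(void)
{
        printf(" 0.5 -> 0x%016" PRIx64 "\n", s31_32_from_double(0.5));  /* 0x0000000080000000 */
        printf("-1.0 -> 0x%016" PRIx64 "\n", s31_32_from_double(-1.0)); /* 0x8000000100000000 */
        return 0;
}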
-
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       bool limited_color_range = ilk_csc_limited_range(crtc_state);
-
-       if (crtc_state->base.ctm) {
-               u16 coeff[9];
-
-               ilk_csc_convert_ctm(crtc_state, coeff);
-               ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
-                                   limited_color_range ?
-                                   ilk_csc_postoff_limited_range :
-                                   ilk_csc_off_zero);
-       } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
-               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
-                                   ilk_csc_coeff_rgb_to_ycbcr,
-                                   ilk_csc_postoff_rgb_to_ycbcr);
-       } else if (limited_color_range) {
-               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
-                                   ilk_csc_coeff_limited_range,
-                                   ilk_csc_postoff_limited_range);
-       } else if (crtc_state->csc_enable) {
-               /*
-                * On GLK+ both pipe CSC and degamma LUT are controlled
-                * by csc_enable. Hence for the cases where the degamma
-                * LUT is needed but the CSC is not, we need to load an
-                * identity matrix.
-                */
-               WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
-
-               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
-                                   ilk_csc_coeff_identity,
-                                   ilk_csc_off_zero);
-       }
-
-       I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
-}
-
-static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (crtc_state->base.ctm) {
-               u16 coeff[9];
-
-               ilk_csc_convert_ctm(crtc_state, coeff);
-               ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
-                                   coeff, ilk_csc_off_zero);
-       }
-
-       if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
-               icl_update_output_csc(crtc, ilk_csc_off_zero,
-                                     ilk_csc_coeff_rgb_to_ycbcr,
-                                     ilk_csc_postoff_rgb_to_ycbcr);
-       } else if (crtc_state->limited_color_range) {
-               icl_update_output_csc(crtc, ilk_csc_off_zero,
-                                     ilk_csc_coeff_limited_range,
-                                     ilk_csc_postoff_limited_range);
-       }
-
-       I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
-}
-
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       if (crtc_state->base.ctm) {
-               const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
-               u16 coeffs[9] = {};
-               int i;
-
-               for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
-                       u64 abs_coeff =
-                               ((1ULL << 63) - 1) & ctm->matrix[i];
-
-                       /* Round coefficient. */
-                       abs_coeff += 1 << (32 - 13);
-                       /* Clamp to hardware limits. */
-                       abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
-
-                       /* Write coefficients in S3.12 format. */
-                       if (ctm->matrix[i] & (1ULL << 63))
-                               coeffs[i] = 1 << 15;
-                       coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
-                       coeffs[i] |= (abs_coeff >> 20) & 0xfff;
-               }
-
-               I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
-                          coeffs[1] << 16 | coeffs[0]);
-               I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
-                          coeffs[3] << 16 | coeffs[2]);
-               I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
-                          coeffs[5] << 16 | coeffs[4]);
-               I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
-                          coeffs[7] << 16 | coeffs[6]);
-               I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
-       }
-
-       I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
-}
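
For reference, a standalone sketch (not from the original file) of the S3.12 packing done above for the CGM CSC: after rounding, bits 34:32 of the magnitude supply the three integer bits, bits 31:20 the twelve fractional bits, and bit 15 the sign. The helper name is made up.

#include <stdint.h>
#include <stdio.h>

/* Pack one S31.32 sign-magnitude coefficient into the CGM CSC's S3.12 format. */
static uint16_t chv_csc_coeff(uint64_t coeff_s31_32)
{
        uint64_t abs_coeff = ((1ULL << 63) - 1) & coeff_s31_32;
        uint16_t out = 0;

        abs_coeff += 1 << (32 - 13);            /* round to S3.12 */
        if (abs_coeff > (8ULL << 32) - 1)       /* clamp below 8.0 */
                abs_coeff = (8ULL << 32) - 1;

        if (coeff_s31_32 & (1ULL << 63))        /* sign bit */
                out = 1 << 15;
        out |= ((abs_coeff >> 32) & 7) << 12;   /* 3 integer bits */
        out |= (abs_coeff >> 20) & 0xfff;       /* 12 fractional bits */

        return out;
}

int main(void)
{
        printf("1.0 -> 0x%04x\n", chv_csc_coeff(1ULL << 32));   /* prints 0x1000 */
        return 0;
}

With this encoding an identity coefficient of 1.0 (1ULL << 32 in S31.32) packs to 0x1000.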
-
-/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
-static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
-{
-       return (color->red & 0xff) << 16 |
-               (color->green & 0xff) << 8 |
-               (color->blue & 0xff);
-}
-
-/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
-static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
-{
-       return (color->red >> 8) << 16 |
-               (color->green >> 8) << 8 |
-               (color->blue >> 8);
-}
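
As a quick illustration (not from the original file) of the "10.6" split above: each 16-bit channel is spread across two dwords, the even dword taking the low 8 bits and the odd dword the high 8 bits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t red = 0xabcd;          /* one 16-bit channel value */

        printf("even DW byte: 0x%02x\n", red & 0xff);   /* low 8 bits:  0xcd */
        printf("odd DW byte:  0x%02x\n", red >> 8);     /* high 8 bits: 0xab */
        return 0;
}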
-
-static u32 ilk_lut_10(const struct drm_color_lut *color)
-{
-       return drm_color_lut_extract(color->red, 10) << 20 |
-               drm_color_lut_extract(color->green, 10) << 10 |
-               drm_color_lut_extract(color->blue, 10);
-}
-
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
-                                   const struct drm_property_blob *blob)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       int i;
-
-       if (HAS_GMCH(dev_priv)) {
-               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
-                       assert_dsi_pll_enabled(dev_priv);
-               else
-                       assert_pll_enabled(dev_priv, pipe);
-       }
-
-       if (blob) {
-               const struct drm_color_lut *lut = blob->data;
-
-               for (i = 0; i < 256; i++) {
-                       u32 word =
-                               (drm_color_lut_extract(lut[i].red, 8) << 16) |
-                               (drm_color_lut_extract(lut[i].green, 8) << 8) |
-                               drm_color_lut_extract(lut[i].blue, 8);
-
-                       if (HAS_GMCH(dev_priv))
-                               I915_WRITE(PALETTE(pipe, i), word);
-                       else
-                               I915_WRITE(LGC_PALETTE(pipe, i), word);
-               }
-       }
-}
-
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
-}
-
-static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 val;
-
-       val = I915_READ(PIPECONF(pipe));
-       val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
-       val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
-       I915_WRITE(PIPECONF(pipe), val);
-}
-
-static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 val;
-
-       val = I915_READ(PIPECONF(pipe));
-       val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
-       val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
-       I915_WRITE(PIPECONF(pipe), val);
-
-       ilk_load_csc_matrix(crtc_state);
-}
-
-static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
-
-       ilk_load_csc_matrix(crtc_state);
-}
-
-static void skl_color_commit(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 val = 0;
-
-       /*
-        * We don't (yet) allow userspace to control the pipe background color,
-        * so force it to black, but apply pipe gamma and CSC appropriately
-        * so that its handling will match how we program our planes.
-        */
-       if (crtc_state->gamma_enable)
-               val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
-       if (crtc_state->csc_enable)
-               val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
-       I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
-
-       I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               icl_load_csc_matrix(crtc_state);
-       else
-               ilk_load_csc_matrix(crtc_state);
-}
-
-static void i965_load_lut_10p6(struct intel_crtc *crtc,
-                              const struct drm_property_blob *blob)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_color_lut *lut = blob->data;
-       int i, lut_size = drm_color_lut_size(blob);
-       enum pipe pipe = crtc->pipe;
-
-       for (i = 0; i < lut_size - 1; i++) {
-               I915_WRITE(PALETTE(pipe, 2 * i + 0),
-                          i965_lut_10p6_ldw(&lut[i]));
-               I915_WRITE(PALETTE(pipe, 2 * i + 1),
-                          i965_lut_10p6_udw(&lut[i]));
-       }
-
-       I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
-       I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
-       I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
-}
-
-static void i965_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-
-       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-               i9xx_load_luts(crtc_state);
-       else
-               i965_load_lut_10p6(crtc, gamma_lut);
-}
-
-static void ilk_load_lut_10(struct intel_crtc *crtc,
-                           const struct drm_property_blob *blob)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_color_lut *lut = blob->data;
-       int i, lut_size = drm_color_lut_size(blob);
-       enum pipe pipe = crtc->pipe;
-
-       for (i = 0; i < lut_size; i++)
-               I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
-}
-
-static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-
-       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-               i9xx_load_luts(crtc_state);
-       else
-               ilk_load_lut_10(crtc, gamma_lut);
-}
-
-static int ivb_lut_10_size(u32 prec_index)
-{
-       if (prec_index & PAL_PREC_SPLIT_MODE)
-               return 512;
-       else
-               return 1024;
-}
-
-/*
- * IVB/HSW Bspec / PAL_PREC_INDEX:
- * "Restriction : Index auto increment mode is not
- *  supported and must not be enabled."
- */
-static void ivb_load_lut_10(struct intel_crtc *crtc,
-                           const struct drm_property_blob *blob,
-                           u32 prec_index)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int hw_lut_size = ivb_lut_10_size(prec_index);
-       const struct drm_color_lut *lut = blob->data;
-       int i, lut_size = drm_color_lut_size(blob);
-       enum pipe pipe = crtc->pipe;
-
-       for (i = 0; i < hw_lut_size; i++) {
-               /* We discard half the user entries in split gamma mode */
-               const struct drm_color_lut *entry =
-                       &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
-
-               I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
-               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
-       }
-
-       /*
-        * Reset the index, otherwise it prevents the legacy palette from
-        * being written properly.
-        */
-       I915_WRITE(PREC_PAL_INDEX(pipe), 0);
-}
-
-/* On BDW+ the index auto increment mode actually works */
-static void bdw_load_lut_10(struct intel_crtc *crtc,
-                           const struct drm_property_blob *blob,
-                           u32 prec_index)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int hw_lut_size = ivb_lut_10_size(prec_index);
-       const struct drm_color_lut *lut = blob->data;
-       int i, lut_size = drm_color_lut_size(blob);
-       enum pipe pipe = crtc->pipe;
-
-       I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
-                  PAL_PREC_AUTO_INCREMENT);
-
-       for (i = 0; i < hw_lut_size; i++) {
-               /* We discard half the user entries in split gamma mode */
-               const struct drm_color_lut *entry =
-                       &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
-
-               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
-       }
-
-       /*
-        * Reset the index, otherwise it prevents the legacy palette from
-        * being written properly.
-        */
-       I915_WRITE(PREC_PAL_INDEX(pipe), 0);
-}
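
Both ivb_load_lut_10() and bdw_load_lut_10() resample the user LUT onto the hardware LUT with the index mapping i * (lut_size - 1) / (hw_lut_size - 1), which is where the "discard half the user entries" behaviour in split mode comes from. A tiny standalone sketch of that mapping (not from the original file; the sizes are illustrative):

#include <stdio.h>

/* Hardware entry i samples user entry i * (lut_size - 1) / (hw_lut_size - 1). */
static int user_index(int i, int hw_lut_size, int lut_size)
{
        return i * (lut_size - 1) / (hw_lut_size - 1);
}

int main(void)
{
        int hw_lut_size = 512, lut_size = 1024;         /* illustrative sizes */

        for (int i = 0; i < hw_lut_size; i += 128)
                printf("hw[%3d] <- user[%4d]\n", i, user_index(i, hw_lut_size, lut_size));
        printf("hw[%3d] <- user[%4d]\n", hw_lut_size - 1,
               user_index(hw_lut_size - 1, hw_lut_size, lut_size));
        return 0;
}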
-
-static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       /* Program the max register to clamp values > 1.0. */
-       I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
-       I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
-       I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
-
-       /*
-        * Program the gc max 2 register to clamp values > 1.0.
-        * ToDo: Extend the ABI to be able to program values
-        * from 3.0 to 7.0.
-        */
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
-               I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
-               I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
-               I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
-       }
-}
-
-static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
-
-       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
-               i9xx_load_luts(crtc_state);
-       } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
-               ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
-                               PAL_PREC_INDEX_VALUE(0));
-               ivb_load_lut_ext_max(crtc);
-               ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
-                               PAL_PREC_INDEX_VALUE(512));
-       } else {
-               const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
-
-               ivb_load_lut_10(crtc, blob,
-                               PAL_PREC_INDEX_VALUE(0));
-               ivb_load_lut_ext_max(crtc);
-       }
-}
-
-static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
-
-       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
-               i9xx_load_luts(crtc_state);
-       } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
-               bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
-                               PAL_PREC_INDEX_VALUE(0));
-               ivb_load_lut_ext_max(crtc);
-               bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
-                               PAL_PREC_INDEX_VALUE(512));
-       } else {
-               const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
-
-               bdw_load_lut_10(crtc, blob,
-                               PAL_PREC_INDEX_VALUE(0));
-               ivb_load_lut_ext_max(crtc);
-       }
-}
-
-static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
-       const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
-       u32 i;
-
-       /*
-        * When setting the auto-increment bit, the hardware seems to
-        * ignore the index bits, so we need to reset it to index 0
-        * separately.
-        */
-       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
-       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
-
-       for (i = 0; i < lut_size; i++) {
-               /*
-                * The first 33 entries represent the range 0 to 1.0; the
-                * 34th and 35th entries represent the extended-range
-                * inputs 3.0 and 7.0 respectively, currently clamped
-                * at 1.0. Since the precision is 16 bits, the user
-                * value can be written directly to the register.
-                * The pipe degamma table on GLK+ doesn't support
-                * different values per channel, so this just programs
-                * the green value, which is expected to equal red and
-                * blue, into the LUT registers.
-                * ToDo: Extend to max 7.0. Enable 32 bit input values
-                * as compared to just 16 to achieve this.
-                */
-               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
-       }
-
-       /* Clamp values > 1.0. */
-       while (i++ < 35)
-               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
-}
-
-static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
-       u32 i;
-
-       /*
-        * When setting the auto-increment bit, the hardware seems to
-        * ignore the index bits, so we need to reset it to index 0
-        * separately.
-        */
-       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
-       I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
-
-       for (i = 0; i < lut_size; i++) {
-               u32 v = (i << 16) / (lut_size - 1);
-
-               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
-       }
-
-       /* Clamp values > 1.0. */
-       while (i++ < 35)
-               I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
-}
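
The linear ramp above is an identity curve in 16.16 fixed point, v = (i << 16) / (lut_size - 1), running from 0x00000 (0.0) to 0x10000 (1.0). A quick standalone check (not from the original file; the LUT size is illustrative, matching the "first 33 entries" comment earlier):

#include <stdio.h>

int main(void)
{
        int lut_size = 33;      /* illustrative, per the "first 33 entries" comment */

        for (int i = 0; i < lut_size; i += 16)
                printf("entry %2d = 0x%05x\n", i, (i << 16) / (lut_size - 1));
        /* prints 0x00000, 0x08000 and 0x10000, i.e. 0.0, 0.5 and 1.0 in 16.16 */
        return 0;
}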
-
-static void glk_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       /*
-        * On GLK+ both pipe CSC and degamma LUT are controlled
-        * by csc_enable. Hence for the cases where the CSC is
-        * needed but the degamma LUT is not, we need to load a
-        * linear degamma LUT. In fact we'll just always load
-        * the degamma LUT so that we don't have to reload
-        * it every time the pipe CSC is enabled.
-        */
-       if (crtc_state->base.degamma_lut)
-               glk_load_degamma_lut(crtc_state);
-       else
-               glk_load_degamma_lut_linear(crtc_state);
-
-       if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
-               i9xx_load_luts(crtc_state);
-       } else {
-               bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
-               ivb_load_lut_ext_max(crtc);
-       }
-}
-
-/* ilk+ "12.4" interpolated format (high 10 bits) */
-static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color)
-{
-       return (color->red >> 6) << 20 | (color->green >> 6) << 10 |
-               (color->blue >> 6);
-}
-
-/* ilk+ "12.4" interpolated format (low 6 bits) */
-static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color)
-{
-       return (color->red & 0x3f) << 24 | (color->green & 0x3f) << 14 |
-               (color->blue & 0x3f) << 4;
-}
-
-static void
-icl_load_gcmax(const struct intel_crtc_state *crtc_state,
-              const struct drm_color_lut *color)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       /* FIXME: LUT entries are 16 bit only, so we can program 0xFFFF at most */
-       I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
-       I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
-       I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
-}
-
-static void
-icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
-       const struct drm_color_lut *lut = blob->data;
-       enum pipe pipe = crtc->pipe;
-       u32 i;
-
-       /*
-        * Every entry in the multi-segment LUT corresponds to a
-        * superfine segment step, which is 1/(8 * 128 * 256).
-        *
-        * Superfine segment has 9 entries, corresponding to values
-        * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
-        */
-       I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
-
-       for (i = 0; i < 9; i++) {
-               const struct drm_color_lut *entry = &lut[i];
-
-               I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
-                          ilk_lut_12p4_ldw(entry));
-               I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
-                          ilk_lut_12p4_udw(entry));
-       }
-}
-
-static void
-icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
-       const struct drm_color_lut *lut = blob->data;
-       const struct drm_color_lut *entry;
-       enum pipe pipe = crtc->pipe;
-       u32 i;
-
-       /*
-        * Program Fine segment (let's call it seg2)...
-        *
-        * The fine segment's step is 1/(128 * 256), i.e. entries at
-        * 1/(128 * 256), 2/(128 * 256) ... 256/(128 * 256). So to program
-        * the fine segment of the LUT we need to pick every 8th entry of
-        * the user LUT and program 256 indices.
-        *
-        * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
-        * with seg2[0] being unused by the hardware.
-        */
-       I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
-       for (i = 1; i < 257; i++) {
-               entry = &lut[i * 8];
-               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
-               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
-       }
-
-       /*
-        * Program Coarse segment (let's call it seg3)...
-        *
-        * The coarse segment starts from index 0 and its step is 1/256,
-        * i.e. 0, 1/256, 2/256 ... 256/256. As per the description of each
-        * entry in the LUT above, we need to pick every (8 * 128)th entry
-        * in the LUT, and program 256 of those.
-        *
-        * The spec is not very clear about whether entries seg3[0] and
-        * seg3[1] are used, but we still need to program them to advance
-        * the index.
-        */
-       for (i = 0; i < 256; i++) {
-               entry = &lut[i * 8 * 128];
-               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
-               I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
-       }
-
-       /* The last entry in the LUT is to be programmed in GCMAX */
-       entry = &lut[256 * 8 * 128];
-       icl_load_gcmax(crtc_state, entry);
-       ivb_load_lut_ext_max(crtc);
-}
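
The multi-segment programming above reads the user LUT at three different strides; the standalone sketch below (not from the original file) prints which user entries feed each hardware segment, which also implies the user LUT must hold 8 * 128 * 256 + 1 entries.

#include <stdio.h>

int main(void)
{
        /* superfine segment: user entries 0..8, step 1/(8 * 128 * 256) */
        printf("superfine: user[0] .. user[8]\n");
        /* fine segment: every 8th user entry, 256 of them */
        printf("fine:      user[8], user[16], ... user[%d]\n", 256 * 8);
        /* coarse segment: every (8 * 128)th user entry, 256 of them */
        printf("coarse:    user[0], user[1024], ... user[%d]\n", 255 * 8 * 128);
        /* the very last user entry goes to GCMAX */
        printf("gcmax:     user[%d]\n", 256 * 8 * 128);
        return 0;
}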
-
-static void icl_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       if (crtc_state->base.degamma_lut)
-               glk_load_degamma_lut(crtc_state);
-
-       switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
-       case GAMMA_MODE_MODE_8BIT:
-               i9xx_load_luts(crtc_state);
-               break;
-
-       case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
-               icl_program_gamma_superfine_segment(crtc_state);
-               icl_program_gamma_multi_segment(crtc_state);
-               break;
-
-       default:
-               bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
-               ivb_load_lut_ext_max(crtc);
-       }
-}
-
-static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
-{
-       return drm_color_lut_extract(color->green, 14) << 16 |
-               drm_color_lut_extract(color->blue, 14);
-}
-
-static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
-{
-       return drm_color_lut_extract(color->red, 14);
-}
-
-static void chv_load_cgm_degamma(struct intel_crtc *crtc,
-                                const struct drm_property_blob *blob)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_color_lut *lut = blob->data;
-       int i, lut_size = drm_color_lut_size(blob);
-       enum pipe pipe = crtc->pipe;
-
-       for (i = 0; i < lut_size; i++) {
-               I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
-                          chv_cgm_degamma_ldw(&lut[i]));
-               I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
-                          chv_cgm_degamma_udw(&lut[i]));
-       }
-}
-
-static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
-{
-       return drm_color_lut_extract(color->green, 10) << 16 |
-               drm_color_lut_extract(color->blue, 10);
-}
-
-static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
-{
-       return drm_color_lut_extract(color->red, 10);
-}
-
-static void chv_load_cgm_gamma(struct intel_crtc *crtc,
-                              const struct drm_property_blob *blob)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_color_lut *lut = blob->data;
-       int i, lut_size = drm_color_lut_size(blob);
-       enum pipe pipe = crtc->pipe;
-
-       for (i = 0; i < lut_size; i++) {
-               I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
-                          chv_cgm_gamma_ldw(&lut[i]));
-               I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
-                          chv_cgm_gamma_udw(&lut[i]));
-       }
-}
-
-static void chv_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
-
-       cherryview_load_csc_matrix(crtc_state);
-
-       if (crtc_state_is_legacy_gamma(crtc_state)) {
-               i9xx_load_luts(crtc_state);
-               return;
-       }
-
-       if (degamma_lut)
-               chv_load_cgm_degamma(crtc, degamma_lut);
-
-       if (gamma_lut)
-               chv_load_cgm_gamma(crtc, gamma_lut);
-}
-
-void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       dev_priv->display.load_luts(crtc_state);
-}
-
-void intel_color_commit(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       dev_priv->display.color_commit(crtc_state);
-}
-
-int intel_color_check(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       return dev_priv->display.color_check(crtc_state);
-}
-
-void intel_color_get_config(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       if (dev_priv->display.read_luts)
-               dev_priv->display.read_luts(crtc_state);
-}
-
-static bool need_plane_update(struct intel_plane *plane,
-                             const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
-       /*
-        * On pre-SKL the pipe gamma enable and pipe csc enable for
-        * the pipe bottom color are configured via the primary plane.
-        * We have to reconfigure that even if the plane is inactive.
-        */
-       return crtc_state->active_planes & BIT(plane->id) ||
-               (INTEL_GEN(dev_priv) < 9 &&
-                plane->id == PLANE_PRIMARY);
-}
-
-static int
-intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_atomic_state *state =
-               to_intel_atomic_state(new_crtc_state->base.state);
-       const struct intel_crtc_state *old_crtc_state =
-               intel_atomic_get_old_crtc_state(state, crtc);
-       struct intel_plane *plane;
-
-       if (!new_crtc_state->base.active ||
-           drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
-               return 0;
-
-       if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
-           new_crtc_state->csc_enable == old_crtc_state->csc_enable)
-               return 0;
-
-       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
-               struct intel_plane_state *plane_state;
-
-               if (!need_plane_update(plane, new_crtc_state))
-                       continue;
-
-               plane_state = intel_atomic_get_plane_state(state, plane);
-               if (IS_ERR(plane_state))
-                       return PTR_ERR(plane_state);
-
-               new_crtc_state->update_planes |= BIT(plane->id);
-       }
-
-       return 0;
-}
-
-static int check_lut_size(const struct drm_property_blob *lut, int expected)
-{
-       int len;
-
-       if (!lut)
-               return 0;
-
-       len = drm_color_lut_size(lut);
-       if (len != expected) {
-               DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
-                             len, expected);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int check_luts(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-       const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
-       int gamma_length, degamma_length;
-       u32 gamma_tests, degamma_tests;
-
-       /* Always allow legacy gamma LUT with no further checking. */
-       if (crtc_state_is_legacy_gamma(crtc_state))
-               return 0;
-
-       /* C8 relies on its palette being stored in the legacy LUT */
-       if (crtc_state->c8_planes) {
-               DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
-               return -EINVAL;
-       }
-
-       degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
-       gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
-       degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
-       gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
-
-       if (check_lut_size(degamma_lut, degamma_length) ||
-           check_lut_size(gamma_lut, gamma_length))
-               return -EINVAL;
-
-       if (drm_color_lut_check(degamma_lut, degamma_tests) ||
-           drm_color_lut_check(gamma_lut, gamma_tests))
-               return -EINVAL;
-
-       return 0;
-}
-
-static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
-{
-       if (!crtc_state->gamma_enable ||
-           crtc_state_is_legacy_gamma(crtc_state))
-               return GAMMA_MODE_MODE_8BIT;
-       else
-               return GAMMA_MODE_MODE_10BIT; /* i965+ only */
-}
-
-static int i9xx_color_check(struct intel_crtc_state *crtc_state)
-{
-       int ret;
-
-       ret = check_luts(crtc_state);
-       if (ret)
-               return ret;
-
-       crtc_state->gamma_enable =
-               crtc_state->base.gamma_lut &&
-               !crtc_state->c8_planes;
-
-       crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
-
-       ret = intel_color_add_affected_planes(crtc_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
-{
-       u32 cgm_mode = 0;
-
-       if (crtc_state_is_legacy_gamma(crtc_state))
-               return 0;
-
-       if (crtc_state->base.degamma_lut)
-               cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
-       if (crtc_state->base.ctm)
-               cgm_mode |= CGM_PIPE_MODE_CSC;
-       if (crtc_state->base.gamma_lut)
-               cgm_mode |= CGM_PIPE_MODE_GAMMA;
-
-       return cgm_mode;
-}
-
-/*
- * CHV color pipeline:
- * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
- * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
- *
- * We always bypass the WGC csc and use the CGM csc
- * instead since it has degamma and better precision.
- */
-static int chv_color_check(struct intel_crtc_state *crtc_state)
-{
-       int ret;
-
-       ret = check_luts(crtc_state);
-       if (ret)
-               return ret;
-
-       /*
-        * Pipe gamma will be used only for the legacy LUT.
-        * Otherwise we bypass it and use the CGM gamma instead.
-        */
-       crtc_state->gamma_enable =
-               crtc_state_is_legacy_gamma(crtc_state) &&
-               !crtc_state->c8_planes;
-
-       crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
-
-       crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
-
-       ret = intel_color_add_affected_planes(crtc_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
-{
-       if (!crtc_state->gamma_enable ||
-           crtc_state_is_legacy_gamma(crtc_state))
-               return GAMMA_MODE_MODE_8BIT;
-       else
-               return GAMMA_MODE_MODE_10BIT;
-}
-
-static int ilk_color_check(struct intel_crtc_state *crtc_state)
-{
-       int ret;
-
-       ret = check_luts(crtc_state);
-       if (ret)
-               return ret;
-
-       crtc_state->gamma_enable =
-               crtc_state->base.gamma_lut &&
-               !crtc_state->c8_planes;
-
-       /*
-        * We don't expose the ctm on ilk/snb currently,
-        * nor do we enable YCbCr output. Also RGB limited
-        * range output is handled by the hw automagically.
-        */
-       crtc_state->csc_enable = false;
-
-       crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
-
-       crtc_state->csc_mode = 0;
-
-       ret = intel_color_add_affected_planes(crtc_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
-{
-       if (!crtc_state->gamma_enable ||
-           crtc_state_is_legacy_gamma(crtc_state))
-               return GAMMA_MODE_MODE_8BIT;
-       else if (crtc_state->base.gamma_lut &&
-                crtc_state->base.degamma_lut)
-               return GAMMA_MODE_MODE_SPLIT;
-       else
-               return GAMMA_MODE_MODE_10BIT;
-}
-
-static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
-{
-       bool limited_color_range = ilk_csc_limited_range(crtc_state);
-
-       /*
-        * CSC comes after the LUT in degamma, RGB->YCbCr,
-        * and RGB full->limited range mode.
-        */
-       if (crtc_state->base.degamma_lut ||
-           crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
-           limited_color_range)
-               return 0;
-
-       return CSC_POSITION_BEFORE_GAMMA;
-}
-
-static int ivb_color_check(struct intel_crtc_state *crtc_state)
-{
-       bool limited_color_range = ilk_csc_limited_range(crtc_state);
-       int ret;
-
-       ret = check_luts(crtc_state);
-       if (ret)
-               return ret;
-
-       crtc_state->gamma_enable =
-               (crtc_state->base.gamma_lut ||
-                crtc_state->base.degamma_lut) &&
-               !crtc_state->c8_planes;
-
-       crtc_state->csc_enable =
-               crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
-               crtc_state->base.ctm || limited_color_range;
-
-       crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
-
-       crtc_state->csc_mode = ivb_csc_mode(crtc_state);
-
-       ret = intel_color_add_affected_planes(crtc_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
-{
-       if (!crtc_state->gamma_enable ||
-           crtc_state_is_legacy_gamma(crtc_state))
-               return GAMMA_MODE_MODE_8BIT;
-       else
-               return GAMMA_MODE_MODE_10BIT;
-}
-
-static int glk_color_check(struct intel_crtc_state *crtc_state)
-{
-       int ret;
-
-       ret = check_luts(crtc_state);
-       if (ret)
-               return ret;
-
-       crtc_state->gamma_enable =
-               crtc_state->base.gamma_lut &&
-               !crtc_state->c8_planes;
-
-       /* On GLK+ degamma LUT is controlled by csc_enable */
-       crtc_state->csc_enable =
-               crtc_state->base.degamma_lut ||
-               crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
-               crtc_state->base.ctm || crtc_state->limited_color_range;
-
-       crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
-
-       crtc_state->csc_mode = 0;
-
-       ret = intel_color_add_affected_planes(crtc_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
-{
-       u32 gamma_mode = 0;
-
-       if (crtc_state->base.degamma_lut)
-               gamma_mode |= PRE_CSC_GAMMA_ENABLE;
-
-       if (crtc_state->base.gamma_lut &&
-           !crtc_state->c8_planes)
-               gamma_mode |= POST_CSC_GAMMA_ENABLE;
-
-       if (!crtc_state->base.gamma_lut ||
-           crtc_state_is_legacy_gamma(crtc_state))
-               gamma_mode |= GAMMA_MODE_MODE_8BIT;
-       else
-               gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
-
-       return gamma_mode;
-}
-
-static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
-{
-       u32 csc_mode = 0;
-
-       if (crtc_state->base.ctm)
-               csc_mode |= ICL_CSC_ENABLE;
-
-       if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
-           crtc_state->limited_color_range)
-               csc_mode |= ICL_OUTPUT_CSC_ENABLE;
-
-       return csc_mode;
-}
-
-static int icl_color_check(struct intel_crtc_state *crtc_state)
-{
-       int ret;
-
-       ret = check_luts(crtc_state);
-       if (ret)
-               return ret;
-
-       crtc_state->gamma_mode = icl_gamma_mode(crtc_state);
-
-       crtc_state->csc_mode = icl_csc_mode(crtc_state);
-
-       return 0;
-}
-
-void intel_color_init(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
-
-       drm_mode_crtc_set_gamma_size(&crtc->base, 256);
-
-       if (HAS_GMCH(dev_priv)) {
-               if (IS_CHERRYVIEW(dev_priv)) {
-                       dev_priv->display.color_check = chv_color_check;
-                       dev_priv->display.color_commit = i9xx_color_commit;
-                       dev_priv->display.load_luts = chv_load_luts;
-               } else if (INTEL_GEN(dev_priv) >= 4) {
-                       dev_priv->display.color_check = i9xx_color_check;
-                       dev_priv->display.color_commit = i9xx_color_commit;
-                       dev_priv->display.load_luts = i965_load_luts;
-               } else {
-                       dev_priv->display.color_check = i9xx_color_check;
-                       dev_priv->display.color_commit = i9xx_color_commit;
-                       dev_priv->display.load_luts = i9xx_load_luts;
-               }
-       } else {
-               if (INTEL_GEN(dev_priv) >= 11)
-                       dev_priv->display.color_check = icl_color_check;
-               else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-                       dev_priv->display.color_check = glk_color_check;
-               else if (INTEL_GEN(dev_priv) >= 7)
-                       dev_priv->display.color_check = ivb_color_check;
-               else
-                       dev_priv->display.color_check = ilk_color_check;
-
-               if (INTEL_GEN(dev_priv) >= 9)
-                       dev_priv->display.color_commit = skl_color_commit;
-               else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-                       dev_priv->display.color_commit = hsw_color_commit;
-               else
-                       dev_priv->display.color_commit = ilk_color_commit;
-
-               if (INTEL_GEN(dev_priv) >= 11)
-                       dev_priv->display.load_luts = icl_load_luts;
-               else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-                       dev_priv->display.load_luts = glk_load_luts;
-               else if (INTEL_GEN(dev_priv) >= 8)
-                       dev_priv->display.load_luts = bdw_load_luts;
-               else if (INTEL_GEN(dev_priv) >= 7)
-                       dev_priv->display.load_luts = ivb_load_luts;
-               else
-                       dev_priv->display.load_luts = ilk_load_luts;
-       }
-
-       drm_crtc_enable_color_mgmt(&crtc->base,
-                                  INTEL_INFO(dev_priv)->color.degamma_lut_size,
-                                  has_ctm,
-                                  INTEL_INFO(dev_priv)->color.gamma_lut_size);
-}
diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/intel_color.h
deleted file mode 100644 (file)
index 057e8ac..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_COLOR_H__
-#define __INTEL_COLOR_H__
-
-struct intel_crtc_state;
-struct intel_crtc;
-
-void intel_color_init(struct intel_crtc *crtc);
-int intel_color_check(struct intel_crtc_state *crtc_state);
-void intel_color_commit(const struct intel_crtc_state *crtc_state);
-void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
-void intel_color_get_config(struct intel_crtc_state *crtc_state);
-
-#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
deleted file mode 100644 (file)
index 841708d..0000000
+++ /dev/null
@@ -1,334 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corporation
- */
-
-#include "intel_combo_phy.h"
-#include "intel_drv.h"
-
-#define for_each_combo_port(__dev_priv, __port) \
-       for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)  \
-               for_each_if(intel_port_is_combophy(__dev_priv, __port))
-
-#define for_each_combo_port_reverse(__dev_priv, __port) \
-       for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
-               for_each_if(intel_port_is_combophy(__dev_priv, __port))
-
-enum {
-       PROCMON_0_85V_DOT_0,
-       PROCMON_0_95V_DOT_0,
-       PROCMON_0_95V_DOT_1,
-       PROCMON_1_05V_DOT_0,
-       PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
-       u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
-       [PROCMON_0_85V_DOT_0] =
-               { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
-       [PROCMON_0_95V_DOT_0] =
-               { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
-       [PROCMON_0_95V_DOT_1] =
-               { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
-       [PROCMON_1_05V_DOT_0] =
-               { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
-       [PROCMON_1_05V_DOT_1] =
-               { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, which is why we call the ICL macros even though the function has
- * CNL in its name.
- */
-static const struct cnl_procmon *
-cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
-{
-       const struct cnl_procmon *procmon;
-       u32 val;
-
-       val = I915_READ(ICL_PORT_COMP_DW3(port));
-       switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
-       default:
-               MISSING_CASE(val);
-               /* fall through */
-       case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
-               break;
-       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
-               break;
-       case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
-               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
-               break;
-       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
-               break;
-       case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
-               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
-               break;
-       }
-
-       return procmon;
-}
-
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
-                                      enum port port)
-{
-       const struct cnl_procmon *procmon;
-       u32 val;
-
-       procmon = cnl_get_procmon_ref_values(dev_priv, port);
-
-       val = I915_READ(ICL_PORT_COMP_DW1(port));
-       val &= ~((0xff << 16) | 0xff);
-       val |= procmon->dw1;
-       I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
-       I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
-       I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
-static bool check_phy_reg(struct drm_i915_private *dev_priv,
-                         enum port port, i915_reg_t reg, u32 mask,
-                         u32 expected_val)
-{
-       u32 val = I915_READ(reg);
-
-       if ((val & mask) != expected_val) {
-               DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
-                                "current %08x mask %08x expected %08x\n",
-                                port_name(port),
-                                reg.reg, val, mask, expected_val);
-               return false;
-       }
-
-       return true;
-}
-
-static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
-                                         enum port port)
-{
-       const struct cnl_procmon *procmon;
-       bool ret;
-
-       procmon = cnl_get_procmon_ref_values(dev_priv, port);
-
-       ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
-                           (0xff << 16) | 0xff, procmon->dw1);
-       ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
-                            -1U, procmon->dw9);
-       ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
-                            -1U, procmon->dw10);
-
-       return ret;
-}
-
-static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
-{
-       return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
-               (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
-}
-
-static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
-{
-       enum port port = PORT_A;
-       bool ret;
-
-       if (!cnl_combo_phy_enabled(dev_priv))
-               return false;
-
-       ret = cnl_verify_procmon_ref_values(dev_priv, port);
-
-       ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
-                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
-
-       return ret;
-}
-
-static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = I915_READ(CHICKEN_MISC_2);
-       val &= ~CNL_COMP_PWR_DOWN;
-       I915_WRITE(CHICKEN_MISC_2, val);
-
-       /* Dummy PORT_A to get the correct CNL register from the ICL macro */
-       cnl_set_procmon_ref_values(dev_priv, PORT_A);
-
-       val = I915_READ(CNL_PORT_COMP_DW0);
-       val |= COMP_INIT;
-       I915_WRITE(CNL_PORT_COMP_DW0, val);
-
-       val = I915_READ(CNL_PORT_CL1CM_DW5);
-       val |= CL_POWER_DOWN_ENABLE;
-       I915_WRITE(CNL_PORT_CL1CM_DW5, val);
-}
-
-static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       if (!cnl_combo_phy_verify_state(dev_priv))
-               DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
-
-       val = I915_READ(CHICKEN_MISC_2);
-       val |= CNL_COMP_PWR_DOWN;
-       I915_WRITE(CHICKEN_MISC_2, val);
-}
-
-static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
-                                 enum port port)
-{
-       return !(I915_READ(ICL_PHY_MISC(port)) &
-                ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
-               (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
-}
-
-static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
-                                      enum port port)
-{
-       bool ret;
-
-       if (!icl_combo_phy_enabled(dev_priv, port))
-               return false;
-
-       ret = cnl_verify_procmon_ref_values(dev_priv, port);
-
-       if (port == PORT_A)
-               ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
-                                    IREFGEN, IREFGEN);
-
-       ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
-                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
-
-       return ret;
-}
-
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
-                                   enum port port, bool is_dsi,
-                                   int lane_count, bool lane_reversal)
-{
-       u8 lane_mask;
-       u32 val;
-
-       if (is_dsi) {
-               WARN_ON(lane_reversal);
-
-               switch (lane_count) {
-               case 1:
-                       lane_mask = PWR_DOWN_LN_3_1_0;
-                       break;
-               case 2:
-                       lane_mask = PWR_DOWN_LN_3_1;
-                       break;
-               case 3:
-                       lane_mask = PWR_DOWN_LN_3;
-                       break;
-               default:
-                       MISSING_CASE(lane_count);
-                       /* fall-through */
-               case 4:
-                       lane_mask = PWR_UP_ALL_LANES;
-                       break;
-               }
-       } else {
-               switch (lane_count) {
-               case 1:
-                       lane_mask = lane_reversal ? PWR_DOWN_LN_2_1_0 :
-                                                   PWR_DOWN_LN_3_2_1;
-                       break;
-               case 2:
-                       lane_mask = lane_reversal ? PWR_DOWN_LN_1_0 :
-                                                   PWR_DOWN_LN_3_2;
-                       break;
-               default:
-                       MISSING_CASE(lane_count);
-                       /* fall-through */
-               case 4:
-                       lane_mask = PWR_UP_ALL_LANES;
-                       break;
-               }
-       }
-
-       val = I915_READ(ICL_PORT_CL_DW10(port));
-       val &= ~PWR_DOWN_LN_MASK;
-       val |= lane_mask << PWR_DOWN_LN_SHIFT;
-       I915_WRITE(ICL_PORT_CL_DW10(port), val);
-}
-
-static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
-{
-       enum port port;
-
-       for_each_combo_port(dev_priv, port) {
-               u32 val;
-
-               if (icl_combo_phy_verify_state(dev_priv, port)) {
-                       DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
-                                        port_name(port));
-                       continue;
-               }
-
-               val = I915_READ(ICL_PHY_MISC(port));
-               val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-               I915_WRITE(ICL_PHY_MISC(port), val);
-
-               cnl_set_procmon_ref_values(dev_priv, port);
-
-               if (port == PORT_A) {
-                       val = I915_READ(ICL_PORT_COMP_DW8(port));
-                       val |= IREFGEN;
-                       I915_WRITE(ICL_PORT_COMP_DW8(port), val);
-               }
-
-               val = I915_READ(ICL_PORT_COMP_DW0(port));
-               val |= COMP_INIT;
-               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
-               val = I915_READ(ICL_PORT_CL_DW5(port));
-               val |= CL_POWER_DOWN_ENABLE;
-               I915_WRITE(ICL_PORT_CL_DW5(port), val);
-       }
-}
-
-static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
-{
-       enum port port;
-
-       for_each_combo_port_reverse(dev_priv, port) {
-               u32 val;
-
-               if (port == PORT_A &&
-                   !icl_combo_phy_verify_state(dev_priv, port))
-                       DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
-                                port_name(port));
-
-               val = I915_READ(ICL_PHY_MISC(port));
-               val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-               I915_WRITE(ICL_PHY_MISC(port), val);
-
-               val = I915_READ(ICL_PORT_COMP_DW0(port));
-               val &= ~COMP_INIT;
-               I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-       }
-}
-
-void intel_combo_phy_init(struct drm_i915_private *i915)
-{
-       if (INTEL_GEN(i915) >= 11)
-               icl_combo_phys_init(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_combo_phys_init(i915);
-}
-
-void intel_combo_phy_uninit(struct drm_i915_private *i915)
-{
-       if (INTEL_GEN(i915) >= 11)
-               icl_combo_phys_uninit(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_combo_phys_uninit(i915);
-}
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.h b/drivers/gpu/drm/i915/intel_combo_phy.h
deleted file mode 100644 (file)
index e6e195a..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_COMBO_PHY_H__
-#define __INTEL_COMBO_PHY_H__
-
-#include <linux/types.h>
-#include <drm/i915_drm.h>
-
-struct drm_i915_private;
-
-void intel_combo_phy_init(struct drm_i915_private *dev_priv);
-void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
-                                   enum port port, bool is_dsi,
-                                   int lane_count, bool lane_reversal);
-
-#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
deleted file mode 100644 (file)
index 41310f8..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007, 2010 Intel Corporation
- *   Jesse Barnes <jesse.barnes@intel.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/i2c.h>
-#include <linux/slab.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
-
-#include "display/intel_panel.h"
-
-#include "i915_drv.h"
-#include "intel_connector.h"
-#include "intel_drv.h"
-#include "intel_hdcp.h"
-
-int intel_connector_init(struct intel_connector *connector)
-{
-       struct intel_digital_connector_state *conn_state;
-
-       /*
-        * Allocate enough memory to hold intel_digital_connector_state.
-        * This might be a few bytes too many, but for connectors that don't
-        * need it we'll free the state and allocate a smaller one on the first
-        * successful commit anyway.
-        */
-       conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
-       if (!conn_state)
-               return -ENOMEM;
-
-       __drm_atomic_helper_connector_reset(&connector->base,
-                                           &conn_state->base);
-
-       return 0;
-}
-
-struct intel_connector *intel_connector_alloc(void)
-{
-       struct intel_connector *connector;
-
-       connector = kzalloc(sizeof(*connector), GFP_KERNEL);
-       if (!connector)
-               return NULL;
-
-       if (intel_connector_init(connector) < 0) {
-               kfree(connector);
-               return NULL;
-       }
-
-       return connector;
-}
-
-/*
- * Free the bits allocated by intel_connector_alloc.
- * This should only be used after intel_connector_alloc has returned
- * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should
- * take care of proper cleanup/free (see intel_connector_destroy).
- */
-void intel_connector_free(struct intel_connector *connector)
-{
-       kfree(to_intel_digital_connector_state(connector->base.state));
-       kfree(connector);
-}
-
-/*
- * Connector type independent destroy hook for drm_connector_funcs.
- */
-void intel_connector_destroy(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-       kfree(intel_connector->detect_edid);
-
-       intel_hdcp_cleanup(intel_connector);
-
-       if (!IS_ERR_OR_NULL(intel_connector->edid))
-               kfree(intel_connector->edid);
-
-       intel_panel_fini(&intel_connector->panel);
-
-       drm_connector_cleanup(connector);
-
-       if (intel_connector->port)
-               drm_dp_mst_put_port_malloc(intel_connector->port);
-
-       kfree(connector);
-}
-
-int intel_connector_register(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       int ret;
-
-       ret = intel_backlight_device_register(intel_connector);
-       if (ret)
-               goto err;
-
-       if (i915_inject_load_failure()) {
-               ret = -EFAULT;
-               goto err_backlight;
-       }
-
-       return 0;
-
-err_backlight:
-       intel_backlight_device_unregister(intel_connector);
-err:
-       return ret;
-}
-
-void intel_connector_unregister(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-       intel_backlight_device_unregister(intel_connector);
-}
-
-void intel_connector_attach_encoder(struct intel_connector *connector,
-                                   struct intel_encoder *encoder)
-{
-       connector->encoder = encoder;
-       drm_connector_attach_encoder(&connector->base, &encoder->base);
-}
-
-/*
- * Simple connector->get_hw_state implementation for encoders that support only
- * one connector and no cloning and hence the encoder state determines the state
- * of the connector.
- */
-bool intel_connector_get_hw_state(struct intel_connector *connector)
-{
-       enum pipe pipe = 0;
-       struct intel_encoder *encoder = connector->encoder;
-
-       return encoder->get_hw_state(encoder, &pipe);
-}
-
-enum pipe intel_connector_get_pipe(struct intel_connector *connector)
-{
-       struct drm_device *dev = connector->base.dev;
-
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
-       if (!connector->base.state->crtc)
-               return INVALID_PIPE;
-
-       return to_intel_crtc(connector->base.state->crtc)->pipe;
-}
-
-/**
- * intel_connector_update_modes - update connector from edid
- * @connector: DRM connector device to use
- * @edid: previously read EDID information
- */
-int intel_connector_update_modes(struct drm_connector *connector,
-                               struct edid *edid)
-{
-       int ret;
-
-       drm_connector_update_edid_property(connector, edid);
-       ret = drm_add_edid_modes(connector, edid);
-
-       return ret;
-}
-
-/**
- * intel_ddc_get_modes - get modelist from monitor
- * @connector: DRM connector device to use
- * @adapter: i2c adapter
- *
- * Fetch the EDID information from @connector using the DDC bus.
- */
-int intel_ddc_get_modes(struct drm_connector *connector,
-                       struct i2c_adapter *adapter)
-{
-       struct edid *edid;
-       int ret;
-
-       edid = drm_get_edid(connector, adapter);
-       if (!edid)
-               return 0;
-
-       ret = intel_connector_update_modes(connector, edid);
-       kfree(edid);
-
-       return ret;
-}
-
-static const struct drm_prop_enum_list force_audio_names[] = {
-       { HDMI_AUDIO_OFF_DVI, "force-dvi" },
-       { HDMI_AUDIO_OFF, "off" },
-       { HDMI_AUDIO_AUTO, "auto" },
-       { HDMI_AUDIO_ON, "on" },
-};
-
-void
-intel_attach_force_audio_property(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_property *prop;
-
-       prop = dev_priv->force_audio_property;
-       if (prop == NULL) {
-               prop = drm_property_create_enum(dev, 0,
-                                          "audio",
-                                          force_audio_names,
-                                          ARRAY_SIZE(force_audio_names));
-               if (prop == NULL)
-                       return;
-
-               dev_priv->force_audio_property = prop;
-       }
-       drm_object_attach_property(&connector->base, prop, 0);
-}
-
-static const struct drm_prop_enum_list broadcast_rgb_names[] = {
-       { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
-       { INTEL_BROADCAST_RGB_FULL, "Full" },
-       { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
-};
-
-void
-intel_attach_broadcast_rgb_property(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_property *prop;
-
-       prop = dev_priv->broadcast_rgb_property;
-       if (prop == NULL) {
-               prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
-                                          "Broadcast RGB",
-                                          broadcast_rgb_names,
-                                          ARRAY_SIZE(broadcast_rgb_names));
-               if (prop == NULL)
-                       return;
-
-               dev_priv->broadcast_rgb_property = prop;
-       }
-
-       drm_object_attach_property(&connector->base, prop, 0);
-}
-
-void
-intel_attach_aspect_ratio_property(struct drm_connector *connector)
-{
-       if (!drm_mode_create_aspect_ratio_property(connector->dev))
-               drm_object_attach_property(&connector->base,
-                       connector->dev->mode_config.aspect_ratio_property,
-                       DRM_MODE_PICTURE_ASPECT_NONE);
-}
-
-void
-intel_attach_colorspace_property(struct drm_connector *connector)
-{
-       if (!drm_mode_create_colorspace_property(connector))
-               drm_object_attach_property(&connector->base,
-                                          connector->colorspace_property, 0);
-}
diff --git a/drivers/gpu/drm/i915/intel_connector.h b/drivers/gpu/drm/i915/intel_connector.h
deleted file mode 100644 (file)
index 93a7375..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_CONNECTOR_H__
-#define __INTEL_CONNECTOR_H__
-
-#include "intel_display.h"
-
-struct drm_connector;
-struct edid;
-struct i2c_adapter;
-struct intel_connector;
-struct intel_encoder;
-
-int intel_connector_init(struct intel_connector *connector);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-void intel_connector_destroy(struct drm_connector *connector);
-int intel_connector_register(struct drm_connector *connector);
-void intel_connector_unregister(struct drm_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
-                                   struct intel_encoder *encoder);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-enum pipe intel_connector_get_pipe(struct intel_connector *connector);
-int intel_connector_update_modes(struct drm_connector *connector,
-                                struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-void intel_attach_colorspace_property(struct drm_connector *connector);
-
-#endif /* __INTEL_CONNECTOR_H__ */
index 4dfc2f83f5d0b5a810a08f878d4aa1399ec46659..ddafc819bf30a517d177a9a4da640fc32bbc63f5 100644 (file)
 
 #include <uapi/drm/i915_drm.h>
 
+#include "display/intel_display.h"
+
 #include "gt/intel_engine_types.h"
 #include "gt/intel_context_types.h"
 #include "gt/intel_sseu.h"
 
-#include "intel_display.h"
-
 struct drm_printer;
 struct drm_i915_private;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
deleted file mode 100644 (file)
index 8d7e4c8..0000000
+++ /dev/null
@@ -1,17119 +0,0 @@
-/*
- * Copyright © 2006-2007 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     Eric Anholt <eric@anholt.net>
- */
-
-#include <linux/i2c.h>
-#include <linux/input.h>
-#include <linux/intel-iommu.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/reservation.h>
-#include <linux/slab.h>
-#include <linux/vgaarb.h>
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
-
-#include "display/intel_crt.h"
-#include "display/intel_ddi.h"
-#include "display/intel_dp.h"
-#include "display/intel_dsi.h"
-#include "display/intel_dvo.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_lvds.h"
-#include "display/intel_sdvo.h"
-#include "display/intel_tv.h"
-#include "display/intel_vdsc.h"
-
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_acpi.h"
-#include "intel_atomic.h"
-#include "intel_atomic_plane.h"
-#include "intel_bw.h"
-#include "intel_color.h"
-#include "intel_cdclk.h"
-#include "intel_drv.h"
-#include "intel_fbc.h"
-#include "intel_fbdev.h"
-#include "intel_fifo_underrun.h"
-#include "intel_frontbuffer.h"
-#include "intel_hdcp.h"
-#include "intel_hotplug.h"
-#include "intel_overlay.h"
-#include "intel_pipe_crc.h"
-#include "intel_pm.h"
-#include "intel_psr.h"
-#include "intel_quirks.h"
-#include "intel_sideband.h"
-#include "intel_sprite.h"
-
-/* Primary plane formats for gen <= 3 */
-static const u32 i8xx_primary_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB1555,
-       DRM_FORMAT_XRGB8888,
-};
-
-/* Primary plane formats for gen >= 4 */
-static const u32 i965_primary_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-};
-
-static const u64 i9xx_format_modifiers[] = {
-       I915_FORMAT_MOD_X_TILED,
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-/* Cursor formats */
-static const u32 intel_cursor_formats[] = {
-       DRM_FORMAT_ARGB8888,
-};
-
-static const u64 cursor_format_modifiers[] = {
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
-                               struct intel_crtc_state *pipe_config);
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
-                                  struct intel_crtc_state *pipe_config);
-
-static int intel_framebuffer_init(struct intel_framebuffer *ifb,
-                                 struct drm_i915_gem_object *obj,
-                                 struct drm_mode_fb_cmd2 *mode_cmd);
-static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
-static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
-static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
-                                        const struct intel_link_m_n *m_n,
-                                        const struct intel_link_m_n *m2_n2);
-static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
-static void vlv_prepare_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *pipe_config);
-static void chv_prepare_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
-                                   struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void intel_modeset_setup_hw_state(struct drm_device *dev,
-                                        struct drm_modeset_acquire_ctx *ctx);
-static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
-
-struct intel_limit {
-       struct {
-               int min, max;
-       } dot, vco, n, m, m1, m2, p, p1;
-
-       struct {
-               int dot_limit;
-               int p2_slow, p2_fast;
-       } p2;
-};
-
-/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
-{
-       int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
-
-       /* Obtain SKU information */
-       hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
-               CCK_FUSE_HPLL_FREQ_MASK;
-
-       return vco_freq[hpll_freq] * 1000;
-}
-
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
-                     const char *name, u32 reg, int ref_freq)
-{
-       u32 val;
-       int divider;
-
-       val = vlv_cck_read(dev_priv, reg);
-       divider = val & CCK_FREQUENCY_VALUES;
-
-       WARN((val & CCK_FREQUENCY_STATUS) !=
-            (divider << CCK_FREQUENCY_STATUS_SHIFT),
-            "%s change in progress\n", name);
-
-       return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
-}
-
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
-                          const char *name, u32 reg)
-{
-       int hpll;
-
-       vlv_cck_get(dev_priv);
-
-       if (dev_priv->hpll_freq == 0)
-               dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
-
-       hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
-
-       vlv_cck_put(dev_priv);
-
-       return hpll;
-}
-
-static void intel_update_czclk(struct drm_i915_private *dev_priv)
-{
-       if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
-               return;
-
-       dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
-                                                     CCK_CZ_CLOCK_CONTROL);
-
-       DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
-}
-
-static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_i915_private *dev_priv,
-                   const struct intel_crtc_state *pipe_config)
-{
-       if (HAS_DDI(dev_priv))
-               return pipe_config->port_clock; /* SPLL */
-       else
-               return dev_priv->fdi_pll_freq;
-}
-
-static const struct intel_limit intel_limits_i8xx_dac = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 908000, .max = 1512000 },
-       .n = { .min = 2, .max = 16 },
-       .m = { .min = 96, .max = 140 },
-       .m1 = { .min = 18, .max = 26 },
-       .m2 = { .min = 6, .max = 16 },
-       .p = { .min = 4, .max = 128 },
-       .p1 = { .min = 2, .max = 33 },
-       .p2 = { .dot_limit = 165000,
-               .p2_slow = 4, .p2_fast = 2 },
-};
-
-static const struct intel_limit intel_limits_i8xx_dvo = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 908000, .max = 1512000 },
-       .n = { .min = 2, .max = 16 },
-       .m = { .min = 96, .max = 140 },
-       .m1 = { .min = 18, .max = 26 },
-       .m2 = { .min = 6, .max = 16 },
-       .p = { .min = 4, .max = 128 },
-       .p1 = { .min = 2, .max = 33 },
-       .p2 = { .dot_limit = 165000,
-               .p2_slow = 4, .p2_fast = 4 },
-};
-
-static const struct intel_limit intel_limits_i8xx_lvds = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 908000, .max = 1512000 },
-       .n = { .min = 2, .max = 16 },
-       .m = { .min = 96, .max = 140 },
-       .m1 = { .min = 18, .max = 26 },
-       .m2 = { .min = 6, .max = 16 },
-       .p = { .min = 4, .max = 128 },
-       .p1 = { .min = 1, .max = 6 },
-       .p2 = { .dot_limit = 165000,
-               .p2_slow = 14, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_i9xx_sdvo = {
-       .dot = { .min = 20000, .max = 400000 },
-       .vco = { .min = 1400000, .max = 2800000 },
-       .n = { .min = 1, .max = 6 },
-       .m = { .min = 70, .max = 120 },
-       .m1 = { .min = 8, .max = 18 },
-       .m2 = { .min = 3, .max = 7 },
-       .p = { .min = 5, .max = 80 },
-       .p1 = { .min = 1, .max = 8 },
-       .p2 = { .dot_limit = 200000,
-               .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_i9xx_lvds = {
-       .dot = { .min = 20000, .max = 400000 },
-       .vco = { .min = 1400000, .max = 2800000 },
-       .n = { .min = 1, .max = 6 },
-       .m = { .min = 70, .max = 120 },
-       .m1 = { .min = 8, .max = 18 },
-       .m2 = { .min = 3, .max = 7 },
-       .p = { .min = 7, .max = 98 },
-       .p1 = { .min = 1, .max = 8 },
-       .p2 = { .dot_limit = 112000,
-               .p2_slow = 14, .p2_fast = 7 },
-};
-
-
-static const struct intel_limit intel_limits_g4x_sdvo = {
-       .dot = { .min = 25000, .max = 270000 },
-       .vco = { .min = 1750000, .max = 3500000},
-       .n = { .min = 1, .max = 4 },
-       .m = { .min = 104, .max = 138 },
-       .m1 = { .min = 17, .max = 23 },
-       .m2 = { .min = 5, .max = 11 },
-       .p = { .min = 10, .max = 30 },
-       .p1 = { .min = 1, .max = 3},
-       .p2 = { .dot_limit = 270000,
-               .p2_slow = 10,
-               .p2_fast = 10
-       },
-};
-
-static const struct intel_limit intel_limits_g4x_hdmi = {
-       .dot = { .min = 22000, .max = 400000 },
-       .vco = { .min = 1750000, .max = 3500000},
-       .n = { .min = 1, .max = 4 },
-       .m = { .min = 104, .max = 138 },
-       .m1 = { .min = 16, .max = 23 },
-       .m2 = { .min = 5, .max = 11 },
-       .p = { .min = 5, .max = 80 },
-       .p1 = { .min = 1, .max = 8},
-       .p2 = { .dot_limit = 165000,
-               .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
-       .dot = { .min = 20000, .max = 115000 },
-       .vco = { .min = 1750000, .max = 3500000 },
-       .n = { .min = 1, .max = 3 },
-       .m = { .min = 104, .max = 138 },
-       .m1 = { .min = 17, .max = 23 },
-       .m2 = { .min = 5, .max = 11 },
-       .p = { .min = 28, .max = 112 },
-       .p1 = { .min = 2, .max = 8 },
-       .p2 = { .dot_limit = 0,
-               .p2_slow = 14, .p2_fast = 14
-       },
-};
-
-static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
-       .dot = { .min = 80000, .max = 224000 },
-       .vco = { .min = 1750000, .max = 3500000 },
-       .n = { .min = 1, .max = 3 },
-       .m = { .min = 104, .max = 138 },
-       .m1 = { .min = 17, .max = 23 },
-       .m2 = { .min = 5, .max = 11 },
-       .p = { .min = 14, .max = 42 },
-       .p1 = { .min = 2, .max = 6 },
-       .p2 = { .dot_limit = 0,
-               .p2_slow = 7, .p2_fast = 7
-       },
-};
-
-static const struct intel_limit intel_limits_pineview_sdvo = {
-       .dot = { .min = 20000, .max = 400000},
-       .vco = { .min = 1700000, .max = 3500000 },
-       /* Pineview's Ncounter is a ring counter */
-       .n = { .min = 3, .max = 6 },
-       .m = { .min = 2, .max = 256 },
-       /* Pineview only has one combined m divider, which we treat as m2. */
-       .m1 = { .min = 0, .max = 0 },
-       .m2 = { .min = 0, .max = 254 },
-       .p = { .min = 5, .max = 80 },
-       .p1 = { .min = 1, .max = 8 },
-       .p2 = { .dot_limit = 200000,
-               .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_pineview_lvds = {
-       .dot = { .min = 20000, .max = 400000 },
-       .vco = { .min = 1700000, .max = 3500000 },
-       .n = { .min = 3, .max = 6 },
-       .m = { .min = 2, .max = 256 },
-       .m1 = { .min = 0, .max = 0 },
-       .m2 = { .min = 0, .max = 254 },
-       .p = { .min = 7, .max = 112 },
-       .p1 = { .min = 1, .max = 8 },
-       .p2 = { .dot_limit = 112000,
-               .p2_slow = 14, .p2_fast = 14 },
-};
-
-/* Ironlake / Sandybridge
- *
- * We calculate clock using (register_value + 2) for N/M1/M2, so here
- * the range value for them is (actual_value - 2).
- */
-static const struct intel_limit intel_limits_ironlake_dac = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 1760000, .max = 3510000 },
-       .n = { .min = 1, .max = 5 },
-       .m = { .min = 79, .max = 127 },
-       .m1 = { .min = 12, .max = 22 },
-       .m2 = { .min = 5, .max = 9 },
-       .p = { .min = 5, .max = 80 },
-       .p1 = { .min = 1, .max = 8 },
-       .p2 = { .dot_limit = 225000,
-               .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_ironlake_single_lvds = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 1760000, .max = 3510000 },
-       .n = { .min = 1, .max = 3 },
-       .m = { .min = 79, .max = 118 },
-       .m1 = { .min = 12, .max = 22 },
-       .m2 = { .min = 5, .max = 9 },
-       .p = { .min = 28, .max = 112 },
-       .p1 = { .min = 2, .max = 8 },
-       .p2 = { .dot_limit = 225000,
-               .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_ironlake_dual_lvds = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 1760000, .max = 3510000 },
-       .n = { .min = 1, .max = 3 },
-       .m = { .min = 79, .max = 127 },
-       .m1 = { .min = 12, .max = 22 },
-       .m2 = { .min = 5, .max = 9 },
-       .p = { .min = 14, .max = 56 },
-       .p1 = { .min = 2, .max = 8 },
-       .p2 = { .dot_limit = 225000,
-               .p2_slow = 7, .p2_fast = 7 },
-};
-
-/* LVDS 100MHz refclk limits. */
-static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 1760000, .max = 3510000 },
-       .n = { .min = 1, .max = 2 },
-       .m = { .min = 79, .max = 126 },
-       .m1 = { .min = 12, .max = 22 },
-       .m2 = { .min = 5, .max = 9 },
-       .p = { .min = 28, .max = 112 },
-       .p1 = { .min = 2, .max = 8 },
-       .p2 = { .dot_limit = 225000,
-               .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
-       .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 1760000, .max = 3510000 },
-       .n = { .min = 1, .max = 3 },
-       .m = { .min = 79, .max = 126 },
-       .m1 = { .min = 12, .max = 22 },
-       .m2 = { .min = 5, .max = 9 },
-       .p = { .min = 14, .max = 42 },
-       .p1 = { .min = 2, .max = 6 },
-       .p2 = { .dot_limit = 225000,
-               .p2_slow = 7, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_vlv = {
-        /*
-         * These are the data rate limits (measured in fast clocks)
-         * since those are the strictest limits we have. The fast
-         * clock and actual rate limits are more relaxed, so checking
-         * them would make no difference.
-         */
-       .dot = { .min = 25000 * 5, .max = 270000 * 5 },
-       .vco = { .min = 4000000, .max = 6000000 },
-       .n = { .min = 1, .max = 7 },
-       .m1 = { .min = 2, .max = 3 },
-       .m2 = { .min = 11, .max = 156 },
-       .p1 = { .min = 2, .max = 3 },
-       .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
-};
-
-static const struct intel_limit intel_limits_chv = {
-       /*
-        * These are the data rate limits (measured in fast clocks)
-        * since those are the strictest limits we have.  The fast
-        * clock and actual rate limits are more relaxed, so checking
-        * them would make no difference.
-        */
-       .dot = { .min = 25000 * 5, .max = 540000 * 5},
-       .vco = { .min = 4800000, .max = 6480000 },
-       .n = { .min = 1, .max = 1 },
-       .m1 = { .min = 2, .max = 2 },
-       .m2 = { .min = 24 << 22, .max = 175 << 22 },
-       .p1 = { .min = 2, .max = 4 },
-       .p2 = { .p2_slow = 1, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_bxt = {
-       /* FIXME: find real dot limits */
-       .dot = { .min = 0, .max = INT_MAX },
-       .vco = { .min = 4800000, .max = 6700000 },
-       .n = { .min = 1, .max = 1 },
-       .m1 = { .min = 2, .max = 2 },
-       /* FIXME: find real m2 limits */
-       .m2 = { .min = 2 << 22, .max = 255 << 22 },
-       .p1 = { .min = 2, .max = 4 },
-       .p2 = { .p2_slow = 1, .p2_fast = 20 },
-};
-
-/* WA Display #0827: Gen9:all */
-static void
-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
-{
-       if (enable)
-               I915_WRITE(CLKGATE_DIS_PSL(pipe),
-                          I915_READ(CLKGATE_DIS_PSL(pipe)) |
-                          DUPS1_GATING_DIS | DUPS2_GATING_DIS);
-       else
-               I915_WRITE(CLKGATE_DIS_PSL(pipe),
-                          I915_READ(CLKGATE_DIS_PSL(pipe)) &
-                          ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
-}
-
-/* Wa_2006604312:icl */
-static void
-icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
-                      bool enable)
-{
-       if (enable)
-               I915_WRITE(CLKGATE_DIS_PSL(pipe),
-                          I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
-       else
-               I915_WRITE(CLKGATE_DIS_PSL(pipe),
-                          I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
-}
-
-static bool
-needs_modeset(const struct drm_crtc_state *state)
-{
-       return drm_atomic_crtc_needs_modeset(state);
-}
-
-/*
- * Platform specific helpers to calculate the port PLL loopback- (clock.m),
- * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
- * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
- * The helpers' return value is the rate of the clock that is fed to the
- * display engine's pipe which can be the above fast dot clock rate or a
- * divided-down version of it.
- */
-/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
-{
-       clock->m = clock->m2 + 2;
-       clock->p = clock->p1 * clock->p2;
-       if (WARN_ON(clock->n == 0 || clock->p == 0))
-               return 0;
-       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-       return clock->dot;
-}
-
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
-{
-       return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
-}
-
-static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
-{
-       clock->m = i9xx_dpll_compute_m(clock);
-       clock->p = clock->p1 * clock->p2;
-       if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
-               return 0;
-       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
-       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-       return clock->dot;
-}
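
For reference, a minimal standalone sketch evaluating the dot-clock equation from the comment above; the divider values are hypothetical, merely chosen to land inside intel_limits_i9xx_sdvo, and div_round_closest() is a local stand-in for the kernel's DIV_ROUND_CLOSEST():

#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_CLOSEST() (positive values only). */
static unsigned int div_round_closest(unsigned int a, unsigned int b)
{
        return (a + b / 2) / b;
}

int main(void)
{
        /* Hypothetical values within intel_limits_i9xx_sdvo above. */
        unsigned int refclk = 96000;                              /* kHz */
        unsigned int n = 1, m1 = 11, m2 = 5, p1 = 2, p2 = 5;
        unsigned int m = 5 * (m1 + 2) + (m2 + 2);                 /* 72 */
        unsigned int p = p1 * p2;                                 /* 10 */
        unsigned int vco = div_round_closest(refclk * m, n + 2);  /* 2304000 kHz */
        unsigned int dot = div_round_closest(vco, p);             /* 230400 kHz */

        printf("m=%u p=%u vco=%u kHz dot=%u kHz\n", m, p, vco, dot);
        return 0;
}
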
-
-static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
-{
-       clock->m = clock->m1 * clock->m2;
-       clock->p = clock->p1 * clock->p2;
-       if (WARN_ON(clock->n == 0 || clock->p == 0))
-               return 0;
-       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-       return clock->dot / 5;
-}
-
-int chv_calc_dpll_params(int refclk, struct dpll *clock)
-{
-       clock->m = clock->m1 * clock->m2;
-       clock->p = clock->p1 * clock->p2;
-       if (WARN_ON(clock->n == 0 || clock->p == 0))
-               return 0;
-       clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
-                                          clock->n << 22);
-       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
-       return clock->dot / 5;
-}
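
For reference, a standalone sketch of the 22.22 fixed-point m2 value reflected by the n << 22 divide above; the reference clock and m2 value are hypothetical, and plain 64-bit division stands in for DIV_ROUND_CLOSEST_ULL() since this example divides evenly:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t refclk = 19200;                /* kHz, hypothetical */
        uint64_t n = 1, m1 = 2;
        uint64_t m2 = 140ULL << 22;             /* integer 140 in 22.22 fixed point */
        uint64_t m = m1 * m2;
        /* vco = refclk * m / (n << 22); the << 22 cancels the fixed-point scaling of m2. */
        uint64_t vco = refclk * m / (n << 22);

        printf("vco = %llu kHz\n", (unsigned long long)vco);    /* 5376000 kHz */
        return 0;
}
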
-
-#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
-
-/*
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
-                              const struct intel_limit *limit,
-                              const struct dpll *clock)
-{
-       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-               INTELPllInvalid("n out of range\n");
-       if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
-               INTELPllInvalid("p1 out of range\n");
-       if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
-               INTELPllInvalid("m2 out of range\n");
-       if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
-               INTELPllInvalid("m1 out of range\n");
-
-       if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
-           !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
-               if (clock->m1 <= clock->m2)
-                       INTELPllInvalid("m1 <= m2\n");
-
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-           !IS_GEN9_LP(dev_priv)) {
-               if (clock->p < limit->p.min || limit->p.max < clock->p)
-                       INTELPllInvalid("p out of range\n");
-               if (clock->m < limit->m.min || limit->m.max < clock->m)
-                       INTELPllInvalid("m out of range\n");
-       }
-
-       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
-               INTELPllInvalid("vco out of range\n");
-       /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
-        * connector, etc., rather than just a single range.
-        */
-       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
-               INTELPllInvalid("dot out of range\n");
-
-       return true;
-}
-
-static int
-i9xx_select_p2_div(const struct intel_limit *limit,
-                  const struct intel_crtc_state *crtc_state,
-                  int target)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               /*
-                * For LVDS just rely on its current settings for dual-channel.
-                * We haven't figured out how to reliably set up different
-                * single/dual channel state, if we even can.
-                */
-               if (intel_is_dual_link_lvds(dev_priv))
-                       return limit->p2.p2_fast;
-               else
-                       return limit->p2.p2_slow;
-       } else {
-               if (target < limit->p2.dot_limit)
-                       return limit->p2.p2_slow;
-               else
-                       return limit->p2.p2_fast;
-       }
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-i9xx_find_best_dpll(const struct intel_limit *limit,
-                   struct intel_crtc_state *crtc_state,
-                   int target, int refclk, struct dpll *match_clock,
-                   struct dpll *best_clock)
-{
-       struct drm_device *dev = crtc_state->base.crtc->dev;
-       struct dpll clock;
-       int err = target;
-
-       memset(best_clock, 0, sizeof(*best_clock));
-
-       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
-       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
-            clock.m1++) {
-               for (clock.m2 = limit->m2.min;
-                    clock.m2 <= limit->m2.max; clock.m2++) {
-                       if (clock.m2 >= clock.m1)
-                               break;
-                       for (clock.n = limit->n.min;
-                            clock.n <= limit->n.max; clock.n++) {
-                               for (clock.p1 = limit->p1.min;
-                                       clock.p1 <= limit->p1.max; clock.p1++) {
-                                       int this_err;
-
-                                       i9xx_calc_dpll_params(refclk, &clock);
-                                       if (!intel_PLL_is_valid(to_i915(dev),
-                                                               limit,
-                                                               &clock))
-                                               continue;
-                                       if (match_clock &&
-                                           clock.p != match_clock->p)
-                                               continue;
-
-                                       this_err = abs(clock.dot - target);
-                                       if (this_err < err) {
-                                               *best_clock = clock;
-                                               err = this_err;
-                                       }
-                               }
-                       }
-               }
-       }
-
-       return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-pnv_find_best_dpll(const struct intel_limit *limit,
-                  struct intel_crtc_state *crtc_state,
-                  int target, int refclk, struct dpll *match_clock,
-                  struct dpll *best_clock)
-{
-       struct drm_device *dev = crtc_state->base.crtc->dev;
-       struct dpll clock;
-       int err = target;
-
-       memset(best_clock, 0, sizeof(*best_clock));
-
-       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
-       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
-            clock.m1++) {
-               for (clock.m2 = limit->m2.min;
-                    clock.m2 <= limit->m2.max; clock.m2++) {
-                       for (clock.n = limit->n.min;
-                            clock.n <= limit->n.max; clock.n++) {
-                               for (clock.p1 = limit->p1.min;
-                                       clock.p1 <= limit->p1.max; clock.p1++) {
-                                       int this_err;
-
-                                       pnv_calc_dpll_params(refclk, &clock);
-                                       if (!intel_PLL_is_valid(to_i915(dev),
-                                                               limit,
-                                                               &clock))
-                                               continue;
-                                       if (match_clock &&
-                                           clock.p != match_clock->p)
-                                               continue;
-
-                                       this_err = abs(clock.dot - target);
-                                       if (this_err < err) {
-                                               *best_clock = clock;
-                                               err = this_err;
-                                       }
-                               }
-                       }
-               }
-       }
-
-       return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-g4x_find_best_dpll(const struct intel_limit *limit,
-                  struct intel_crtc_state *crtc_state,
-                  int target, int refclk, struct dpll *match_clock,
-                  struct dpll *best_clock)
-{
-       struct drm_device *dev = crtc_state->base.crtc->dev;
-       struct dpll clock;
-       int max_n;
-       bool found = false;
-       /* approximately equals target * 0.00585 */
-       int err_most = (target >> 8) + (target >> 9);
-
-       memset(best_clock, 0, sizeof(*best_clock));
-
-       clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
-       max_n = limit->n.max;
-       /* based on hardware requirement, prefer smaller n to precision */
-       for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-               /* based on hardware requirement, prefer larger m1,m2 */
-               for (clock.m1 = limit->m1.max;
-                    clock.m1 >= limit->m1.min; clock.m1--) {
-                       for (clock.m2 = limit->m2.max;
-                            clock.m2 >= limit->m2.min; clock.m2--) {
-                               for (clock.p1 = limit->p1.max;
-                                    clock.p1 >= limit->p1.min; clock.p1--) {
-                                       int this_err;
-
-                                       i9xx_calc_dpll_params(refclk, &clock);
-                                       if (!intel_PLL_is_valid(to_i915(dev),
-                                                               limit,
-                                                               &clock))
-                                               continue;
-
-                                       this_err = abs(clock.dot - target);
-                                       if (this_err < err_most) {
-                                               *best_clock = clock;
-                                               err_most = this_err;
-                                               max_n = clock.n;
-                                               found = true;
-                                       }
-                               }
-                       }
-               }
-       }
-       return found;
-}
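
For reference, a standalone sketch of the err_most shortcut above: (target >> 8) + (target >> 9) is roughly target * (1/256 + 1/512), i.e. about the 0.00585 factor mentioned in the comment. The target value is made up for illustration:

#include <stdio.h>

int main(void)
{
        int target = 270000;                            /* kHz, hypothetical */
        int err_most = (target >> 8) + (target >> 9);   /* 1054 + 527 = 1581 */

        printf("err_most = %d kHz (%.5f of target)\n",
               err_most, (double)err_most / target);    /* ~0.00586 */
        return 0;
}
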
-
-/*
- * Check if the calculated PLL configuration is more optimal compared to the
- * best configuration and error found so far. Return the calculated error.
- */
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
-                              const struct dpll *calculated_clock,
-                              const struct dpll *best_clock,
-                              unsigned int best_error_ppm,
-                              unsigned int *error_ppm)
-{
-       /*
-        * For CHV ignore the error and consider only the P value.
-        * Prefer a bigger P value based on HW requirements.
-        */
-       if (IS_CHERRYVIEW(to_i915(dev))) {
-               *error_ppm = 0;
-
-               return calculated_clock->p > best_clock->p;
-       }
-
-       if (WARN_ON_ONCE(!target_freq))
-               return false;
-
-       *error_ppm = div_u64(1000000ULL *
-                               abs(target_freq - calculated_clock->dot),
-                            target_freq);
-       /*
-        * Prefer a better P value over a better (smaller) error if the error
-        * is small. Ensure this preference for future configurations too by
-        * setting the error to 0.
-        */
-       if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
-               *error_ppm = 0;
-
-               return true;
-       }
-
-       return *error_ppm + 10 < best_error_ppm;
-}
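
For reference, a standalone sketch of the error_ppm metric computed above, with made-up clock values; 26 kHz off a 270000 kHz target comes out to 96 ppm, inside the < 100 ppm window in which the code prefers a larger P divider over a marginally smaller error:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t target = 270000;       /* kHz, hypothetical target clock */
        uint64_t dot = 269974;          /* kHz, hypothetical calculated clock */
        uint64_t delta = target > dot ? target - dot : dot - target;
        uint64_t error_ppm = 1000000ULL * delta / target;

        printf("error = %llu ppm\n", (unsigned long long)error_ppm);    /* 96 */
        return 0;
}
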
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-vlv_find_best_dpll(const struct intel_limit *limit,
-                  struct intel_crtc_state *crtc_state,
-                  int target, int refclk, struct dpll *match_clock,
-                  struct dpll *best_clock)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct dpll clock;
-       unsigned int bestppm = 1000000;
-       /* min update 19.2 MHz */
-       int max_n = min(limit->n.max, refclk / 19200);
-       bool found = false;
-
-       target *= 5; /* fast clock */
-
-       memset(best_clock, 0, sizeof(*best_clock));
-
-       /* based on hardware requirement, prefer smaller n to precision */
-       for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-               for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-                       for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
-                            clock.p2 -= clock.p2 > 10 ? 2 : 1) {
-                               clock.p = clock.p1 * clock.p2;
-                               /* based on hardware requirement, prefer bigger m1,m2 values */
-                               for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-                                       unsigned int ppm;
-
-                                       clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
-                                                                    refclk * clock.m1);
-
-                                       vlv_calc_dpll_params(refclk, &clock);
-
-                                       if (!intel_PLL_is_valid(to_i915(dev),
-                                                               limit,
-                                                               &clock))
-                                               continue;
-
-                                       if (!vlv_PLL_is_optimal(dev, target,
-                                                               &clock,
-                                                               best_clock,
-                                                               bestppm, &ppm))
-                                               continue;
-
-                                       *best_clock = clock;
-                                       bestppm = ppm;
-                                       found = true;
-                               }
-                       }
-               }
-       }
-
-       return found;
-}
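
For reference, a standalone sketch of the max_n clamp above; the refclk value is hypothetical, and the intent (per the "min update 19.2 MHz" comment) appears to be keeping refclk / n at or above 19200 kHz:

#include <stdio.h>

static int min_int(int a, int b)
{
        return a < b ? a : b;
}

int main(void)
{
        int refclk = 100000;            /* kHz, hypothetical */
        int n_max = 7;                  /* limit->n.max from intel_limits_vlv above */
        int max_n = min_int(n_max, refclk / 19200);     /* min(7, 5) = 5 */

        printf("max_n = %d\n", max_n);
        return 0;
}
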
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-chv_find_best_dpll(const struct intel_limit *limit,
-                  struct intel_crtc_state *crtc_state,
-                  int target, int refclk, struct dpll *match_clock,
-                  struct dpll *best_clock)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       unsigned int best_error_ppm;
-       struct dpll clock;
-       u64 m2;
-       bool found = false;
-
-       memset(best_clock, 0, sizeof(*best_clock));
-       best_error_ppm = 1000000;
-
-       /*
-        * Based on the hardware doc, n is always set to 1 and m1 is always
-        * set to 2.  If we need to support a 200MHz refclk, we will need to
-        * revisit this because n may no longer be 1.
-        */
-       clock.n = 1, clock.m1 = 2;
-       target *= 5;    /* fast clock */
-
-       for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-               for (clock.p2 = limit->p2.p2_fast;
-                               clock.p2 >= limit->p2.p2_slow;
-                               clock.p2 -= clock.p2 > 10 ? 2 : 1) {
-                       unsigned int error_ppm;
-
-                       clock.p = clock.p1 * clock.p2;
-
-                       m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
-                                                  refclk * clock.m1);
-
-                       if (m2 > INT_MAX/clock.m1)
-                               continue;
-
-                       clock.m2 = m2;
-
-                       chv_calc_dpll_params(refclk, &clock);
-
-                       if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
-                               continue;
-
-                       if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
-                                               best_error_ppm, &error_ppm))
-                               continue;
-
-                       *best_clock = clock;
-                       best_error_ppm = error_ppm;
-                       found = true;
-               }
-       }
-
-       return found;
-}
-
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
-                       struct dpll *best_clock)
-{
-       int refclk = 100000;
-       const struct intel_limit *limit = &intel_limits_bxt;
-
-       return chv_find_best_dpll(limit, crtc_state,
-                                 crtc_state->port_clock, refclk,
-                                 NULL, best_clock);
-}
-
-bool intel_crtc_active(struct intel_crtc *crtc)
-{
-       /* Be paranoid as we can arrive here with only partial
-        * state retrieved from the hardware during setup.
-        *
-        * We can ditch the adjusted_mode.crtc_clock check as soon
-        * as Haswell has gained clock readout/fastboot support.
-        *
-        * We can ditch the crtc->primary->state->fb check as soon as we can
-        * properly reconstruct framebuffers.
-        *
-        * FIXME: The intel_crtc->active here should be switched to
-        * crtc->state->active once we have proper CRTC states wired up
-        * for atomic.
-        */
-       return crtc->active && crtc->base.primary->state->fb &&
-               crtc->config->base.adjusted_mode.crtc_clock;
-}
-
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-                                            enum pipe pipe)
-{
-       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-       return crtc->config->cpu_transcoder;
-}
-
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
-                                   enum pipe pipe)
-{
-       i915_reg_t reg = PIPEDSL(pipe);
-       u32 line1, line2;
-       u32 line_mask;
-
-       if (IS_GEN(dev_priv, 2))
-               line_mask = DSL_LINEMASK_GEN2;
-       else
-               line_mask = DSL_LINEMASK_GEN3;
-
-       line1 = I915_READ(reg) & line_mask;
-       msleep(5);
-       line2 = I915_READ(reg) & line_mask;
-
-       return line1 != line2;
-}
-
-static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       /* Wait for the display line to settle/start moving */
-       if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
-               DRM_ERROR("pipe %c scanline %s wait timed out\n",
-                         pipe_name(pipe), onoff(state));
-}
-
-static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
-{
-       wait_for_pipe_scanline_moving(crtc, false);
-}
-
-static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
-{
-       wait_for_pipe_scanline_moving(crtc, true);
-}
-
-static void
-intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (INTEL_GEN(dev_priv) >= 4) {
-               enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-               i915_reg_t reg = PIPECONF(cpu_transcoder);
-
-               /* Wait for the Pipe State to go off */
-               if (intel_wait_for_register(&dev_priv->uncore,
-                                           reg, I965_PIPECONF_ACTIVE, 0,
-                                           100))
-                       WARN(1, "pipe_off wait timed out\n");
-       } else {
-               intel_wait_for_pipe_scanline_stopped(crtc);
-       }
-}
-
-/* Only for pre-ILK configs */
-void assert_pll(struct drm_i915_private *dev_priv,
-               enum pipe pipe, bool state)
-{
-       u32 val;
-       bool cur_state;
-
-       val = I915_READ(DPLL(pipe));
-       cur_state = !!(val & DPLL_VCO_ENABLE);
-       I915_STATE_WARN(cur_state != state,
-            "PLL state assertion failure (expected %s, current %s)\n",
-                       onoff(state), onoff(cur_state));
-}
-
-/* XXX: the dsi pll is shared between MIPI DSI ports */
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
-{
-       u32 val;
-       bool cur_state;
-
-       vlv_cck_get(dev_priv);
-       val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
-       vlv_cck_put(dev_priv);
-
-       cur_state = val & DSI_PLL_VCO_EN;
-       I915_STATE_WARN(cur_state != state,
-            "DSI PLL state assertion failure (expected %s, current %s)\n",
-                       onoff(state), onoff(cur_state));
-}
-
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, bool state)
-{
-       bool cur_state;
-       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-                                                                     pipe);
-
-       if (HAS_DDI(dev_priv)) {
-               /* DDI does not have a specific FDI_TX register */
-               u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
-               cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
-       } else {
-               u32 val = I915_READ(FDI_TX_CTL(pipe));
-               cur_state = !!(val & FDI_TX_ENABLE);
-       }
-       I915_STATE_WARN(cur_state != state,
-            "FDI TX state assertion failure (expected %s, current %s)\n",
-                       onoff(state), onoff(cur_state));
-}
-#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
-#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
-
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, bool state)
-{
-       u32 val;
-       bool cur_state;
-
-       val = I915_READ(FDI_RX_CTL(pipe));
-       cur_state = !!(val & FDI_RX_ENABLE);
-       I915_STATE_WARN(cur_state != state,
-            "FDI RX state assertion failure (expected %s, current %s)\n",
-                       onoff(state), onoff(cur_state));
-}
-#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
-#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
-
-static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
-                                     enum pipe pipe)
-{
-       u32 val;
-
-       /* ILK FDI PLL is always enabled */
-       if (IS_GEN(dev_priv, 5))
-               return;
-
-       /* On Haswell, DDI ports are responsible for the FDI PLL setup */
-       if (HAS_DDI(dev_priv))
-               return;
-
-       val = I915_READ(FDI_TX_CTL(pipe));
-       I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
-}
-
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
-                      enum pipe pipe, bool state)
-{
-       u32 val;
-       bool cur_state;
-
-       val = I915_READ(FDI_RX_CTL(pipe));
-       cur_state = !!(val & FDI_RX_PLL_ENABLE);
-       I915_STATE_WARN(cur_state != state,
-            "FDI RX PLL assertion failure (expected %s, current %s)\n",
-                       onoff(state), onoff(cur_state));
-}
-
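-/*
- * Warn if the panel power sequencer registers for the panel feeding this
- * pipe are still write protected while the panel is powered on. The PPS
- * port select (or the pipe itself on VLV/CHV) identifies the panel's pipe.
- */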
-void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       i915_reg_t pp_reg;
-       u32 val;
-       enum pipe panel_pipe = INVALID_PIPE;
-       bool locked = true;
-
-       if (WARN_ON(HAS_DDI(dev_priv)))
-               return;
-
-       if (HAS_PCH_SPLIT(dev_priv)) {
-               u32 port_sel;
-
-               pp_reg = PP_CONTROL(0);
-               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
-               switch (port_sel) {
-               case PANEL_PORT_SELECT_LVDS:
-                       intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
-                       break;
-               case PANEL_PORT_SELECT_DPA:
-                       intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
-                       break;
-               case PANEL_PORT_SELECT_DPC:
-                       intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
-                       break;
-               case PANEL_PORT_SELECT_DPD:
-                       intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
-                       break;
-               default:
-                       MISSING_CASE(port_sel);
-                       break;
-               }
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               /* presumably write lock depends on pipe, not port select */
-               pp_reg = PP_CONTROL(pipe);
-               panel_pipe = pipe;
-       } else {
-               u32 port_sel;
-
-               pp_reg = PP_CONTROL(0);
-               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
-               WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
-               intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
-       }
-
-       val = I915_READ(pp_reg);
-       if (!(val & PANEL_POWER_ON) ||
-           ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
-               locked = false;
-
-       I915_STATE_WARN(panel_pipe == pipe && locked,
-            "panel assertion failure, pipe %c regs locked\n",
-            pipe_name(pipe));
-}
-
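-/*
- * Assert pipe enable state by reading PIPECONF for the pipe's transcoder,
- * treating a powered-down transcoder as disabled. 830 keeps both pipes
- * enabled, so the expected state is forced to true there.
- */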
-void assert_pipe(struct drm_i915_private *dev_priv,
-                enum pipe pipe, bool state)
-{
-       bool cur_state;
-       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-                                                                     pipe);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-
-       /* we keep both pipes enabled on 830 */
-       if (IS_I830(dev_priv))
-               state = true;
-
-       power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (wakeref) {
-               u32 val = I915_READ(PIPECONF(cpu_transcoder));
-               cur_state = !!(val & PIPECONF_ENABLE);
-
-               intel_display_power_put(dev_priv, power_domain, wakeref);
-       } else {
-               cur_state = false;
-       }
-
-       I915_STATE_WARN(cur_state != state,
-            "pipe %c assertion failure (expected %s, current %s)\n",
-                       pipe_name(pipe), onoff(state), onoff(cur_state));
-}
-
-static void assert_plane(struct intel_plane *plane, bool state)
-{
-       enum pipe pipe;
-       bool cur_state;
-
-       cur_state = plane->get_hw_state(plane, &pipe);
-
-       I915_STATE_WARN(cur_state != state,
-                       "%s assertion failure (expected %s, current %s)\n",
-                       plane->base.name, onoff(state), onoff(cur_state));
-}
-
-#define assert_plane_enabled(p) assert_plane(p, true)
-#define assert_plane_disabled(p) assert_plane(p, false)
-
-static void assert_planes_disabled(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_plane *plane;
-
-       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
-               assert_plane_disabled(plane);
-}
-
-static void assert_vblank_disabled(struct drm_crtc *crtc)
-{
-       if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
-               drm_crtc_vblank_put(crtc);
-}
-
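-/* Warn if the PCH transcoder for the given pipe is still enabled. */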
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
-                                   enum pipe pipe)
-{
-       u32 val;
-       bool enabled;
-
-       val = I915_READ(PCH_TRANSCONF(pipe));
-       enabled = !!(val & TRANS_ENABLE);
-       I915_STATE_WARN(enabled,
-            "transcoder assertion failed, should be off on pipe %c but is still active\n",
-            pipe_name(pipe));
-}
-
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
-                                  enum pipe pipe, enum port port,
-                                  i915_reg_t dp_reg)
-{
-       enum pipe port_pipe;
-       bool state;
-
-       state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
-
-       I915_STATE_WARN(state && port_pipe == pipe,
-                       "PCH DP %c enabled on transcoder %c, should be disabled\n",
-                       port_name(port), pipe_name(pipe));
-
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
-                       "IBX PCH DP %c still using transcoder B\n",
-                       port_name(port));
-}
-
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
-                                    enum pipe pipe, enum port port,
-                                    i915_reg_t hdmi_reg)
-{
-       enum pipe port_pipe;
-       bool state;
-
-       state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-
-       I915_STATE_WARN(state && port_pipe == pipe,
-                       "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
-                       port_name(port), pipe_name(pipe));
-
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
-                       "IBX PCH HDMI %c still using transcoder B\n",
-                       port_name(port));
-}
-
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
-                                     enum pipe pipe)
-{
-       enum pipe port_pipe;
-
-       assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
-       assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
-       assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
-
-       I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
-                       port_pipe == pipe,
-                       "PCH VGA enabled on transcoder %c, should be disabled\n",
-                       pipe_name(pipe));
-
-       I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
-                       port_pipe == pipe,
-                       "PCH LVDS enabled on transcoder %c, should be disabled\n",
-                       pipe_name(pipe));
-
-       /* PCH SDVOB multiplex with HDMIB */
-       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
-       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
-       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
-}
-
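-/*
- * Program the VLV DPLL and wait for it to report lock; the wrapper below
- * first checks that the pipe is off and the panel registers are unlocked.
- */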
-static void _vlv_enable_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   DPLL(pipe),
-                                   DPLL_LOCK_VLV,
-                                   DPLL_LOCK_VLV,
-                                   1))
-               DRM_ERROR("DPLL %d failed to lock\n", pipe);
-}
-
-static void vlv_enable_pll(struct intel_crtc *crtc,
-                          const struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       assert_pipe_disabled(dev_priv, pipe);
-
-       /* PLL is protected by panel, make sure we can write it */
-       assert_panel_unlocked(dev_priv, pipe);
-
-       if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
-               _vlv_enable_pll(crtc, pipe_config);
-
-       I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
-       POSTING_READ(DPLL_MD(pipe));
-}
-
-static void _chv_enable_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       enum dpio_channel port = vlv_pipe_to_channel(pipe);
-       u32 tmp;
-
-       vlv_dpio_get(dev_priv);
-
-       /* Enable back the 10bit clock to display controller */
-       tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
-       tmp |= DPIO_DCLKP_EN;
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
-
-       vlv_dpio_put(dev_priv);
-
-       /*
-        * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
-        */
-       udelay(1);
-
-       /* Enable PLL */
-       I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-
-       /* Check PLL is locked */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
-                                   1))
-               DRM_ERROR("PLL %d failed to lock\n", pipe);
-}
-
-static void chv_enable_pll(struct intel_crtc *crtc,
-                          const struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       assert_pipe_disabled(dev_priv, pipe);
-
-       /* PLL is protected by panel, make sure we can write it */
-       assert_panel_unlocked(dev_priv, pipe);
-
-       if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
-               _chv_enable_pll(crtc, pipe_config);
-
-       if (pipe != PIPE_A) {
-               /*
-                * WaPixelRepeatModeFixForC0:chv
-                *
-                * DPLLCMD is AWOL. Use chicken bits to propagate
-                * the value from DPLLBMD to either pipe B or C.
-                */
-               I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
-               I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
-               I915_WRITE(CBR4_VLV, 0);
-               dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
-
-               /*
-                * DPLLB VGA mode also seems to cause problems.
-                * We should always have it disabled.
-                */
-               WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
-       } else {
-               I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
-               POSTING_READ(DPLL_MD(pipe));
-       }
-}
-
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
-{
-       if (IS_I830(dev_priv))
-               return false;
-
-       return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
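-/*
- * Enable a gen2-4 DPLL: program the dividers, wait for the clocks to
- * stabilize, then write the pixel multiplier (via DPLL_MD on gen4+, by
- * rewriting DPLL otherwise) and rewrite the register three more times.
- */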
-static void i9xx_enable_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       i915_reg_t reg = DPLL(crtc->pipe);
-       u32 dpll = crtc_state->dpll_hw_state.dpll;
-       int i;
-
-       assert_pipe_disabled(dev_priv, crtc->pipe);
-
-       /* PLL is protected by panel, make sure we can write it */
-       if (i9xx_has_pps(dev_priv))
-               assert_panel_unlocked(dev_priv, crtc->pipe);
-
-       /*
-        * Apparently we need to have VGA mode enabled prior to changing
-        * the P1/P2 dividers. Otherwise the DPLL will keep using the old
-        * dividers, even though the register value does change.
-        */
-       I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
-       I915_WRITE(reg, dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(reg);
-       udelay(150);
-
-       if (INTEL_GEN(dev_priv) >= 4) {
-               I915_WRITE(DPLL_MD(crtc->pipe),
-                          crtc_state->dpll_hw_state.dpll_md);
-       } else {
-               /* The pixel multiplier can only be updated once the
-                * DPLL is enabled and the clocks are stable.
-                *
-                * So write it again.
-                */
-               I915_WRITE(reg, dpll);
-       }
-
-       /* We do this three times for luck */
-       for (i = 0; i < 3; i++) {
-               I915_WRITE(reg, dpll);
-               POSTING_READ(reg);
-               udelay(150); /* wait for warmup */
-       }
-}
-
-static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       /* Don't disable pipe or pipe PLLs if needed */
-       if (IS_I830(dev_priv))
-               return;
-
-       /* Make sure the pipe isn't still relying on us */
-       assert_pipe_disabled(dev_priv, pipe);
-
-       I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
-       POSTING_READ(DPLL(pipe));
-}
-
-static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       u32 val;
-
-       /* Make sure the pipe isn't still relying on us */
-       assert_pipe_disabled(dev_priv, pipe);
-
-       val = DPLL_INTEGRATED_REF_CLK_VLV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-       if (pipe != PIPE_A)
-               val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-       I915_WRITE(DPLL(pipe), val);
-       POSTING_READ(DPLL(pipe));
-}
-
-static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       enum dpio_channel port = vlv_pipe_to_channel(pipe);
-       u32 val;
-
-       /* Make sure the pipe isn't still relying on us */
-       assert_pipe_disabled(dev_priv, pipe);
-
-       val = DPLL_SSC_REF_CLK_CHV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-       if (pipe != PIPE_A)
-               val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-       I915_WRITE(DPLL(pipe), val);
-       POSTING_READ(DPLL(pipe));
-
-       vlv_dpio_get(dev_priv);
-
-       /* Disable 10bit clock to display controller */
-       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
-       val &= ~DPIO_DCLKP_EN;
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
-
-       vlv_dpio_put(dev_priv);
-}
-
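-/*
- * Poll the per-port ready bits (in DPLL(0) for ports B/C, DPIO_PHY_STATUS
- * for port D) until they match the expected mask, warning on timeout.
- */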
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
-                        struct intel_digital_port *dport,
-                        unsigned int expected_mask)
-{
-       u32 port_mask;
-       i915_reg_t dpll_reg;
-
-       switch (dport->base.port) {
-       case PORT_B:
-               port_mask = DPLL_PORTB_READY_MASK;
-               dpll_reg = DPLL(0);
-               break;
-       case PORT_C:
-               port_mask = DPLL_PORTC_READY_MASK;
-               dpll_reg = DPLL(0);
-               expected_mask <<= 4;
-               break;
-       case PORT_D:
-               port_mask = DPLL_PORTD_READY_MASK;
-               dpll_reg = DPIO_PHY_STATUS;
-               break;
-       default:
-               BUG();
-       }
-
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   dpll_reg, port_mask, expected_mask,
-                                   1000))
-               WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
-                    port_name(dport->base.port),
-                    I915_READ(dpll_reg) & port_mask, expected_mask);
-}
-
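-/*
- * Enable the PCH transcoder for the pipe: requires the shared DPLL and
- * both FDI directions to be up, mirrors the interlace mode (and, on IBX,
- * the BPC) from PIPECONF, and waits for the transcoder to report enabled.
- */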
-static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 val, pipeconf_val;
-
-       /* Make sure PCH DPLL is enabled */
-       assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
-
-       /* FDI must be feeding us bits for PCH ports */
-       assert_fdi_tx_enabled(dev_priv, pipe);
-       assert_fdi_rx_enabled(dev_priv, pipe);
-
-       if (HAS_PCH_CPT(dev_priv)) {
-               /* Workaround: Set the timing override bit before enabling the
-                * pch transcoder. */
-               reg = TRANS_CHICKEN2(pipe);
-               val = I915_READ(reg);
-               val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
-               I915_WRITE(reg, val);
-       }
-
-       reg = PCH_TRANSCONF(pipe);
-       val = I915_READ(reg);
-       pipeconf_val = I915_READ(PIPECONF(pipe));
-
-       if (HAS_PCH_IBX(dev_priv)) {
-               /*
-                * Make the BPC in transcoder be consistent with
-                * that in pipeconf reg. For HDMI we must use 8bpc
-                * here for both 8bpc and 12bpc.
-                */
-               val &= ~PIPECONF_BPC_MASK;
-               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-                       val |= PIPECONF_8BPC;
-               else
-                       val |= pipeconf_val & PIPECONF_BPC_MASK;
-       }
-
-       val &= ~TRANS_INTERLACE_MASK;
-       if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
-               if (HAS_PCH_IBX(dev_priv) &&
-                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
-                       val |= TRANS_LEGACY_INTERLACED_ILK;
-               else
-                       val |= TRANS_INTERLACED;
-       } else {
-               val |= TRANS_PROGRESSIVE;
-       }
-
-       I915_WRITE(reg, val | TRANS_ENABLE);
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
-                                   100))
-               DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
-}
-
-static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
-                                     enum transcoder cpu_transcoder)
-{
-       u32 val, pipeconf_val;
-
-       /* FDI must be feeding us bits for PCH ports */
-       assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
-       assert_fdi_rx_enabled(dev_priv, PIPE_A);
-
-       /* Workaround: set timing override bit. */
-       val = I915_READ(TRANS_CHICKEN2(PIPE_A));
-       val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
-       I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
-
-       val = TRANS_ENABLE;
-       pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
-
-       if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
-           PIPECONF_INTERLACED_ILK)
-               val |= TRANS_INTERLACED;
-       else
-               val |= TRANS_PROGRESSIVE;
-
-       I915_WRITE(LPT_TRANSCONF, val);
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   LPT_TRANSCONF,
-                                   TRANS_STATE_ENABLE,
-                                   TRANS_STATE_ENABLE,
-                                   100))
-               DRM_ERROR("Failed to enable PCH transcoder\n");
-}
-
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
-                                           enum pipe pipe)
-{
-       i915_reg_t reg;
-       u32 val;
-
-       /* FDI relies on the transcoder */
-       assert_fdi_tx_disabled(dev_priv, pipe);
-       assert_fdi_rx_disabled(dev_priv, pipe);
-
-       /* Ports must be off as well */
-       assert_pch_ports_disabled(dev_priv, pipe);
-
-       reg = PCH_TRANSCONF(pipe);
-       val = I915_READ(reg);
-       val &= ~TRANS_ENABLE;
-       I915_WRITE(reg, val);
-       /* wait for PCH transcoder off, transcoder state */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   reg, TRANS_STATE_ENABLE, 0,
-                                   50))
-               DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
-
-       if (HAS_PCH_CPT(dev_priv)) {
-               /* Workaround: Clear the timing override chicken bit again. */
-               reg = TRANS_CHICKEN2(pipe);
-               val = I915_READ(reg);
-               val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
-               I915_WRITE(reg, val);
-       }
-}
-
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = I915_READ(LPT_TRANSCONF);
-       val &= ~TRANS_ENABLE;
-       I915_WRITE(LPT_TRANSCONF, val);
-       /* wait for PCH transcoder off, transcoder state */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
-                                   50))
-               DRM_ERROR("Failed to disable PCH transcoder\n");
-
-       /* Workaround: clear timing override bit. */
-       val = I915_READ(TRANS_CHICKEN2(PIPE_A));
-       val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
-       I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
-}
-
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (HAS_PCH_LPT(dev_priv))
-               return PIPE_A;
-       else
-               return crtc->pipe;
-}
-
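-/*
- * Maximum usable value of the hardware frame counter: full 32 bits on
- * gen5+/g4x, 24 bits on gen3/4, and 0 (no usable counter) on gen2 or on
- * i965gm with the TV encoder enabled.
- */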
-static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       /*
-        * On i965gm the hardware frame counter reads
-        * zero when the TV encoder is enabled :(
-        */
-       if (IS_I965GM(dev_priv) &&
-           (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
-               return 0;
-
-       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-               return 0xffffffff; /* full 32 bit counter */
-       else if (INTEL_GEN(dev_priv) >= 3)
-               return 0xffffff; /* only 24 bits of frame count */
-       else
-               return 0; /* Gen2 doesn't have a hardware frame counter */
-}
-
-static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       drm_crtc_set_max_vblank_count(&crtc->base,
-                                     intel_crtc_max_vblank_count(crtc_state));
-       drm_crtc_vblank_on(&crtc->base);
-}
-
-static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
-       enum pipe pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 val;
-
-       DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
-
-       assert_planes_disabled(crtc);
-
-       /*
-        * A pipe without a PLL won't actually be able to drive bits from
-        * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
-        * need the check.
-        */
-       if (HAS_GMCH(dev_priv)) {
-               if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
-                       assert_dsi_pll_enabled(dev_priv);
-               else
-                       assert_pll_enabled(dev_priv, pipe);
-       } else {
-               if (new_crtc_state->has_pch_encoder) {
-                       /* if driving the PCH, we need FDI enabled */
-                       assert_fdi_rx_pll_enabled(dev_priv,
-                                                 intel_crtc_pch_transcoder(crtc));
-                       assert_fdi_tx_pll_enabled(dev_priv,
-                                                 (enum pipe) cpu_transcoder);
-               }
-               /* FIXME: assert CPU port conditions for SNB+ */
-       }
-
-       trace_intel_pipe_enable(dev_priv, pipe);
-
-       reg = PIPECONF(cpu_transcoder);
-       val = I915_READ(reg);
-       if (val & PIPECONF_ENABLE) {
-               /* we keep both pipes enabled on 830 */
-               WARN_ON(!IS_I830(dev_priv));
-               return;
-       }
-
-       I915_WRITE(reg, val | PIPECONF_ENABLE);
-       POSTING_READ(reg);
-
-       /*
-        * Until the pipe starts PIPEDSL reads will return a stale value,
-        * which causes an apparent vblank timestamp jump when PIPEDSL
-        * resets to its proper value. That also messes up the frame count
-        * when it's derived from the timestamps. So let's wait for the
-        * pipe to start properly before we call drm_crtc_vblank_on()
-        */
-       if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
-               intel_wait_for_pipe_scanline_moving(crtc);
-}
-
-static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-       enum pipe pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 val;
-
-       DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
-
-       /*
-        * Make sure planes won't keep trying to pump pixels to us,
-        * or we might hang the display.
-        */
-       assert_planes_disabled(crtc);
-
-       trace_intel_pipe_disable(dev_priv, pipe);
-
-       reg = PIPECONF(cpu_transcoder);
-       val = I915_READ(reg);
-       if ((val & PIPECONF_ENABLE) == 0)
-               return;
-
-       /*
-        * Double wide has implications for planes
-        * so best keep it disabled when not needed.
-        */
-       if (old_crtc_state->double_wide)
-               val &= ~PIPECONF_DOUBLE_WIDE;
-
-       /* Don't disable pipe or pipe PLLs if needed */
-       if (!IS_I830(dev_priv))
-               val &= ~PIPECONF_ENABLE;
-
-       I915_WRITE(reg, val);
-       if ((val & PIPECONF_ENABLE) == 0)
-               intel_wait_for_pipe_off(old_crtc_state);
-}
-
-static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
-{
-       return IS_GEN(dev_priv, 2) ? 2048 : 4096;
-}
-
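-/*
- * Width of one tile row in bytes for the given modifier and color plane;
- * linear surfaces are treated as a single page-sized "tile".
- */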
-static unsigned int
-intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
-{
-       struct drm_i915_private *dev_priv = to_i915(fb->dev);
-       unsigned int cpp = fb->format->cpp[color_plane];
-
-       switch (fb->modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-               return intel_tile_size(dev_priv);
-       case I915_FORMAT_MOD_X_TILED:
-               if (IS_GEN(dev_priv, 2))
-                       return 128;
-               else
-                       return 512;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-               if (color_plane == 1)
-                       return 128;
-               /* fall through */
-       case I915_FORMAT_MOD_Y_TILED:
-               if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
-                       return 128;
-               else
-                       return 512;
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               if (color_plane == 1)
-                       return 128;
-               /* fall through */
-       case I915_FORMAT_MOD_Yf_TILED:
-               switch (cpp) {
-               case 1:
-                       return 64;
-               case 2:
-               case 4:
-                       return 128;
-               case 8:
-               case 16:
-                       return 256;
-               default:
-                       MISSING_CASE(cpp);
-                       return cpp;
-               }
-               break;
-       default:
-               MISSING_CASE(fb->modifier);
-               return cpp;
-       }
-}
-
-static unsigned int
-intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
-{
-       return intel_tile_size(to_i915(fb->dev)) /
-               intel_tile_width_bytes(fb, color_plane);
-}
-
-/* Return the tile dimensions in pixel units */
-static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
-                           unsigned int *tile_width,
-                           unsigned int *tile_height)
-{
-       unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
-       unsigned int cpp = fb->format->cpp[color_plane];
-
-       *tile_width = tile_width_bytes / cpp;
-       *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
-}
-
-unsigned int
-intel_fb_align_height(const struct drm_framebuffer *fb,
-                     int color_plane, unsigned int height)
-{
-       unsigned int tile_height = intel_tile_height(fb, color_plane);
-
-       return ALIGN(height, tile_height);
-}
-
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
-{
-       unsigned int size = 0;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
-               size += rot_info->plane[i].width * rot_info->plane[i].height;
-
-       return size;
-}
-
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
-{
-       unsigned int size = 0;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
-               size += rem_info->plane[i].width * rem_info->plane[i].height;
-
-       return size;
-}
-
-static void
-intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
-                       const struct drm_framebuffer *fb,
-                       unsigned int rotation)
-{
-       view->type = I915_GGTT_VIEW_NORMAL;
-       if (drm_rotation_90_or_270(rotation)) {
-               view->type = I915_GGTT_VIEW_ROTATED;
-               view->rotated = to_intel_framebuffer(fb)->rot_info;
-       }
-}
-
-static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
-{
-       if (IS_I830(dev_priv))
-               return 16 * 1024;
-       else if (IS_I85X(dev_priv))
-               return 256;
-       else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
-               return 32;
-       else
-               return 4 * 1024;
-}
-
-static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) >= 9)
-               return 256 * 1024;
-       else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
-                IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return 128 * 1024;
-       else if (INTEL_GEN(dev_priv) >= 4)
-               return 4 * 1024;
-       else
-               return 0;
-}
-
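-/*
- * Required GGTT alignment of the surface base address for the given
- * modifier; CCS AUX planes only need 4K.
- */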
-static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
-                                        int color_plane)
-{
-       struct drm_i915_private *dev_priv = to_i915(fb->dev);
-
-       /* AUX_DIST needs only 4K alignment */
-       if (color_plane == 1)
-               return 4096;
-
-       switch (fb->modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-               return intel_linear_alignment(dev_priv);
-       case I915_FORMAT_MOD_X_TILED:
-               if (INTEL_GEN(dev_priv) >= 9)
-                       return 256 * 1024;
-               return 0;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Yf_TILED:
-               return 1 * 1024 * 1024;
-       default:
-               MISSING_CASE(fb->modifier);
-               return 0;
-       }
-}
-
-static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
-       return INTEL_GEN(dev_priv) < 4 ||
-               (plane->has_fbc &&
-                plane_state->view.type == I915_GGTT_VIEW_NORMAL);
-}
-
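-/*
- * Pin a framebuffer object into the GGTT for scanout (holding a runtime
- * PM wakeref around the pin) and, when possible, attach a fence for tiled
- * scanout; PLANE_HAS_FENCE is reported back through *out_flags.
- */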
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-                          const struct i915_ggtt_view *view,
-                          bool uses_fence,
-                          unsigned long *out_flags)
-{
-       struct drm_device *dev = fb->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       intel_wakeref_t wakeref;
-       struct i915_vma *vma;
-       unsigned int pinctl;
-       u32 alignment;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       alignment = intel_surf_alignment(fb, 0);
-
-       /* Note that the w/a also requires 64 PTE of padding following the
-        * bo. We currently fill all unused PTE with the shadow page and so
-        * we should always have valid PTE following the scanout preventing
-        * the VT-d warning.
-        */
-       if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
-               alignment = 256 * 1024;
-
-       /*
-        * Global gtt pte registers are special registers which actually forward
-        * writes to a chunk of system memory. Which means that there is no risk
-        * that the register values disappear as soon as we call
-        * intel_runtime_pm_put(), so it is correct to wrap only the
-        * pin/unpin/fence and not more.
-        */
-       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-       i915_gem_object_lock(obj);
-
-       atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
-       pinctl = 0;
-
-       /* Valleyview is definitely limited to scanning out the first
-        * 512MiB. Let's presume this behaviour was inherited from the
-        * g4x display engine and that all earlier gen are similarly
-        * limited. Testing suggests that it is a little more
-        * complicated than this. For example, Cherryview appears quite
-        * happy to scanout from anywhere within its global aperture.
-        */
-       if (HAS_GMCH(dev_priv))
-               pinctl |= PIN_MAPPABLE;
-
-       vma = i915_gem_object_pin_to_display_plane(obj,
-                                                  alignment, view, pinctl);
-       if (IS_ERR(vma))
-               goto err;
-
-       if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
-               int ret;
-
-               /* Install a fence for tiled scan-out. Pre-i965 always needs a
-                * fence, whereas 965+ only requires a fence if using
-                * framebuffer compression.  For simplicity, we always, when
-                * possible, install a fence as the cost is not that onerous.
-                *
-                * If we fail to fence the tiled scanout, then either the
-                * modeset will reject the change (which is highly unlikely as
-                * the affected systems, all but one, do not have unmappable
-                * space) or we will not be able to enable full powersaving
-                * techniques (also likely not to apply due to various limits
-                * FBC and the like impose on the size of the buffer, which
-                * presumably we violated anyway with this unmappable buffer).
-                * Anyway, it is presumably better to stumble onwards with
-                * something and try to run the system in a "less than optimal"
-                * mode that matches the user configuration.
-                */
-               ret = i915_vma_pin_fence(vma);
-               if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
-                       i915_gem_object_unpin_from_display_plane(vma);
-                       vma = ERR_PTR(ret);
-                       goto err;
-               }
-
-               if (ret == 0 && vma->fence)
-                       *out_flags |= PLANE_HAS_FENCE;
-       }
-
-       i915_vma_get(vma);
-err:
-       atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
-       i915_gem_object_unlock(obj);
-       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-       return vma;
-}
-
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
-{
-       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
-       i915_gem_object_lock(vma->obj);
-       if (flags & PLANE_HAS_FENCE)
-               i915_vma_unpin_fence(vma);
-       i915_gem_object_unpin_from_display_plane(vma);
-       i915_gem_object_unlock(vma->obj);
-
-       i915_vma_put(vma);
-}
-
-static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
-                         unsigned int rotation)
-{
-       if (drm_rotation_90_or_270(rotation))
-               return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
-       else
-               return fb->pitches[color_plane];
-}
-
-/*
- * Convert the x/y offsets into a linear offset.
- * Only valid with 0/180 degree rotation, which is fine since linear
- * offset is only used with linear buffers on pre-hsw and tiled buffers
- * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
- */
-u32 intel_fb_xy_to_linear(int x, int y,
-                         const struct intel_plane_state *state,
-                         int color_plane)
-{
-       const struct drm_framebuffer *fb = state->base.fb;
-       unsigned int cpp = fb->format->cpp[color_plane];
-       unsigned int pitch = state->color_plane[color_plane].stride;
-
-       return y * pitch + x * cpp;
-}
-
-/*
- * Add the x/y offsets derived from fb->offsets[] to the user
- * specified plane src x/y offsets. The resulting x/y offsets
- * specify the start of scanout from the beginning of the gtt mapping.
- */
-void intel_add_fb_offsets(int *x, int *y,
-                         const struct intel_plane_state *state,
-                         int color_plane)
-{
-       *x += state->color_plane[color_plane].x;
-       *y += state->color_plane[color_plane].y;
-}
-
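-/*
- * Fold the difference between two tile-aligned surface offsets into the
- * x/y pixel offsets, then minimize x by moving whole rows into y.
- */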
-static u32 intel_adjust_tile_offset(int *x, int *y,
-                                   unsigned int tile_width,
-                                   unsigned int tile_height,
-                                   unsigned int tile_size,
-                                   unsigned int pitch_tiles,
-                                   u32 old_offset,
-                                   u32 new_offset)
-{
-       unsigned int pitch_pixels = pitch_tiles * tile_width;
-       unsigned int tiles;
-
-       WARN_ON(old_offset & (tile_size - 1));
-       WARN_ON(new_offset & (tile_size - 1));
-       WARN_ON(new_offset > old_offset);
-
-       tiles = (old_offset - new_offset) / tile_size;
-
-       *y += tiles / pitch_tiles * tile_height;
-       *x += tiles % pitch_tiles * tile_width;
-
-       /* minimize x in case it got needlessly big */
-       *y += *x / pitch_pixels * tile_height;
-       *x %= pitch_pixels;
-
-       return new_offset;
-}
-
-static bool is_surface_linear(u64 modifier, int color_plane)
-{
-       return modifier == DRM_FORMAT_MOD_LINEAR;
-}
-
-static u32 intel_adjust_aligned_offset(int *x, int *y,
-                                      const struct drm_framebuffer *fb,
-                                      int color_plane,
-                                      unsigned int rotation,
-                                      unsigned int pitch,
-                                      u32 old_offset, u32 new_offset)
-{
-       struct drm_i915_private *dev_priv = to_i915(fb->dev);
-       unsigned int cpp = fb->format->cpp[color_plane];
-
-       WARN_ON(new_offset > old_offset);
-
-       if (!is_surface_linear(fb->modifier, color_plane)) {
-               unsigned int tile_size, tile_width, tile_height;
-               unsigned int pitch_tiles;
-
-               tile_size = intel_tile_size(dev_priv);
-               intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
-
-               if (drm_rotation_90_or_270(rotation)) {
-                       pitch_tiles = pitch / tile_height;
-                       swap(tile_width, tile_height);
-               } else {
-                       pitch_tiles = pitch / (tile_width * cpp);
-               }
-
-               intel_adjust_tile_offset(x, y, tile_width, tile_height,
-                                        tile_size, pitch_tiles,
-                                        old_offset, new_offset);
-       } else {
-               old_offset += *y * pitch + *x * cpp;
-
-               *y = (old_offset - new_offset) / pitch;
-               *x = ((old_offset - new_offset) - *y * pitch) / cpp;
-       }
-
-       return new_offset;
-}
-
-/*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- */
-static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
-                                            const struct intel_plane_state *state,
-                                            int color_plane,
-                                            u32 old_offset, u32 new_offset)
-{
-       return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
-                                          state->base.rotation,
-                                          state->color_plane[color_plane].stride,
-                                          old_offset, new_offset);
-}
-
-/*
- * Computes the aligned offset to the base tile and adjusts
- * x, y. bytes per pixel is assumed to be a power-of-two.
- *
- * In the 90/270 rotated case, x and y are assumed
- * to be already rotated to match the rotated GTT view, and
- * pitch is the tile_height aligned framebuffer height.
- *
- * This function is used when computing the derived information
- * under intel_framebuffer, so using any of that information
- * here is not allowed. Anything under drm_framebuffer can be
- * used. This is why the user has to pass in the pitch since it
- * is specified in the rotated orientation.
- */
-static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
-                                       int *x, int *y,
-                                       const struct drm_framebuffer *fb,
-                                       int color_plane,
-                                       unsigned int pitch,
-                                       unsigned int rotation,
-                                       u32 alignment)
-{
-       unsigned int cpp = fb->format->cpp[color_plane];
-       u32 offset, offset_aligned;
-
-       if (alignment)
-               alignment--;
-
-       if (!is_surface_linear(fb->modifier, color_plane)) {
-               unsigned int tile_size, tile_width, tile_height;
-               unsigned int tile_rows, tiles, pitch_tiles;
-
-               tile_size = intel_tile_size(dev_priv);
-               intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
-
-               if (drm_rotation_90_or_270(rotation)) {
-                       pitch_tiles = pitch / tile_height;
-                       swap(tile_width, tile_height);
-               } else {
-                       pitch_tiles = pitch / (tile_width * cpp);
-               }
-
-               tile_rows = *y / tile_height;
-               *y %= tile_height;
-
-               tiles = *x / tile_width;
-               *x %= tile_width;
-
-               offset = (tile_rows * pitch_tiles + tiles) * tile_size;
-               offset_aligned = offset & ~alignment;
-
-               intel_adjust_tile_offset(x, y, tile_width, tile_height,
-                                        tile_size, pitch_tiles,
-                                        offset, offset_aligned);
-       } else {
-               offset = *y * pitch + *x * cpp;
-               offset_aligned = offset & ~alignment;
-
-               *y = (offset & alignment) / pitch;
-               *x = ((offset & alignment) - *y * pitch) / cpp;
-       }
-
-       return offset_aligned;
-}
-
-static u32 intel_plane_compute_aligned_offset(int *x, int *y,
-                                             const struct intel_plane_state *state,
-                                             int color_plane)
-{
-       struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
-       const struct drm_framebuffer *fb = state->base.fb;
-       unsigned int rotation = state->base.rotation;
-       int pitch = state->color_plane[color_plane].stride;
-       u32 alignment;
-
-       if (intel_plane->id == PLANE_CURSOR)
-               alignment = intel_cursor_alignment(dev_priv);
-       else
-               alignment = intel_surf_alignment(fb, color_plane);
-
-       return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
-                                           pitch, rotation, alignment);
-}
-
-/* Convert the fb->offset[] into x/y offsets */
-static int intel_fb_offset_to_xy(int *x, int *y,
-                                const struct drm_framebuffer *fb,
-                                int color_plane)
-{
-       struct drm_i915_private *dev_priv = to_i915(fb->dev);
-       unsigned int height;
-
-       if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
-           fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
-               DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
-                             fb->offsets[color_plane], color_plane);
-               return -EINVAL;
-       }
-
-       height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
-       height = ALIGN(height, intel_tile_height(fb, color_plane));
-
-       /* Catch potential overflows early */
-       if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
-                           fb->offsets[color_plane])) {
-               DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
-                             fb->offsets[color_plane], fb->pitches[color_plane],
-                             color_plane);
-               return -ERANGE;
-       }
-
-       *x = 0;
-       *y = 0;
-
-       intel_adjust_aligned_offset(x, y,
-                                   fb, color_plane, DRM_MODE_ROTATE_0,
-                                   fb->pitches[color_plane],
-                                   fb->offsets[color_plane], 0);
-
-       return 0;
-}
-
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
-       switch (fb_modifier) {
-       case I915_FORMAT_MOD_X_TILED:
-               return I915_TILING_X;
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-               return I915_TILING_Y;
-       default:
-               return I915_TILING_NONE;
-       }
-}
-
-/*
- * From the Sky Lake PRM:
- * "The Color Control Surface (CCS) contains the compression status of
- *  the cache-line pairs. The compression state of the cache-line pair
- *  is specified by 2 bits in the CCS. Each CCS cache-line represents
- *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
- *  cache-line-pairs. CCS is always Y tiled."
- *
- * Since cache line pairs refers to horizontally adjacent cache lines,
- * each cache line in the CCS corresponds to an area of 32x16 cache
- * lines on the main surface. Since each pixel is 4 bytes, this gives
- * us a ratio of one byte in the CCS for each 8x16 pixels in the
- * main surface.
- */
-static const struct drm_format_info ccs_formats[] = {
-       { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
-         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-       { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
-         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-       { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
-         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
-       { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
-         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
-};
-
-static const struct drm_format_info *
-lookup_format_info(const struct drm_format_info formats[],
-                  int num_formats, u32 format)
-{
-       int i;
-
-       for (i = 0; i < num_formats; i++) {
-               if (formats[i].format == format)
-                       return &formats[i];
-       }
-
-       return NULL;
-}
-
-static const struct drm_format_info *
-intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
-{
-       switch (cmd->modifier[0]) {
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               return lookup_format_info(ccs_formats,
-                                         ARRAY_SIZE(ccs_formats),
-                                         cmd->pixel_format);
-       default:
-               return NULL;
-       }
-}
-
-bool is_ccs_modifier(u64 modifier)
-{
-       return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-              modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-}
-
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
-                             u32 pixel_format, u64 modifier)
-{
-       struct intel_crtc *crtc;
-       struct intel_plane *plane;
-
-       /*
-        * We assume the primary plane for pipe A has
-        * the highest stride limits of them all.
-        */
-       crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
-       plane = to_intel_plane(crtc->base.primary);
-
-       return plane->max_stride(plane, pixel_format, modifier,
-                                DRM_MODE_ROTATE_0);
-}
-
-static
-u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
-                       u32 pixel_format, u64 modifier)
-{
-       /*
-        * Arbitrary limit for gen4+ chosen to match the
-        * render engine max stride.
-        *
-        * The new CCS hash mode makes remapping impossible
-        */
-       if (!is_ccs_modifier(modifier)) {
-               if (INTEL_GEN(dev_priv) >= 7)
-                       return 256*1024;
-               else if (INTEL_GEN(dev_priv) >= 4)
-                       return 128*1024;
-       }
-
-       return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
-}
-
-static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
-{
-       struct drm_i915_private *dev_priv = to_i915(fb->dev);
-
-       if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
-               u32 max_stride = intel_plane_fb_max_stride(dev_priv,
-                                                          fb->format->format,
-                                                          fb->modifier);
-
-               /*
-                * To make remapping with linear generally feasible
-                * we need the stride to be page aligned.
-                */
-               if (fb->pitches[color_plane] > max_stride)
-                       return intel_tile_size(dev_priv);
-               else
-                       return 64;
-       } else {
-               return intel_tile_width_bytes(fb, color_plane);
-       }
-}
-
-bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int i;
-
-       /* We don't want to deal with remapping with cursors */
-       if (plane->id == PLANE_CURSOR)
-               return false;
-
-       /*
-        * The display engine limits already match/exceed the
-        * render engine limits, so not much point in remapping.
-        * Would also need to deal with the fence POT alignment
-        * and gen2 2KiB GTT tile size.
-        */
-       if (INTEL_GEN(dev_priv) < 4)
-               return false;
-
-       /*
-        * The new CCS hash mode isn't compatible with remapping as
-        * the virtual address of the pages affects the compressed data.
-        */
-       if (is_ccs_modifier(fb->modifier))
-               return false;
-
-       /* Linear needs a page aligned stride for remapping */
-       if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
-               unsigned int alignment = intel_tile_size(dev_priv) - 1;
-
-               for (i = 0; i < fb->format->num_planes; i++) {
-                       if (fb->pitches[i] & alignment)
-                               return false;
-               }
-       }
-
-       return true;
-}
-
-static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       u32 stride, max_stride;
-
-       /*
-        * No remapping for invisible planes since we don't have
-        * an actual source viewport to remap.
-        */
-       if (!plane_state->base.visible)
-               return false;
-
-       if (!intel_plane_can_remap(plane_state))
-               return false;
-
-       /*
-        * FIXME: aux plane limits on gen9+ are
-        * unclear in Bspec, for now no checking.
-        */
-       stride = intel_fb_pitch(fb, 0, rotation);
-       max_stride = plane->max_stride(plane, fb->format->format,
-                                      fb->modifier, rotation);
-
-       return stride > max_stride;
-}
-
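-/*
- * Precompute per-plane x/y offsets for the normal and rotated GTT views
- * of a framebuffer, validate the CCS and fence/stride constraints, and
- * record the rotation info used for the rotated VMA.
- */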
-static int
-intel_fill_fb_info(struct drm_i915_private *dev_priv,
-                  struct drm_framebuffer *fb)
-{
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct intel_rotation_info *rot_info = &intel_fb->rot_info;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       u32 gtt_offset_rotated = 0;
-       unsigned int max_size = 0;
-       int i, num_planes = fb->format->num_planes;
-       unsigned int tile_size = intel_tile_size(dev_priv);
-
-       for (i = 0; i < num_planes; i++) {
-               unsigned int width, height;
-               unsigned int cpp, size;
-               u32 offset;
-               int x, y;
-               int ret;
-
-               cpp = fb->format->cpp[i];
-               width = drm_framebuffer_plane_width(fb->width, fb, i);
-               height = drm_framebuffer_plane_height(fb->height, fb, i);
-
-               ret = intel_fb_offset_to_xy(&x, &y, fb, i);
-               if (ret) {
-                       DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
-                                     i, fb->offsets[i]);
-                       return ret;
-               }
-
-               if (is_ccs_modifier(fb->modifier) && i == 1) {
-                       int hsub = fb->format->hsub;
-                       int vsub = fb->format->vsub;
-                       int tile_width, tile_height;
-                       int main_x, main_y;
-                       int ccs_x, ccs_y;
-
-                       intel_tile_dims(fb, i, &tile_width, &tile_height);
-                       tile_width *= hsub;
-                       tile_height *= vsub;
-
-                       ccs_x = (x * hsub) % tile_width;
-                       ccs_y = (y * vsub) % tile_height;
-                       main_x = intel_fb->normal[0].x % tile_width;
-                       main_y = intel_fb->normal[0].y % tile_height;
-
-                       /*
-                        * CCS doesn't have its own x/y offset register, so the intra CCS tile
-                        * x/y offsets must match between CCS and the main surface.
-                        */
-                       if (main_x != ccs_x || main_y != ccs_y) {
-                               DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
-                                             main_x, main_y,
-                                             ccs_x, ccs_y,
-                                             intel_fb->normal[0].x,
-                                             intel_fb->normal[0].y,
-                                             x, y);
-                               return -EINVAL;
-                       }
-               }
-
-               /*
-                * The fence (if used) is aligned to the start of the object
-                * so having the framebuffer wrap around across the edge of the
-                * fenced region doesn't really work. We have no API to configure
-                * the fence start offset within the object (nor could we probably
-                * on gen2/3). So it's just easier if we just require that the
-                * fb layout agrees with the fence layout. We already check that the
-                * fb stride matches the fence stride elsewhere.
-                */
-               if (i == 0 && i915_gem_object_is_tiled(obj) &&
-                   (x + width) * cpp > fb->pitches[i]) {
-                       DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
-                                     i, fb->offsets[i]);
-                       return -EINVAL;
-               }
-
-               /*
-                * First pixel of the framebuffer from
-                * the start of the normal gtt mapping.
-                */
-               intel_fb->normal[i].x = x;
-               intel_fb->normal[i].y = y;
-
-               offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
-                                                     fb->pitches[i],
-                                                     DRM_MODE_ROTATE_0,
-                                                     tile_size);
-               offset /= tile_size;
-
-               if (!is_surface_linear(fb->modifier, i)) {
-                       unsigned int tile_width, tile_height;
-                       unsigned int pitch_tiles;
-                       struct drm_rect r;
-
-                       intel_tile_dims(fb, i, &tile_width, &tile_height);
-
-                       rot_info->plane[i].offset = offset;
-                       rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
-                       rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
-                       rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
-                       intel_fb->rotated[i].pitch =
-                               rot_info->plane[i].height * tile_height;
-
-                       /* how many tiles does this plane need */
-                       size = rot_info->plane[i].stride * rot_info->plane[i].height;
-                       /*
-                        * If the plane isn't horizontally tile aligned,
-                        * we need one more tile.
-                        */
-                       if (x != 0)
-                               size++;
-
-                       /* rotate the x/y offsets to match the GTT view */
-                       r.x1 = x;
-                       r.y1 = y;
-                       r.x2 = x + width;
-                       r.y2 = y + height;
-                       drm_rect_rotate(&r,
-                                       rot_info->plane[i].width * tile_width,
-                                       rot_info->plane[i].height * tile_height,
-                                       DRM_MODE_ROTATE_270);
-                       x = r.x1;
-                       y = r.y1;
-
-                       /* rotate the tile dimensions to match the GTT view */
-                       pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
-                       swap(tile_width, tile_height);
-
-                       /*
-                        * We only keep the x/y offsets, so push all of the
-                        * gtt offset into the x/y offsets.
-                        */
-                       intel_adjust_tile_offset(&x, &y,
-                                                tile_width, tile_height,
-                                                tile_size, pitch_tiles,
-                                                gtt_offset_rotated * tile_size, 0);
-
-                       gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
-
-                       /*
-                        * First pixel of the framebuffer from
-                        * the start of the rotated gtt mapping.
-                        */
-                       intel_fb->rotated[i].x = x;
-                       intel_fb->rotated[i].y = y;
-               } else {
-                       size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
-                                           x * cpp, tile_size);
-               }
-
-               /* how many tiles in total needed in the bo */
-               max_size = max(max_size, offset + size);
-       }
-
-       if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
-               DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
-                             mul_u32_u32(max_size, tile_size), obj->base.size);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void
-intel_plane_remap_gtt(struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct intel_rotation_info *info = &plane_state->view.rotated;
-       unsigned int rotation = plane_state->base.rotation;
-       int i, num_planes = fb->format->num_planes;
-       unsigned int tile_size = intel_tile_size(dev_priv);
-       unsigned int src_x, src_y;
-       unsigned int src_w, src_h;
-       u32 gtt_offset = 0;
-
-       memset(&plane_state->view, 0, sizeof(plane_state->view));
-       plane_state->view.type = drm_rotation_90_or_270(rotation) ?
-               I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
-
-       src_x = plane_state->base.src.x1 >> 16;
-       src_y = plane_state->base.src.y1 >> 16;
-       src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       src_h = drm_rect_height(&plane_state->base.src) >> 16;
-
-       WARN_ON(is_ccs_modifier(fb->modifier));
-
-       /* Make src coordinates relative to the viewport */
-       drm_rect_translate(&plane_state->base.src,
-                          -(src_x << 16), -(src_y << 16));
-
-       /* Rotate src coordinates to match rotated GTT view */
-       if (drm_rotation_90_or_270(rotation))
-               drm_rect_rotate(&plane_state->base.src,
-                               src_w << 16, src_h << 16,
-                               DRM_MODE_ROTATE_270);
-
-       for (i = 0; i < num_planes; i++) {
-               unsigned int hsub = i ? fb->format->hsub : 1;
-               unsigned int vsub = i ? fb->format->vsub : 1;
-               unsigned int cpp = fb->format->cpp[i];
-               unsigned int tile_width, tile_height;
-               unsigned int width, height;
-               unsigned int pitch_tiles;
-               unsigned int x, y;
-               u32 offset;
-
-               intel_tile_dims(fb, i, &tile_width, &tile_height);
-
-               x = src_x / hsub;
-               y = src_y / vsub;
-               width = src_w / hsub;
-               height = src_h / vsub;
-
-               /*
-                * First pixel of the src viewport from the
-                * start of the normal gtt mapping.
-                */
-               x += intel_fb->normal[i].x;
-               y += intel_fb->normal[i].y;
-
-               offset = intel_compute_aligned_offset(dev_priv, &x, &y,
-                                                     fb, i, fb->pitches[i],
-                                                     DRM_MODE_ROTATE_0, tile_size);
-               offset /= tile_size;
-
-               info->plane[i].offset = offset;
-               info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
-                                                    tile_width * cpp);
-               info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
-               info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
-               if (drm_rotation_90_or_270(rotation)) {
-                       struct drm_rect r;
-
-                       /* rotate the x/y offsets to match the GTT view */
-                       r.x1 = x;
-                       r.y1 = y;
-                       r.x2 = x + width;
-                       r.y2 = y + height;
-                       drm_rect_rotate(&r,
-                                       info->plane[i].width * tile_width,
-                                       info->plane[i].height * tile_height,
-                                       DRM_MODE_ROTATE_270);
-                       x = r.x1;
-                       y = r.y1;
-
-                       pitch_tiles = info->plane[i].height;
-                       plane_state->color_plane[i].stride = pitch_tiles * tile_height;
-
-                       /* rotate the tile dimensions to match the GTT view */
-                       swap(tile_width, tile_height);
-               } else {
-                       pitch_tiles = info->plane[i].width;
-                       plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
-               }
-
-               /*
-                * We only keep the x/y offsets, so push all of the
-                * gtt offset into the x/y offsets.
-                */
-               intel_adjust_tile_offset(&x, &y,
-                                        tile_width, tile_height,
-                                        tile_size, pitch_tiles,
-                                        gtt_offset * tile_size, 0);
-
-               gtt_offset += info->plane[i].width * info->plane[i].height;
-
-               plane_state->color_plane[i].offset = 0;
-               plane_state->color_plane[i].x = x;
-               plane_state->color_plane[i].y = y;
-       }
-}
-
-static int
-intel_plane_compute_gtt(struct intel_plane_state *plane_state)
-{
-       const struct intel_framebuffer *fb =
-               to_intel_framebuffer(plane_state->base.fb);
-       unsigned int rotation = plane_state->base.rotation;
-       int i, num_planes;
-
-       if (!fb)
-               return 0;
-
-       num_planes = fb->base.format->num_planes;
-
-       if (intel_plane_needs_remap(plane_state)) {
-               intel_plane_remap_gtt(plane_state);
-
-               /*
-                * Sometimes even remapping can't overcome
-                * the stride limitations :( Can happen with
-                * big plane sizes and suitably misaligned
-                * offsets.
-                */
-               return intel_plane_check_stride(plane_state);
-       }
-
-       intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
-
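-       /*
-        * No remapping needed: reuse the per-plane offsets precomputed
-        * by intel_fill_fb_info().
-        */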
-       for (i = 0; i < num_planes; i++) {
-               plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
-               plane_state->color_plane[i].offset = 0;
-
-               if (drm_rotation_90_or_270(rotation)) {
-                       plane_state->color_plane[i].x = fb->rotated[i].x;
-                       plane_state->color_plane[i].y = fb->rotated[i].y;
-               } else {
-                       plane_state->color_plane[i].x = fb->normal[i].x;
-                       plane_state->color_plane[i].y = fb->normal[i].y;
-               }
-       }
-
-       /* Rotate src coordinates to match rotated GTT view */
-       if (drm_rotation_90_or_270(rotation))
-               drm_rect_rotate(&plane_state->base.src,
-                               fb->base.width << 16, fb->base.height << 16,
-                               DRM_MODE_ROTATE_270);
-
-       return intel_plane_check_stride(plane_state);
-}
-
-static int i9xx_format_to_fourcc(int format)
-{
-       switch (format) {
-       case DISPPLANE_8BPP:
-               return DRM_FORMAT_C8;
-       case DISPPLANE_BGRX555:
-               return DRM_FORMAT_XRGB1555;
-       case DISPPLANE_BGRX565:
-               return DRM_FORMAT_RGB565;
-       default:
-       case DISPPLANE_BGRX888:
-               return DRM_FORMAT_XRGB8888;
-       case DISPPLANE_RGBX888:
-               return DRM_FORMAT_XBGR8888;
-       case DISPPLANE_BGRX101010:
-               return DRM_FORMAT_XRGB2101010;
-       case DISPPLANE_RGBX101010:
-               return DRM_FORMAT_XBGR2101010;
-       }
-}
-
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
-{
-       switch (format) {
-       case PLANE_CTL_FORMAT_RGB_565:
-               return DRM_FORMAT_RGB565;
-       case PLANE_CTL_FORMAT_NV12:
-               return DRM_FORMAT_NV12;
-       case PLANE_CTL_FORMAT_P010:
-               return DRM_FORMAT_P010;
-       case PLANE_CTL_FORMAT_P012:
-               return DRM_FORMAT_P012;
-       case PLANE_CTL_FORMAT_P016:
-               return DRM_FORMAT_P016;
-       case PLANE_CTL_FORMAT_Y210:
-               return DRM_FORMAT_Y210;
-       case PLANE_CTL_FORMAT_Y212:
-               return DRM_FORMAT_Y212;
-       case PLANE_CTL_FORMAT_Y216:
-               return DRM_FORMAT_Y216;
-       case PLANE_CTL_FORMAT_Y410:
-               return DRM_FORMAT_XVYU2101010;
-       case PLANE_CTL_FORMAT_Y412:
-               return DRM_FORMAT_XVYU12_16161616;
-       case PLANE_CTL_FORMAT_Y416:
-               return DRM_FORMAT_XVYU16161616;
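-       /* fall back to the 8888 variants for anything unrecognized */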
-       default:
-       case PLANE_CTL_FORMAT_XRGB_8888:
-               if (rgb_order) {
-                       if (alpha)
-                               return DRM_FORMAT_ABGR8888;
-                       else
-                               return DRM_FORMAT_XBGR8888;
-               } else {
-                       if (alpha)
-                               return DRM_FORMAT_ARGB8888;
-                       else
-                               return DRM_FORMAT_XRGB8888;
-               }
-       case PLANE_CTL_FORMAT_XRGB_2101010:
-               if (rgb_order)
-                       return DRM_FORMAT_XBGR2101010;
-               else
-                       return DRM_FORMAT_XRGB2101010;
-       case PLANE_CTL_FORMAT_XRGB_16161616F:
-               if (rgb_order) {
-                       if (alpha)
-                               return DRM_FORMAT_ABGR16161616F;
-                       else
-                               return DRM_FORMAT_XBGR16161616F;
-               } else {
-                       if (alpha)
-                               return DRM_FORMAT_ARGB16161616F;
-                       else
-                               return DRM_FORMAT_XRGB16161616F;
-               }
-       }
-}
-
-static bool
-intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
-                             struct intel_initial_plane_config *plane_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj = NULL;
-       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-       struct drm_framebuffer *fb = &plane_config->fb->base;
-       u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
-       u32 size_aligned = round_up(plane_config->base + plane_config->size,
-                                   PAGE_SIZE);
-
-       size_aligned -= base_aligned;
-
-       if (plane_config->size == 0)
-               return false;
-
-       /*
-        * If the FB is too big, just don't use it since fbdev is not very
-        * important and we should probably use that space with FBC or other
-        * features.
-        */
-       if (size_aligned * 2 > dev_priv->stolen_usable_size)
-               return false;
-
-       switch (fb->modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-       case I915_FORMAT_MOD_Y_TILED:
-               break;
-       default:
-               DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
-                                fb->modifier);
-               return false;
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
-                                                            base_aligned,
-                                                            base_aligned,
-                                                            size_aligned);
-       mutex_unlock(&dev->struct_mutex);
-       if (!obj)
-               return false;
-
-       switch (plane_config->tiling) {
-       case I915_TILING_NONE:
-               break;
-       case I915_TILING_X:
-       case I915_TILING_Y:
-               obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
-               break;
-       default:
-               MISSING_CASE(plane_config->tiling);
-               return false;
-       }
-
-       mode_cmd.pixel_format = fb->format->format;
-       mode_cmd.width = fb->width;
-       mode_cmd.height = fb->height;
-       mode_cmd.pitches[0] = fb->pitches[0];
-       mode_cmd.modifier[0] = fb->modifier;
-       mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
-
-       if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
-               DRM_DEBUG_KMS("intel fb init failed\n");
-               goto out_unref_obj;
-       }
-
-       DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
-       return true;
-
-out_unref_obj:
-       i915_gem_object_put(obj);
-       return false;
-}
-
-static void
-intel_set_plane_visible(struct intel_crtc_state *crtc_state,
-                       struct intel_plane_state *plane_state,
-                       bool visible)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-
-       plane_state->base.visible = visible;
-
-       if (visible)
-               crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
-       else
-               crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
-}
-
-static void fixup_active_planes(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       struct drm_plane *plane;
-
-       /*
-        * active_planes can alias if multiple "primary" or cursor planes
-        * have been assigned to the same (or wrong) pipe. plane_mask uses
-        * unique ids, hence we can use that to reconstruct active_planes.
-        */
-       crtc_state->active_planes = 0;
-
-       drm_for_each_plane_mask(plane, &dev_priv->drm,
-                               crtc_state->base.plane_mask)
-               crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
-}
-
-static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
-                                        struct intel_plane *plane)
-{
-       struct intel_crtc_state *crtc_state =
-               to_intel_crtc_state(crtc->base.state);
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(plane->base.state);
-
-       DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
-                     plane->base.base.id, plane->base.name,
-                     crtc->base.base.id, crtc->base.name);
-
-       intel_set_plane_visible(crtc_state, plane_state, false);
-       fixup_active_planes(crtc_state);
-       crtc_state->data_rate[plane->id] = 0;
-
-       if (plane->id == PLANE_PRIMARY)
-               intel_pre_disable_primary_noatomic(&crtc->base);
-
-       intel_disable_plane(plane, crtc_state);
-}
-
-static void
-intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
-                            struct intel_initial_plane_config *plane_config)
-{
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
-       struct drm_plane *primary = intel_crtc->base.primary;
-       struct drm_plane_state *plane_state = primary->state;
-       struct intel_plane *intel_plane = to_intel_plane(primary);
-       struct intel_plane_state *intel_state =
-               to_intel_plane_state(plane_state);
-       struct drm_framebuffer *fb;
-
-       if (!plane_config->fb)
-               return;
-
-       if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
-               fb = &plane_config->fb->base;
-               goto valid_fb;
-       }
-
-       kfree(plane_config->fb);
-
-       /*
-        * Failed to alloc the obj; check whether we should share
-        * an fb with another CRTC instead.
-        */
-       for_each_crtc(dev, c) {
-               struct intel_plane_state *state;
-
-               if (c == &intel_crtc->base)
-                       continue;
-
-               if (!to_intel_crtc(c)->active)
-                       continue;
-
-               state = to_intel_plane_state(c->primary->state);
-               if (!state->vma)
-                       continue;
-
-               if (intel_plane_ggtt_offset(state) == plane_config->base) {
-                       fb = state->base.fb;
-                       drm_framebuffer_get(fb);
-                       goto valid_fb;
-               }
-       }
-
-       /*
-        * We've failed to reconstruct the BIOS FB.  Current display state
-        * indicates that the primary plane is visible, but has a NULL FB,
-        * which will lead to problems later if we don't fix it up.  The
-        * simplest solution is to just disable the primary plane now and
-        * pretend the BIOS never had it enabled.
-        */
-       intel_plane_disable_noatomic(intel_crtc, intel_plane);
-
-       return;
-
-valid_fb:
-       intel_state->base.rotation = plane_config->rotation;
-       intel_fill_fb_ggtt_view(&intel_state->view, fb,
-                               intel_state->base.rotation);
-       intel_state->color_plane[0].stride =
-               intel_fb_pitch(fb, 0, intel_state->base.rotation);
-
-       mutex_lock(&dev->struct_mutex);
-       intel_state->vma =
-               intel_pin_and_fence_fb_obj(fb,
-                                          &intel_state->view,
-                                          intel_plane_uses_fence(intel_state),
-                                          &intel_state->flags);
-       mutex_unlock(&dev->struct_mutex);
-       if (IS_ERR(intel_state->vma)) {
-               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
-                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
-
-               intel_state->vma = NULL;
-               drm_framebuffer_put(fb);
-               return;
-       }
-
-       obj = intel_fb_obj(fb);
-       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
-
-       plane_state->src_x = 0;
-       plane_state->src_y = 0;
-       plane_state->src_w = fb->width << 16;
-       plane_state->src_h = fb->height << 16;
-
-       plane_state->crtc_x = 0;
-       plane_state->crtc_y = 0;
-       plane_state->crtc_w = fb->width;
-       plane_state->crtc_h = fb->height;
-
-       intel_state->base.src = drm_plane_state_src(plane_state);
-       intel_state->base.dst = drm_plane_state_dest(plane_state);
-
-       if (i915_gem_object_is_tiled(obj))
-               dev_priv->preserve_bios_swizzle = true;
-
-       plane_state->fb = fb;
-       plane_state->crtc = &intel_crtc->base;
-
-       atomic_or(to_intel_plane(primary)->frontbuffer_bit,
-                 &obj->frontbuffer_bits);
-}
-
-static int skl_max_plane_width(const struct drm_framebuffer *fb,
-                              int color_plane,
-                              unsigned int rotation)
-{
-       int cpp = fb->format->cpp[color_plane];
-
-       switch (fb->modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               return 4096;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               /* FIXME AUX plane? */
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Yf_TILED:
-               if (cpp == 8)
-                       return 2048;
-               else
-                       return 4096;
-       default:
-               MISSING_CASE(fb->modifier);
-               return 2048;
-       }
-}
-
-static int glk_max_plane_width(const struct drm_framebuffer *fb,
-                              int color_plane,
-                              unsigned int rotation)
-{
-       int cpp = fb->format->cpp[color_plane];
-
-       switch (fb->modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               if (cpp == 8)
-                       return 4096;
-               else
-                       return 5120;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               /* FIXME AUX plane? */
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Yf_TILED:
-               if (cpp == 8)
-                       return 2048;
-               else
-                       return 5120;
-       default:
-               MISSING_CASE(fb->modifier);
-               return 2048;
-       }
-}
-
-static int icl_max_plane_width(const struct drm_framebuffer *fb,
-                              int color_plane,
-                              unsigned int rotation)
-{
-       return 5120;
-}
-
-static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
-                                          int main_x, int main_y, u32 main_offset)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int hsub = fb->format->hsub;
-       int vsub = fb->format->vsub;
-       int aux_x = plane_state->color_plane[1].x;
-       int aux_y = plane_state->color_plane[1].y;
-       u32 aux_offset = plane_state->color_plane[1].offset;
-       u32 alignment = intel_surf_alignment(fb, 1);
-
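-       /*
-        * Walk the AUX offset back one alignment step at a time until its
-        * intra-tile x/y matches the main surface, or until there is no
-        * offset left to give back.
-        */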
-       while (aux_offset >= main_offset && aux_y <= main_y) {
-               int x, y;
-
-               if (aux_x == main_x && aux_y == main_y)
-                       break;
-
-               if (aux_offset == 0)
-                       break;
-
-               x = aux_x / hsub;
-               y = aux_y / vsub;
-               aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
-                                                              aux_offset, aux_offset - alignment);
-               aux_x = x * hsub + aux_x % hsub;
-               aux_y = y * vsub + aux_y % vsub;
-       }
-
-       if (aux_x != main_x || aux_y != main_y)
-               return false;
-
-       plane_state->color_plane[1].offset = aux_offset;
-       plane_state->color_plane[1].x = aux_x;
-       plane_state->color_plane[1].y = aux_y;
-
-       return true;
-}
-
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       int x = plane_state->base.src.x1 >> 16;
-       int y = plane_state->base.src.y1 >> 16;
-       int w = drm_rect_width(&plane_state->base.src) >> 16;
-       int h = drm_rect_height(&plane_state->base.src) >> 16;
-       int max_width;
-       int max_height = 4096;
-       u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               max_width = icl_max_plane_width(fb, 0, rotation);
-       else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               max_width = glk_max_plane_width(fb, 0, rotation);
-       else
-               max_width = skl_max_plane_width(fb, 0, rotation);
-
-       if (w > max_width || h > max_height) {
-               DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
-                             w, h, max_width, max_height);
-               return -EINVAL;
-       }
-
-       intel_add_fb_offsets(&x, &y, plane_state, 0);
-       offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
-       alignment = intel_surf_alignment(fb, 0);
-
-       /*
-        * AUX surface offset is specified as the distance from the
-        * main surface offset, and it must be non-negative. Make
-        * sure that is what we will get.
-        */
-       if (offset > aux_offset)
-               offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
-                                                          offset, aux_offset & ~(alignment - 1));
-
-       /*
-        * When using an X-tiled surface, the plane blows up
-        * if the x offset + width exceeds the stride.
-        *
-        * TODO: linear and Y-tiled seem fine, Yf untested.
-        */
-       if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
-               int cpp = fb->format->cpp[0];
-
-               while ((x + w) * cpp > plane_state->color_plane[0].stride) {
-                       if (offset == 0) {
-                               DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
-                               return -EINVAL;
-                       }
-
-                       offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
-                                                                  offset, offset - alignment);
-               }
-       }
-
-       /*
-        * The CCS AUX surface doesn't have its own x/y offsets, so we must
-        * make sure they match the main surface x/y offsets.
-        */
-       if (is_ccs_modifier(fb->modifier)) {
-               while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
-                       if (offset == 0)
-                               break;
-
-                       offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
-                                                                  offset, offset - alignment);
-               }
-
-               if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
-                       DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
-                       return -EINVAL;
-               }
-       }
-
-       plane_state->color_plane[0].offset = offset;
-       plane_state->color_plane[0].x = x;
-       plane_state->color_plane[0].y = y;
-
-       /*
-        * Put the final coordinates back so that the src
-        * coordinate checks will see the right values.
-        */
-       drm_rect_translate(&plane_state->base.src,
-                          (x << 16) - plane_state->base.src.x1,
-                          (y << 16) - plane_state->base.src.y1);
-
-       return 0;
-}
-
-static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       int max_width = skl_max_plane_width(fb, 1, rotation);
-       int max_height = 4096;
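-       /*
-        * src coordinates are 16.16 fixed point; the extra shift (>> 17)
-        * also halves them for the 2x2 subsampled CbCr plane.
-        */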
-       int x = plane_state->base.src.x1 >> 17;
-       int y = plane_state->base.src.y1 >> 17;
-       int w = drm_rect_width(&plane_state->base.src) >> 17;
-       int h = drm_rect_height(&plane_state->base.src) >> 17;
-       u32 offset;
-
-       intel_add_fb_offsets(&x, &y, plane_state, 1);
-       offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
-
-       /* FIXME not quite sure how/if these apply to the chroma plane */
-       if (w > max_width || h > max_height) {
-               DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
-                             w, h, max_width, max_height);
-               return -EINVAL;
-       }
-
-       plane_state->color_plane[1].offset = offset;
-       plane_state->color_plane[1].x = x;
-       plane_state->color_plane[1].y = y;
-
-       return 0;
-}
-
-static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int src_x = plane_state->base.src.x1 >> 16;
-       int src_y = plane_state->base.src.y1 >> 16;
-       int hsub = fb->format->hsub;
-       int vsub = fb->format->vsub;
-       int x = src_x / hsub;
-       int y = src_y / vsub;
-       u32 offset;
-
-       intel_add_fb_offsets(&x, &y, plane_state, 1);
-       offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
-
-       plane_state->color_plane[1].offset = offset;
-       plane_state->color_plane[1].x = x * hsub + src_x % hsub;
-       plane_state->color_plane[1].y = y * vsub + src_y % vsub;
-
-       return 0;
-}
-
-int skl_check_plane_surface(struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int ret;
-
-       ret = intel_plane_compute_gtt(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       /*
-        * Handle the AUX surface first since
-        * the main surface setup depends on it.
-        */
-       if (is_planar_yuv_format(fb->format->format)) {
-               ret = skl_check_nv12_aux_surface(plane_state);
-               if (ret)
-                       return ret;
-       } else if (is_ccs_modifier(fb->modifier)) {
-               ret = skl_check_ccs_aux_surface(plane_state);
-               if (ret)
-                       return ret;
-       } else {
-               plane_state->color_plane[1].offset = ~0xfff;
-               plane_state->color_plane[1].x = 0;
-               plane_state->color_plane[1].y = 0;
-       }
-
-       ret = skl_check_main_surface(plane_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
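-/*
- * Maximum stride accepted by the primary plane registers; shrinks on
- * older generations and for X-tiled surfaces.
- */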
-unsigned int
-i9xx_plane_max_stride(struct intel_plane *plane,
-                     u32 pixel_format, u64 modifier,
-                     unsigned int rotation)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
-       if (!HAS_GMCH(dev_priv)) {
-               return 32*1024;
-       } else if (INTEL_GEN(dev_priv) >= 4) {
-               if (modifier == I915_FORMAT_MOD_X_TILED)
-                       return 16*1024;
-               else
-                       return 32*1024;
-       } else if (INTEL_GEN(dev_priv) >= 3) {
-               if (modifier == I915_FORMAT_MOD_X_TILED)
-                       return 8*1024;
-               else
-                       return 16*1024;
-       } else {
-               if (plane->i9xx_plane == PLANE_C)
-                       return 4*1024;
-               else
-                       return 8*1024;
-       }
-}
-
-static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 dspcntr = 0;
-
-       if (crtc_state->gamma_enable)
-               dspcntr |= DISPPLANE_GAMMA_ENABLE;
-
-       if (crtc_state->csc_enable)
-               dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
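-       /* gen2-4 planes can be assigned to either pipe via DSPCNTR */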
-       if (INTEL_GEN(dev_priv) < 5)
-               dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
-       return dspcntr;
-}
-
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
-                         const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       u32 dspcntr;
-
-       dspcntr = DISPLAY_PLANE_ENABLE;
-
-       if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
-           IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-               dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
-       switch (fb->format->format) {
-       case DRM_FORMAT_C8:
-               dspcntr |= DISPPLANE_8BPP;
-               break;
-       case DRM_FORMAT_XRGB1555:
-               dspcntr |= DISPPLANE_BGRX555;
-               break;
-       case DRM_FORMAT_RGB565:
-               dspcntr |= DISPPLANE_BGRX565;
-               break;
-       case DRM_FORMAT_XRGB8888:
-               dspcntr |= DISPPLANE_BGRX888;
-               break;
-       case DRM_FORMAT_XBGR8888:
-               dspcntr |= DISPPLANE_RGBX888;
-               break;
-       case DRM_FORMAT_XRGB2101010:
-               dspcntr |= DISPPLANE_BGRX101010;
-               break;
-       case DRM_FORMAT_XBGR2101010:
-               dspcntr |= DISPPLANE_RGBX101010;
-               break;
-       default:
-               MISSING_CASE(fb->format->format);
-               return 0;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 4 &&
-           fb->modifier == I915_FORMAT_MOD_X_TILED)
-               dspcntr |= DISPPLANE_TILED;
-
-       if (rotation & DRM_MODE_ROTATE_180)
-               dspcntr |= DISPPLANE_ROTATE_180;
-
-       if (rotation & DRM_MODE_REFLECT_X)
-               dspcntr |= DISPPLANE_MIRROR;
-
-       return dspcntr;
-}
-
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       int src_x, src_y;
-       u32 offset;
-       int ret;
-
-       ret = intel_plane_compute_gtt(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       src_x = plane_state->base.src.x1 >> 16;
-       src_y = plane_state->base.src.y1 >> 16;
-
-       intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-
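-       /*
-        * Pre-gen4 has no plane surface offset registers; the x/y offsets
-        * are folded into the base address via the linear offset instead.
-        */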
-       if (INTEL_GEN(dev_priv) >= 4)
-               offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
-                                                           plane_state, 0);
-       else
-               offset = 0;
-
-       /*
-        * Put the final coordinates back so that the src
-        * coordinate checks will see the right values.
-        */
-       drm_rect_translate(&plane_state->base.src,
-                          (src_x << 16) - plane_state->base.src.x1,
-                          (src_y << 16) - plane_state->base.src.y1);
-
-       /* HSW/BDW do this automagically in hardware */
-       if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
-               unsigned int rotation = plane_state->base.rotation;
-               int src_w = drm_rect_width(&plane_state->base.src) >> 16;
-               int src_h = drm_rect_height(&plane_state->base.src) >> 16;
-
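-               /*
-                * With 180 degree rotation / X mirroring the hardware scans
-                * out from the opposite edge, so point the offsets at the
-                * last pixel along the flipped axes.
-                */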
-               if (rotation & DRM_MODE_ROTATE_180) {
-                       src_x += src_w - 1;
-                       src_y += src_h - 1;
-               } else if (rotation & DRM_MODE_REFLECT_X) {
-                       src_x += src_w - 1;
-               }
-       }
-
-       plane_state->color_plane[0].offset = offset;
-       plane_state->color_plane[0].x = src_x;
-       plane_state->color_plane[0].y = src_y;
-
-       return 0;
-}
-
-static int
-i9xx_plane_check(struct intel_crtc_state *crtc_state,
-                struct intel_plane_state *plane_state)
-{
-       int ret;
-
-       ret = chv_plane_check_rotation(plane_state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-                                                 &crtc_state->base,
-                                                 DRM_PLANE_HELPER_NO_SCALING,
-                                                 DRM_PLANE_HELPER_NO_SCALING,
-                                                 false, true);
-       if (ret)
-               return ret;
-
-       ret = i9xx_check_plane_surface(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       ret = intel_plane_check_src_coordinates(plane_state);
-       if (ret)
-               return ret;
-
-       plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
-
-       return 0;
-}
-
-static void i9xx_update_plane(struct intel_plane *plane,
-                             const struct intel_crtc_state *crtc_state,
-                             const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       u32 linear_offset;
-       int x = plane_state->color_plane[0].x;
-       int y = plane_state->color_plane[0].y;
-       unsigned long irqflags;
-       u32 dspaddr_offset;
-       u32 dspcntr;
-
-       dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
-       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
-       if (INTEL_GEN(dev_priv) >= 4)
-               dspaddr_offset = plane_state->color_plane[0].offset;
-       else
-               dspaddr_offset = linear_offset;
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
-
-       if (INTEL_GEN(dev_priv) < 4) {
-               /*
-                * pipesrc and dspsize control the size that is scaled from,
-                * which should always be the user's requested size.
-                */
-               I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
-               I915_WRITE_FW(DSPSIZE(i9xx_plane),
-                             ((crtc_state->pipe_src_h - 1) << 16) |
-                             (crtc_state->pipe_src_w - 1));
-       } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
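-                       /*
-                        * linear: bytes from the aligned offset to the end
-                        * of the plane, expressed in tile_size units.
-                        */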
-               I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
-               I915_WRITE_FW(PRIMSIZE(i9xx_plane),
-                             ((crtc_state->pipe_src_h - 1) << 16) |
-                             (crtc_state->pipe_src_w - 1));
-               I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
-       }
-
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
-       } else if (INTEL_GEN(dev_priv) >= 4) {
-               I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
-               I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
-       }
-
-       /*
-        * The control register self-arms if the plane was previously
-        * disabled. Try to make the plane enable atomic by writing
-        * the control register just before the surface register.
-        */
-       I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
-       if (INTEL_GEN(dev_priv) >= 4)
-               I915_WRITE_FW(DSPSURF(i9xx_plane),
-                             intel_plane_ggtt_offset(plane_state) +
-                             dspaddr_offset);
-       else
-               I915_WRITE_FW(DSPADDR(i9xx_plane),
-                             intel_plane_ggtt_offset(plane_state) +
-                             dspaddr_offset);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_plane(struct intel_plane *plane,
-                              const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       unsigned long irqflags;
-       u32 dspcntr;
-
-       /*
-        * DSPCNTR pipe gamma enable on g4x+ and pipe csc
-        * enable on ilk+ affect the pipe bottom color as
-        * well, so we must configure them even if the plane
-        * is disabled.
-        *
-        * On pre-g4x there is no way to gamma correct the
-        * pipe bottom color but we'll keep on doing this
-        * anyway so that the crtc state readout works correctly.
-        */
-       dspcntr = i9xx_plane_ctl_crtc(crtc_state);
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
-       if (INTEL_GEN(dev_priv) >= 4)
-               I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
-       else
-               I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
-                                   enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       intel_wakeref_t wakeref;
-       bool ret;
-       u32 val;
-
-       /*
-        * Not 100% correct for planes that can move between pipes,
-        * but that's only the case for gen2-4 which don't have any
-        * display power wells.
-        */
-       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(DSPCNTR(i9xx_plane));
-
-       ret = val & DISPLAY_PLANE_ENABLE;
-
-       if (INTEL_GEN(dev_priv) >= 5)
-               *pipe = plane->pipe;
-       else
-               *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
-                       DISPPLANE_SEL_PIPE_SHIFT;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
-{
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
-       I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
-       I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
-}
-
-/*
- * This function detaches (aka. unbinds) unused scalers in hardware
- */
-static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct intel_crtc_scaler_state *scaler_state =
-               &crtc_state->scaler_state;
-       int i;
-
-       /* loop through and disable scalers that aren't in use */
-       for (i = 0; i < intel_crtc->num_scalers; i++) {
-               if (!scaler_state->scalers[i].in_use)
-                       skl_detach_scaler(intel_crtc, i);
-       }
-}
-
-static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
-                                         int color_plane, unsigned int rotation)
-{
-       /*
-        * The stride is expressed either in chunks of 64 bytes for linear
-        * buffers or in number of tiles for tiled buffers.
-        */
-       if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
-               return 64;
-       else if (drm_rotation_90_or_270(rotation))
-               return intel_tile_height(fb, color_plane);
-       else
-               return intel_tile_width_bytes(fb, color_plane);
-}
-
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
-                    int color_plane)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       u32 stride = plane_state->color_plane[color_plane].stride;
-
-       if (color_plane >= fb->format->num_planes)
-               return 0;
-
-       return stride / skl_plane_stride_mult(fb, color_plane, rotation);
-}
-
-static u32 skl_plane_ctl_format(u32 pixel_format)
-{
-       switch (pixel_format) {
-       case DRM_FORMAT_C8:
-               return PLANE_CTL_FORMAT_INDEXED;
-       case DRM_FORMAT_RGB565:
-               return PLANE_CTL_FORMAT_RGB_565;
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_ABGR8888:
-               return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_ARGB8888:
-               return PLANE_CTL_FORMAT_XRGB_8888;
-       case DRM_FORMAT_XRGB2101010:
-               return PLANE_CTL_FORMAT_XRGB_2101010;
-       case DRM_FORMAT_XBGR2101010:
-               return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
-       case DRM_FORMAT_XBGR16161616F:
-       case DRM_FORMAT_ABGR16161616F:
-               return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
-       case DRM_FORMAT_XRGB16161616F:
-       case DRM_FORMAT_ARGB16161616F:
-               return PLANE_CTL_FORMAT_XRGB_16161616F;
-       case DRM_FORMAT_YUYV:
-               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
-       case DRM_FORMAT_YVYU:
-               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
-       case DRM_FORMAT_UYVY:
-               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
-       case DRM_FORMAT_VYUY:
-               return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
-       case DRM_FORMAT_NV12:
-               return PLANE_CTL_FORMAT_NV12;
-       case DRM_FORMAT_P010:
-               return PLANE_CTL_FORMAT_P010;
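-       /* treat unknown register values as XRGB8888 */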
-       case DRM_FORMAT_P012:
-               return PLANE_CTL_FORMAT_P012;
-       case DRM_FORMAT_P016:
-               return PLANE_CTL_FORMAT_P016;
-       case DRM_FORMAT_Y210:
-               return PLANE_CTL_FORMAT_Y210;
-       case DRM_FORMAT_Y212:
-               return PLANE_CTL_FORMAT_Y212;
-       case DRM_FORMAT_Y216:
-               return PLANE_CTL_FORMAT_Y216;
-       case DRM_FORMAT_XVYU2101010:
-               return PLANE_CTL_FORMAT_Y410;
-       case DRM_FORMAT_XVYU12_16161616:
-               return PLANE_CTL_FORMAT_Y412;
-       case DRM_FORMAT_XVYU16161616:
-               return PLANE_CTL_FORMAT_Y416;
-       default:
-               MISSING_CASE(pixel_format);
-       }
-
-       return 0;
-}
-
-static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
-{
-       if (!plane_state->base.fb->format->has_alpha)
-               return PLANE_CTL_ALPHA_DISABLE;
-
-       switch (plane_state->base.pixel_blend_mode) {
-       case DRM_MODE_BLEND_PIXEL_NONE:
-               return PLANE_CTL_ALPHA_DISABLE;
-       case DRM_MODE_BLEND_PREMULTI:
-               return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
-       case DRM_MODE_BLEND_COVERAGE:
-               return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
-       default:
-               MISSING_CASE(plane_state->base.pixel_blend_mode);
-               return PLANE_CTL_ALPHA_DISABLE;
-       }
-}
-
-static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
-{
-       if (!plane_state->base.fb->format->has_alpha)
-               return PLANE_COLOR_ALPHA_DISABLE;
-
-       switch (plane_state->base.pixel_blend_mode) {
-       case DRM_MODE_BLEND_PIXEL_NONE:
-               return PLANE_COLOR_ALPHA_DISABLE;
-       case DRM_MODE_BLEND_PREMULTI:
-               return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
-       case DRM_MODE_BLEND_COVERAGE:
-               return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
-       default:
-               MISSING_CASE(plane_state->base.pixel_blend_mode);
-               return PLANE_COLOR_ALPHA_DISABLE;
-       }
-}
-
-static u32 skl_plane_ctl_tiling(u64 fb_modifier)
-{
-       switch (fb_modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-               break;
-       case I915_FORMAT_MOD_X_TILED:
-               return PLANE_CTL_TILED_X;
-       case I915_FORMAT_MOD_Y_TILED:
-               return PLANE_CTL_TILED_Y;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-               return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
-       case I915_FORMAT_MOD_Yf_TILED:
-               return PLANE_CTL_TILED_YF;
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
-       default:
-               MISSING_CASE(fb_modifier);
-       }
-
-       return 0;
-}
-
-static u32 skl_plane_ctl_rotate(unsigned int rotate)
-{
-       switch (rotate) {
-       case DRM_MODE_ROTATE_0:
-               break;
-       /*
-        * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
-        * while i915 HW rotation is clockwise, hence the swap.
-        */
-       case DRM_MODE_ROTATE_90:
-               return PLANE_CTL_ROTATE_270;
-       case DRM_MODE_ROTATE_180:
-               return PLANE_CTL_ROTATE_180;
-       case DRM_MODE_ROTATE_270:
-               return PLANE_CTL_ROTATE_90;
-       default:
-               MISSING_CASE(rotate);
-       }
-
-       return 0;
-}
-
-static u32 cnl_plane_ctl_flip(unsigned int reflect)
-{
-       switch (reflect) {
-       case 0:
-               break;
-       case DRM_MODE_REFLECT_X:
-               return PLANE_CTL_FLIP_HORIZONTAL;
-       case DRM_MODE_REFLECT_Y:
-       default:
-               MISSING_CASE(reflect);
-       }
-
-       return 0;
-}
-
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       u32 plane_ctl = 0;
-
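-       /* the pipe gamma/csc bits moved out of PLANE_CTL on GLK+ */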
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               return plane_ctl;
-
-       if (crtc_state->gamma_enable)
-               plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
-
-       if (crtc_state->csc_enable)
-               plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
-       return plane_ctl;
-}
-
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
-                 const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 plane_ctl;
-
-       plane_ctl = PLANE_CTL_ENABLE;
-
-       if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
-               plane_ctl |= skl_plane_ctl_alpha(plane_state);
-               plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
-               if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
-                       plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
-
-               if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-                       plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
-       }
-
-       plane_ctl |= skl_plane_ctl_format(fb->format->format);
-       plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
-       plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
-
-       if (INTEL_GEN(dev_priv) >= 10)
-               plane_ctl |= cnl_plane_ctl_flip(rotation &
-                                               DRM_MODE_REFLECT_MASK);
-
-       if (key->flags & I915_SET_COLORKEY_DESTINATION)
-               plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
-       else if (key->flags & I915_SET_COLORKEY_SOURCE)
-               plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
-
-       return plane_ctl;
-}
-
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       u32 plane_color_ctl = 0;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               return plane_color_ctl;
-
-       if (crtc_state->gamma_enable)
-               plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
-
-       if (crtc_state->csc_enable)
-               plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
-
-       return plane_color_ctl;
-}
-
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
-                       const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       u32 plane_color_ctl = 0;
-
-       plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
-       plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
-
-       if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
-               if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
-                       plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
-               else
-                       plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
-
-               if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-                       plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
-       } else if (fb->format->is_yuv) {
-               plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
-       }
-
-       return plane_color_ctl;
-}
-
-static int
-__intel_display_resume(struct drm_device *dev,
-                      struct drm_atomic_state *state,
-                      struct drm_modeset_acquire_ctx *ctx)
-{
-       struct drm_crtc_state *crtc_state;
-       struct drm_crtc *crtc;
-       int i, ret;
-
-       intel_modeset_setup_hw_state(dev, ctx);
-       i915_redisable_vga(to_i915(dev));
-
-       if (!state)
-               return 0;
-
-       /*
-        * We've duplicated the state, so pointers to the old state are invalid.
-        *
-        * Don't attempt to use the old state until we commit the duplicated state.
-        */
-       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-               /*
-                * Force recalculation even if we restore
-                * current state. With fast modeset this may not result
-                * in a modeset when the state is compatible.
-                */
-               crtc_state->mode_changed = true;
-       }
-
-       /* ignore any reset values/BIOS leftovers in the WM registers */
-       if (!HAS_GMCH(to_i915(dev)))
-               to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
-       ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
-
-       WARN_ON(ret == -EDEADLK);
-       return ret;
-}
-
-static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
-{
-       return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
-               intel_has_gpu_reset(dev_priv));
-}
-
-void intel_prepare_reset(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
-       struct drm_atomic_state *state;
-       int ret;
-
-       /* reset doesn't touch the display */
-       if (!i915_modparams.force_reset_modeset_test &&
-           !gpu_reset_clobbers_display(dev_priv))
-               return;
-
-       /* We have a modeset vs reset deadlock, defensively unbreak it. */
-       set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
-       wake_up_all(&dev_priv->gpu_error.wait_queue);
-
-       if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
-               DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
-               i915_gem_set_wedged(dev_priv);
-       }
-
-       /*
-        * Need mode_config.mutex so that we don't
-        * trample ongoing ->detect() and whatnot.
-        */
-       mutex_lock(&dev->mode_config.mutex);
-       drm_modeset_acquire_init(ctx, 0);
-       while (1) {
-               ret = drm_modeset_lock_all_ctx(dev, ctx);
-               if (ret != -EDEADLK)
-                       break;
-
-               drm_modeset_backoff(ctx);
-       }
-       /*
-        * Disabling the crtcs gracefully seems nicer. Also the
-        * g33 docs say we should at least disable all the planes.
-        */
-       state = drm_atomic_helper_duplicate_state(dev, ctx);
-       if (IS_ERR(state)) {
-               ret = PTR_ERR(state);
-               DRM_ERROR("Duplicating state failed with %i\n", ret);
-               return;
-       }
-
-       ret = drm_atomic_helper_disable_all(dev, ctx);
-       if (ret) {
-               DRM_ERROR("Suspending crtc's failed with %i\n", ret);
-               drm_atomic_state_put(state);
-               return;
-       }
-
-       dev_priv->modeset_restore_state = state;
-       state->acquire_ctx = ctx;
-}
-
-void intel_finish_reset(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
-       struct drm_atomic_state *state;
-       int ret;
-
-       /* reset doesn't touch the display */
-       if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
-               return;
-
-       state = fetch_and_zero(&dev_priv->modeset_restore_state);
-       if (!state)
-               goto unlock;
-
-       /* reset doesn't touch the display */
-       if (!gpu_reset_clobbers_display(dev_priv)) {
-               /* for testing only restore the display */
-               ret = __intel_display_resume(dev, state, ctx);
-               if (ret)
-                       DRM_ERROR("Restoring old state failed with %i\n", ret);
-       } else {
-               /*
-                * The display has been reset as well,
-                * so need a full re-initialization.
-                */
-               intel_pps_unlock_regs_wa(dev_priv);
-               intel_modeset_init_hw(dev);
-               intel_init_clock_gating(dev_priv);
-
-               spin_lock_irq(&dev_priv->irq_lock);
-               if (dev_priv->display.hpd_irq_setup)
-                       dev_priv->display.hpd_irq_setup(dev_priv);
-               spin_unlock_irq(&dev_priv->irq_lock);
-
-               ret = __intel_display_resume(dev, state, ctx);
-               if (ret)
-                       DRM_ERROR("Restoring old state failed with %i\n", ret);
-
-               intel_hpd_init(dev_priv);
-       }
-
-       drm_atomic_state_put(state);
-unlock:
-       drm_modeset_drop_locks(ctx);
-       drm_modeset_acquire_fini(ctx);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
-}
-
-static void icl_set_pipe_chicken(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 tmp;
-
-       tmp = I915_READ(PIPE_CHICKEN(pipe));
-
-       /*
-        * Display WA #1153: icl
-        * enable hardware to bypass the alpha math
-        * and rounding for per-pixel values 00 and 0xff
-        */
-       tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
-       /*
-        * Display WA #1605353570: icl
-        * Set the pixel rounding bit to 1 for allowing
-        * passthrough of Frame buffer pixels unmodified
-        * across pipe
-        */
-       tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
-       I915_WRITE(PIPE_CHICKEN(pipe), tmp);
-}
-
-static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
-                                    const struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
-       crtc->base.mode = new_crtc_state->base.mode;
-
-       /*
-        * Update pipe size and adjust fitter if needed: the reason for this is
-        * that in compute_mode_changes we check the native mode (not the pfit
-        * mode) to see if we can flip rather than do a full mode set. In the
-        * fastboot case, we'll flip, but if we don't update the pipesrc and
-        * pfit state, we'll end up with a big fb scanned out into the wrong
-        * sized surface.
-        */
-
-       I915_WRITE(PIPESRC(crtc->pipe),
-                  ((new_crtc_state->pipe_src_w - 1) << 16) |
-                  (new_crtc_state->pipe_src_h - 1));
-
-       /* on skylake this is done by detaching scalers */
-       if (INTEL_GEN(dev_priv) >= 9) {
-               skl_detach_scalers(new_crtc_state);
-
-               if (new_crtc_state->pch_pfit.enabled)
-                       skylake_pfit_enable(new_crtc_state);
-       } else if (HAS_PCH_SPLIT(dev_priv)) {
-               if (new_crtc_state->pch_pfit.enabled)
-                       ironlake_pfit_enable(new_crtc_state);
-               else if (old_crtc_state->pch_pfit.enabled)
-                       ironlake_pfit_disable(old_crtc_state);
-       }
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               icl_set_pipe_chicken(crtc);
-}
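As a small aside on the PIPESRC write above: the value packs (width - 1) into the high 16 bits and (height - 1) into the low 16 bits. A minimal sketch with an assumed 1920x1080 pipe source, for illustration only:

        /* Illustrative only: PIPESRC encoding for an assumed 1920x1080 source. */
        u32 pipesrc = ((1920 - 1) << 16) | (1080 - 1);  /* == 0x077f0437 */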
-
-static void intel_fdi_normal_train(struct intel_crtc *crtc)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 temp;
-
-       /* enable normal train */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (IS_IVYBRIDGE(dev_priv)) {
-               temp &= ~FDI_LINK_TRAIN_NONE_IVB;
-               temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-       }
-       I915_WRITE(reg, temp);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (HAS_PCH_CPT(dev_priv)) {
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_NONE;
-       }
-       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-       /* wait one idle pattern time */
-       POSTING_READ(reg);
-       udelay(1000);
-
-       /* IVB wants error correction enabled */
-       if (IS_IVYBRIDGE(dev_priv))
-               I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
-                          FDI_FE_ERRC_ENABLE);
-}
-
-/* The FDI link training functions for ILK/Ibexpeak. */
-static void ironlake_fdi_link_train(struct intel_crtc *crtc,
-                                   const struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 temp, tries;
-
-       /* FDI needs bits from pipe first */
-       assert_pipe_enabled(dev_priv, pipe);
-
-       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
-          for train result */
-       reg = FDI_RX_IMR(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_RX_SYMBOL_LOCK;
-       temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(reg, temp);
-       I915_READ(reg);
-       udelay(150);
-
-       /* enable CPU FDI TX and PCH FDI RX */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_DP_PORT_WIDTH_MASK;
-       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_1;
-       I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_1;
-       I915_WRITE(reg, temp | FDI_RX_ENABLE);
-
-       POSTING_READ(reg);
-       udelay(150);
-
-       /* Ironlake workaround, enable clock pointer after FDI enable */
-       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
-                  FDI_RX_PHASE_SYNC_POINTER_EN);
-
-       reg = FDI_RX_IIR(pipe);
-       for (tries = 0; tries < 5; tries++) {
-               temp = I915_READ(reg);
-               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-               if ((temp & FDI_RX_BIT_LOCK)) {
-                       DRM_DEBUG_KMS("FDI train 1 done.\n");
-                       I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-                       break;
-               }
-       }
-       if (tries == 5)
-               DRM_ERROR("FDI train 1 fail!\n");
-
-       /* Train 2 */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_2;
-       I915_WRITE(reg, temp);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_2;
-       I915_WRITE(reg, temp);
-
-       POSTING_READ(reg);
-       udelay(150);
-
-       reg = FDI_RX_IIR(pipe);
-       for (tries = 0; tries < 5; tries++) {
-               temp = I915_READ(reg);
-               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-               if (temp & FDI_RX_SYMBOL_LOCK) {
-                       I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-                       DRM_DEBUG_KMS("FDI train 2 done.\n");
-                       break;
-               }
-       }
-       if (tries == 5)
-               DRM_ERROR("FDI train 2 fail!\n");
-
-       DRM_DEBUG_KMS("FDI train done\n");
-
-}
-
-static const int snb_b_fdi_train_param[] = {
-       FDI_LINK_TRAIN_400MV_0DB_SNB_B,
-       FDI_LINK_TRAIN_400MV_6DB_SNB_B,
-       FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
-       FDI_LINK_TRAIN_800MV_0DB_SNB_B,
-};
-
-/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct intel_crtc *crtc,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 temp, i, retry;
-
-       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
-          for train result */
-       reg = FDI_RX_IMR(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_RX_SYMBOL_LOCK;
-       temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(reg, temp);
-
-       POSTING_READ(reg);
-       udelay(150);
-
-       /* enable CPU FDI TX and PCH FDI RX */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_DP_PORT_WIDTH_MASK;
-       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_1;
-       temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-       /* SNB-B */
-       temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-       I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
-       I915_WRITE(FDI_RX_MISC(pipe),
-                  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (HAS_PCH_CPT(dev_priv)) {
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_PATTERN_1;
-       }
-       I915_WRITE(reg, temp | FDI_RX_ENABLE);
-
-       POSTING_READ(reg);
-       udelay(150);
-
-       for (i = 0; i < 4; i++) {
-               reg = FDI_TX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-               temp |= snb_b_fdi_train_param[i];
-               I915_WRITE(reg, temp);
-
-               POSTING_READ(reg);
-               udelay(500);
-
-               for (retry = 0; retry < 5; retry++) {
-                       reg = FDI_RX_IIR(pipe);
-                       temp = I915_READ(reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-                       if (temp & FDI_RX_BIT_LOCK) {
-                               I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-                               DRM_DEBUG_KMS("FDI train 1 done.\n");
-                               break;
-                       }
-                       udelay(50);
-               }
-               if (retry < 5)
-                       break;
-       }
-       if (i == 4)
-               DRM_ERROR("FDI train 1 fail!\n");
-
-       /* Train 2 */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_2;
-       if (IS_GEN(dev_priv, 6)) {
-               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-               /* SNB-B */
-               temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-       }
-       I915_WRITE(reg, temp);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (HAS_PCH_CPT(dev_priv)) {
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_PATTERN_2;
-       }
-       I915_WRITE(reg, temp);
-
-       POSTING_READ(reg);
-       udelay(150);
-
-       for (i = 0; i < 4; i++) {
-               reg = FDI_TX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-               temp |= snb_b_fdi_train_param[i];
-               I915_WRITE(reg, temp);
-
-               POSTING_READ(reg);
-               udelay(500);
-
-               for (retry = 0; retry < 5; retry++) {
-                       reg = FDI_RX_IIR(pipe);
-                       temp = I915_READ(reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-                       if (temp & FDI_RX_SYMBOL_LOCK) {
-                               I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-                               DRM_DEBUG_KMS("FDI train 2 done.\n");
-                               break;
-                       }
-                       udelay(50);
-               }
-               if (retry < 5)
-                       break;
-       }
-       if (i == 4)
-               DRM_ERROR("FDI train 2 fail!\n");
-
-       DRM_DEBUG_KMS("FDI train done.\n");
-}
-
-/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
-                                     const struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
-       i915_reg_t reg;
-       u32 temp, i, j;
-
-       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
-          for train result */
-       reg = FDI_RX_IMR(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_RX_SYMBOL_LOCK;
-       temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(reg, temp);
-
-       POSTING_READ(reg);
-       udelay(150);
-
-       DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
-                     I915_READ(FDI_RX_IIR(pipe)));
-
-       /* Try each vswing and preemphasis setting twice before moving on */
-       for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
-               /* disable first in case we need to retry */
-               reg = FDI_TX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
-               temp &= ~FDI_TX_ENABLE;
-               I915_WRITE(reg, temp);
-
-               reg = FDI_RX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~FDI_LINK_TRAIN_AUTO;
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp &= ~FDI_RX_ENABLE;
-               I915_WRITE(reg, temp);
-
-               /* enable CPU FDI TX and PCH FDI RX */
-               reg = FDI_TX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~FDI_DP_PORT_WIDTH_MASK;
-               temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-               temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
-               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-               temp |= snb_b_fdi_train_param[j/2];
-               temp |= FDI_COMPOSITE_SYNC;
-               I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
-               I915_WRITE(FDI_RX_MISC(pipe),
-                          FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
-               reg = FDI_RX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-               temp |= FDI_COMPOSITE_SYNC;
-               I915_WRITE(reg, temp | FDI_RX_ENABLE);
-
-               POSTING_READ(reg);
-               udelay(1); /* should be 0.5us */
-
-               for (i = 0; i < 4; i++) {
-                       reg = FDI_RX_IIR(pipe);
-                       temp = I915_READ(reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-                       if (temp & FDI_RX_BIT_LOCK ||
-                           (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
-                               I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-                               DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
-                                             i);
-                               break;
-                       }
-                       udelay(1); /* should be 0.5us */
-               }
-               if (i == 4) {
-                       DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
-                       continue;
-               }
-
-               /* Train 2 */
-               reg = FDI_TX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~FDI_LINK_TRAIN_NONE_IVB;
-               temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
-               I915_WRITE(reg, temp);
-
-               reg = FDI_RX_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-               I915_WRITE(reg, temp);
-
-               POSTING_READ(reg);
-               udelay(2); /* should be 1.5us */
-
-               for (i = 0; i < 4; i++) {
-                       reg = FDI_RX_IIR(pipe);
-                       temp = I915_READ(reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-                       if (temp & FDI_RX_SYMBOL_LOCK ||
-                           (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
-                               I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-                               DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
-                                             i);
-                               goto train_done;
-                       }
-                       udelay(2); /* should be 1.5us */
-               }
-               if (i == 4)
-                       DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
-       }
-
-train_done:
-       DRM_DEBUG_KMS("FDI train done.\n");
-}
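To make the retry indexing above concrete: with the four entries of snb_b_fdi_train_param, j runs from 0 to 7 and snb_b_fdi_train_param[j / 2] selects each vswing/pre-emphasis level twice before the loop gives up. A minimal sketch of that mapping, illustrative only:

        unsigned int j, n = ARRAY_SIZE(snb_b_fdi_train_param);  /* n == 4 */

        for (j = 0; j < n * 2; j++)
                /* prints param indices 0, 0, 1, 1, 2, 2, 3, 3 */
                DRM_DEBUG_KMS("attempt %u -> train param index %u\n", j, j / 2);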
-
-static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-       int pipe = intel_crtc->pipe;
-       i915_reg_t reg;
-       u32 temp;
-
-       /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
-       temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-       I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
-
-       POSTING_READ(reg);
-       udelay(200);
-
-       /* Switch from Rawclk to PCDclk */
-       temp = I915_READ(reg);
-       I915_WRITE(reg, temp | FDI_PCDCLK);
-
-       POSTING_READ(reg);
-       udelay(200);
-
-       /* Enable CPU FDI TX PLL, always on for Ironlake */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-               I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
-
-               POSTING_READ(reg);
-               udelay(100);
-       }
-}
-
-static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
-{
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = intel_crtc->pipe;
-       i915_reg_t reg;
-       u32 temp;
-
-       /* Switch from PCDclk to Rawclk */
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
-       /* Disable CPU FDI TX PLL */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
-       POSTING_READ(reg);
-       udelay(100);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
-       /* Wait for the clocks to turn off. */
-       POSTING_READ(reg);
-       udelay(100);
-}
-
-static void ironlake_fdi_disable(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       i915_reg_t reg;
-       u32 temp;
-
-       /* disable CPU FDI tx and PCH FDI rx */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
-       POSTING_READ(reg);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~(0x7 << 16);
-       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-       I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
-       POSTING_READ(reg);
-       udelay(100);
-
-       /* Ironlake workaround, disable clock pointer after downing FDI */
-       if (HAS_PCH_IBX(dev_priv))
-               I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-
-       /* still set train pattern 1 */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_PATTERN_1;
-       I915_WRITE(reg, temp);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (HAS_PCH_CPT(dev_priv)) {
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_PATTERN_1;
-       }
-       /* BPC in FDI rx is consistent with that in PIPECONF */
-       temp &= ~(0x07 << 16);
-       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-       I915_WRITE(reg, temp);
-
-       POSTING_READ(reg);
-       udelay(100);
-}
-
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
-{
-       struct drm_crtc *crtc;
-       bool cleanup_done;
-
-       drm_for_each_crtc(crtc, &dev_priv->drm) {
-               struct drm_crtc_commit *commit;
-               spin_lock(&crtc->commit_lock);
-               commit = list_first_entry_or_null(&crtc->commit_list,
-                                                 struct drm_crtc_commit, commit_entry);
-               cleanup_done = commit ?
-                       try_wait_for_completion(&commit->cleanup_done) : true;
-               spin_unlock(&crtc->commit_lock);
-
-               if (cleanup_done)
-                       continue;
-
-               drm_crtc_wait_one_vblank(crtc);
-
-               return true;
-       }
-
-       return false;
-}
-
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
-{
-       u32 temp;
-
-       I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
-       temp |= SBI_SSCCTL_DISABLE;
-       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
-       mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int clock = crtc_state->base.adjusted_mode.crtc_clock;
-       u32 divsel, phaseinc, auxdiv, phasedir = 0;
-       u32 temp;
-
-       lpt_disable_iclkip(dev_priv);
-
-       /* The iCLK virtual clock root frequency is in MHz,
-        * but the adjusted_mode->crtc_clock is in kHz. To get the
-        * divisors, it is necessary to divide one by the other, so we
-        * convert the virtual clock to kHz here for higher
-        * precision.
-        */
-       for (auxdiv = 0; auxdiv < 2; auxdiv++) {
-               u32 iclk_virtual_root_freq = 172800 * 1000;
-               u32 iclk_pi_range = 64;
-               u32 desired_divisor;
-
-               desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
-                                                   clock << auxdiv);
-               divsel = (desired_divisor / iclk_pi_range) - 2;
-               phaseinc = desired_divisor % iclk_pi_range;
-
-               /*
-                * Near 20MHz is a corner case which is
-                * out of range for the 7-bit divisor
-                */
-               if (divsel <= 0x7f)
-                       break;
-       }
-
-       /* This should not happen with any sane values */
-       WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
-               ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
-       WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
-               ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
-
-       DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
-                       clock,
-                       auxdiv,
-                       divsel,
-                       phasedir,
-                       phaseinc);
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* Program SSCDIVINTPHASE6 */
-       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
-       temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
-       temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
-       temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
-       temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
-       temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
-       temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
-
-       /* Program SSCAUXDIV */
-       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
-       temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
-       temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-       intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
-
-       /* Enable modulator and associated divider */
-       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
-       temp &= ~SBI_SSCCTL_DISABLE;
-       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
-       mutex_unlock(&dev_priv->sb_lock);
-
-       /* Wait for initialization time */
-       udelay(24);
-
-       I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
-}
-
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
-{
-       u32 divsel, phaseinc, auxdiv;
-       u32 iclk_virtual_root_freq = 172800 * 1000;
-       u32 iclk_pi_range = 64;
-       u32 desired_divisor;
-       u32 temp;
-
-       if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
-               return 0;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
-       if (temp & SBI_SSCCTL_DISABLE) {
-               mutex_unlock(&dev_priv->sb_lock);
-               return 0;
-       }
-
-       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
-       divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
-               SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
-       phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
-               SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
-
-       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
-       auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
-               SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
-
-       mutex_unlock(&dev_priv->sb_lock);
-
-       desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
-
-       return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
-                                desired_divisor << auxdiv);
-}
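A worked example of the divisor math shared by lpt_program_iclkip() and lpt_get_iclkip() above, using an assumed crtc_clock of 108000 kHz purely for illustration:

        u32 iclk_virtual_root_freq = 172800 * 1000;     /* kHz, as above */
        u32 iclk_pi_range = 64;
        u32 clock = 108000;                             /* assumed crtc_clock, kHz */
        u32 auxdiv = 0;

        /* Forward (lpt_program_iclkip): derive the SBI divider fields. */
        u32 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                                clock << auxdiv);       /* 1600 */
        u32 divsel = desired_divisor / iclk_pi_range - 2;               /* 23, fits the 7-bit field */
        u32 phaseinc = desired_divisor % iclk_pi_range;                 /* 0 */

        /* Reverse (lpt_get_iclkip): recover the clock from those fields. */
        u32 readback = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                         ((divsel + 2) * iclk_pi_range + phaseinc)
                                         << auxdiv);                    /* 108000 kHz */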
-
-static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
-                                               enum pipe pch_transcoder)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
-       I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
-                  I915_READ(HTOTAL(cpu_transcoder)));
-       I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
-                  I915_READ(HBLANK(cpu_transcoder)));
-       I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
-                  I915_READ(HSYNC(cpu_transcoder)));
-
-       I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
-                  I915_READ(VTOTAL(cpu_transcoder)));
-       I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
-                  I915_READ(VBLANK(cpu_transcoder)));
-       I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
-                  I915_READ(VSYNC(cpu_transcoder)));
-       I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
-                  I915_READ(VSYNCSHIFT(cpu_transcoder)));
-}
-
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
-{
-       u32 temp;
-
-       temp = I915_READ(SOUTH_CHICKEN1);
-       if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
-               return;
-
-       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
-       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
-       temp &= ~FDI_BC_BIFURCATION_SELECT;
-       if (enable)
-               temp |= FDI_BC_BIFURCATION_SELECT;
-
-       DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
-       I915_WRITE(SOUTH_CHICKEN1, temp);
-       POSTING_READ(SOUTH_CHICKEN1);
-}
-
-static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       switch (crtc->pipe) {
-       case PIPE_A:
-               break;
-       case PIPE_B:
-               if (crtc_state->fdi_lanes > 2)
-                       cpt_set_fdi_bc_bifurcation(dev_priv, false);
-               else
-                       cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
-               break;
-       case PIPE_C:
-               cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
-               break;
-       default:
-               BUG();
-       }
-}
-
-/*
- * Finds the encoder associated with the given CRTC. This can only be
- * used when we know that the CRTC isn't feeding multiple encoders!
- */
-static struct intel_encoder *
-intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
-                          const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const struct drm_connector_state *connector_state;
-       const struct drm_connector *connector;
-       struct intel_encoder *encoder = NULL;
-       int num_encoders = 0;
-       int i;
-
-       for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               num_encoders++;
-       }
-
-       WARN(num_encoders != 1, "%d encoders for pipe %c\n",
-            num_encoders, pipe_name(crtc->pipe));
-
-       return encoder;
-}
-
-/*
- * Enable PCH resources required for PCH ports:
- *   - PCH PLLs
- *   - FDI training & RX/TX
- *   - update transcoder timings
- *   - DP transcoding bits
- *   - transcoder
- */
-static void ironlake_pch_enable(const struct intel_atomic_state *state,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = crtc->pipe;
-       u32 temp;
-
-       assert_pch_transcoder_disabled(dev_priv, pipe);
-
-       if (IS_IVYBRIDGE(dev_priv))
-               ivybridge_update_fdi_bc_bifurcation(crtc_state);
-
-       /* Write the TU size bits before fdi link training, so that error
-        * detection works. */
-       I915_WRITE(FDI_RX_TUSIZE1(pipe),
-                  I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
-
-       /* For PCH output, training FDI link */
-       dev_priv->display.fdi_link_train(crtc, crtc_state);
-
-       /* We need to program the right clock selection before writing the pixel
-        * multiplier into the DPLL. */
-       if (HAS_PCH_CPT(dev_priv)) {
-               u32 sel;
-
-               temp = I915_READ(PCH_DPLL_SEL);
-               temp |= TRANS_DPLL_ENABLE(pipe);
-               sel = TRANS_DPLLB_SEL(pipe);
-               if (crtc_state->shared_dpll ==
-                   intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
-                       temp |= sel;
-               else
-                       temp &= ~sel;
-               I915_WRITE(PCH_DPLL_SEL, temp);
-       }
-
-       /* XXX: pch pll's can be enabled any time before we enable the PCH
-        * transcoder, and we actually should do this to not upset any PCH
-        * transcoder that already uses the clock when we share it.
-        *
-        * Note that enable_shared_dpll tries to do the right thing, but
-        * get_shared_dpll unconditionally resets the pll - we need that to have
-        * the right LVDS enable sequence. */
-       intel_enable_shared_dpll(crtc_state);
-
-       /* set transcoder timing, panel must allow it */
-       assert_panel_unlocked(dev_priv, pipe);
-       ironlake_pch_transcoder_set_timings(crtc_state, pipe);
-
-       intel_fdi_normal_train(crtc);
-
-       /* For PCH DP, enable TRANS_DP_CTL */
-       if (HAS_PCH_CPT(dev_priv) &&
-           intel_crtc_has_dp_encoder(crtc_state)) {
-               const struct drm_display_mode *adjusted_mode =
-                       &crtc_state->base.adjusted_mode;
-               u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
-               i915_reg_t reg = TRANS_DP_CTL(pipe);
-               enum port port;
-
-               temp = I915_READ(reg);
-               temp &= ~(TRANS_DP_PORT_SEL_MASK |
-                         TRANS_DP_SYNC_MASK |
-                         TRANS_DP_BPC_MASK);
-               temp |= TRANS_DP_OUTPUT_ENABLE;
-               temp |= bpc << 9; /* same format but at 11:9 */
-
-               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-                       temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-                       temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
-               port = intel_get_crtc_new_encoder(state, crtc_state)->port;
-               WARN_ON(port < PORT_B || port > PORT_D);
-               temp |= TRANS_DP_PORT_SEL(port);
-
-               I915_WRITE(reg, temp);
-       }
-
-       ironlake_enable_pch_transcoder(crtc_state);
-}
-
-static void lpt_pch_enable(const struct intel_atomic_state *state,
-                          const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
-       assert_pch_transcoder_disabled(dev_priv, PIPE_A);
-
-       lpt_program_iclkip(crtc_state);
-
-       /* Set transcoder timing. */
-       ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
-
-       lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
-}
-
-static void cpt_verify_modeset(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       i915_reg_t dslreg = PIPEDSL(pipe);
-       u32 temp;
-
-       temp = I915_READ(dslreg);
-       udelay(500);
-       if (wait_for(I915_READ(dslreg) != temp, 5)) {
-               if (wait_for(I915_READ(dslreg) != temp, 5))
-                       DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
-       }
-}
-
-/*
- * The hardware phase 0.0 refers to the center of the pixel.
- * We want to start from the top/left edge which is phase
- * -0.5. That matches how the hardware calculates the scaling
- * factors (from top-left of the first pixel to bottom-right
- * of the last pixel, as opposed to the pixel centers).
- *
- * For 4:2:0 subsampled chroma planes we obviously have to
- * adjust that so that the chroma sample position lands in
- * the right spot.
- *
- * Note that for packed YCbCr 4:2:2 formats there is no way to
- * control chroma siting. The hardware simply replicates the
- * chroma samples for both of the luma samples, and thus we don't
- * actually get the expected MPEG2 chroma siting convention :(
- * The same behaviour is observed on pre-SKL platforms as well.
- *
- * Theory behind the formula (note that we ignore sub-pixel
- * source coordinates):
- * s = source sample position
- * d = destination sample position
- *
- * Downscaling 4:1:
- * -0.5
- * | 0.0
- * | |     1.5 (initial phase)
- * | |     |
- * v v     v
- * | s | s | s | s |
- * |       d       |
- *
- * Upscaling 1:4:
- * -0.5
- * | -0.375 (initial phase)
- * | |     0.0
- * | |     |
- * v v     v
- * |       s       |
- * | d | d | d | d |
- */
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
-{
-       int phase = -0x8000;
-       u16 trip = 0;
-
-       if (chroma_cosited)
-               phase += (sub - 1) * 0x8000 / sub;
-
-       phase += scale / (2 * sub);
-
-       /*
-        * Hardware initial phase limited to [-0.5:1.5].
-        * Since the max hardware scale factor is 3.0, we
-        * should never actually exceed 1.0 here.
-        */
-       WARN_ON(phase < -0x8000 || phase > 0x18000);
-
-       if (phase < 0)
-               phase = 0x10000 + phase;
-       else
-               trip = PS_PHASE_TRIP;
-
-       return ((phase >> 2) & PS_PHASE_MASK) | trip;
-}
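Tying the theory comment to the code, here is the "Upscaling 1:4" case it describes, evaluated for a luma/RGB sample (sub = 1, not chroma co-sited); the scale argument is the .16 fixed point ratio used by the callers, so a 1:4 upscale is 0x4000. Values are illustrative only:

        u16 ps_phase = skl_scaler_calc_phase(1, 0x4000, false);
        /*
         * Internally: phase = -0x8000 + 0x4000 / 2 = -0x6000, i.e. the -0.375
         * initial phase shown in the "Upscaling 1:4" diagram above; negative
         * phases wrap to 0x10000 - 0x6000 = 0xa000 and PS_PHASE_TRIP stays clear.
         */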
-
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
-
-static int
-skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
-                 unsigned int scaler_user, int *scaler_id,
-                 int src_w, int src_h, int dst_w, int dst_h,
-                 const struct drm_format_info *format, bool need_scaler)
-{
-       struct intel_crtc_scaler_state *scaler_state =
-               &crtc_state->scaler_state;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->base.adjusted_mode;
-
-       /*
-        * Src coordinates are already rotated by 270 degrees for
-        * the 90/270 degree plane rotation cases (to match the
-        * GTT mapping), hence no need to account for rotation here.
-        */
-       if (src_w != dst_w || src_h != dst_h)
-               need_scaler = true;
-
-       /*
-        * Scaling/fitting not supported in IF-ID mode in GEN9+
-        * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
-        * Once NV12 is enabled, handle it here while allocating scaler
-        * for NV12.
-        */
-       if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
-           need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
-               return -EINVAL;
-       }
-
-       /*
-        * If the plane is being disabled, the scaler is no longer required,
-        * or a detach is forced:
-        *  - free the scaler bound to this plane/crtc
-        *  - to do this, update crtc->scaler_usage
-        *
-        * Here the scaler state in crtc_state is marked free so that the
-        * scaler can be assigned to another user. The actual register
-        * update to free the scaler is done in plane/panel-fit programming.
-        * For this purpose crtc/plane_state->scaler_id isn't reset here.
-        */
-       if (force_detach || !need_scaler) {
-               if (*scaler_id >= 0) {
-                       scaler_state->scaler_users &= ~(1 << scaler_user);
-                       scaler_state->scalers[*scaler_id].in_use = 0;
-
-                       DRM_DEBUG_KMS("scaler_user index %u.%u: "
-                               "Staged freeing scaler id %d scaler_users = 0x%x\n",
-                               intel_crtc->pipe, scaler_user, *scaler_id,
-                               scaler_state->scaler_users);
-                       *scaler_id = -1;
-               }
-               return 0;
-       }
-
-       if (format && is_planar_yuv_format(format->format) &&
-           (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
-               DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
-               return -EINVAL;
-       }
-
-       /* range checks */
-       if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
-           dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-           (INTEL_GEN(dev_priv) >= 11 &&
-            (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
-             dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
-           (INTEL_GEN(dev_priv) < 11 &&
-            (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
-             dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
-               DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
-                       "size is out of scaler range\n",
-                       intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
-               return -EINVAL;
-       }
-
-       /* mark this plane as a scaler user in crtc_state */
-       scaler_state->scaler_users |= (1 << scaler_user);
-       DRM_DEBUG_KMS("scaler_user index %u.%u: "
-               "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
-               intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
-               scaler_state->scaler_users);
-
-       return 0;
-}
-
-/**
- * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
- *
- * @state: crtc's scaler state
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-int skl_update_scaler_crtc(struct intel_crtc_state *state)
-{
-       const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
-       bool need_scaler = false;
-
-       if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
-               need_scaler = true;
-
-       return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-                                &state->scaler_state.scaler_id,
-                                state->pipe_src_w, state->pipe_src_h,
-                                adjusted_mode->crtc_hdisplay,
-                                adjusted_mode->crtc_vdisplay, NULL, need_scaler);
-}
-
-/**
- * skl_update_scaler_plane - Stages update to scaler state for a given plane.
- * @crtc_state: crtc's scaler state
- * @plane_state: atomic plane state to update
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
-                                  struct intel_plane_state *plane_state)
-{
-       struct intel_plane *intel_plane =
-               to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       int ret;
-       bool force_detach = !fb || !plane_state->base.visible;
-       bool need_scaler = false;
-
-       /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
-       if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
-           fb && is_planar_yuv_format(fb->format->format))
-               need_scaler = true;
-
-       ret = skl_update_scaler(crtc_state, force_detach,
-                               drm_plane_index(&intel_plane->base),
-                               &plane_state->scaler_id,
-                               drm_rect_width(&plane_state->base.src) >> 16,
-                               drm_rect_height(&plane_state->base.src) >> 16,
-                               drm_rect_width(&plane_state->base.dst),
-                               drm_rect_height(&plane_state->base.dst),
-                               fb ? fb->format : NULL, need_scaler);
-
-       if (ret || plane_state->scaler_id < 0)
-               return ret;
-
-       /* check colorkey */
-       if (plane_state->ckey.flags) {
-               DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
-                             intel_plane->base.base.id,
-                             intel_plane->base.name);
-               return -EINVAL;
-       }
-
-       /* Check src format */
-       switch (fb->format->format) {
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_ABGR8888:
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_XBGR16161616F:
-       case DRM_FORMAT_ABGR16161616F:
-       case DRM_FORMAT_XRGB16161616F:
-       case DRM_FORMAT_ARGB16161616F:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_P010:
-       case DRM_FORMAT_P012:
-       case DRM_FORMAT_P016:
-       case DRM_FORMAT_Y210:
-       case DRM_FORMAT_Y212:
-       case DRM_FORMAT_Y216:
-       case DRM_FORMAT_XVYU2101010:
-       case DRM_FORMAT_XVYU12_16161616:
-       case DRM_FORMAT_XVYU16161616:
-               break;
-       default:
-               DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
-                             intel_plane->base.base.id, intel_plane->base.name,
-                             fb->base.id, fb->format->format);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void skylake_scaler_disable(struct intel_crtc *crtc)
-{
-       int i;
-
-       for (i = 0; i < crtc->num_scalers; i++)
-               skl_detach_scaler(crtc, i);
-}
-
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       const struct intel_crtc_scaler_state *scaler_state =
-               &crtc_state->scaler_state;
-
-       if (crtc_state->pch_pfit.enabled) {
-               u16 uv_rgb_hphase, uv_rgb_vphase;
-               int pfit_w, pfit_h, hscale, vscale;
-               int id;
-
-               if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
-                       return;
-
-               pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
-               pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
-
-               hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
-               vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
-
-               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
-               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
-
-               id = scaler_state->scaler_id;
-               I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
-                       PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
-               I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
-                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-               I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
-                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-               I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
-               I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
-       }
-}
-
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int pipe = crtc->pipe;
-
-       if (crtc_state->pch_pfit.enabled) {
-               /* Force use of hard-coded filter coefficients
-                * as some pre-programmed values are broken,
-                * e.g. x201.
-                */
-               if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
-                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
-                                                PF_PIPE_SEL_IVB(pipe));
-               else
-                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
-               I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
-               I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
-       }
-}
-
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       if (!crtc_state->ips_enabled)
-               return;
-
-       /*
-        * We can only enable IPS after we enable a plane and wait for a vblank.
-        * This function is called from post_plane_update, which is run after
-        * a vblank wait.
-        */
-       WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
-
-       if (IS_BROADWELL(dev_priv)) {
-               WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
-                                               IPS_ENABLE | IPS_PCODE_CONTROL));
-               /* Quoting Art Runyan: "its not safe to expect any particular
-                * value in IPS_CTL bit 31 after enabling IPS through the
-                * mailbox." Moreover, the mailbox may return a bogus state,
-                * so we need to just enable it and continue on.
-                */
-       } else {
-               I915_WRITE(IPS_CTL, IPS_ENABLE);
-               /* The bit only becomes 1 in the next vblank, so this wait here
-                * is essentially intel_wait_for_vblank. If we don't have this
-                * and don't wait for vblanks until the end of crtc_enable, then
-                * the HW state readout code will complain that the expected
-                * IPS_CTL value is not the one we read. */
-               if (intel_wait_for_register(&dev_priv->uncore,
-                                           IPS_CTL, IPS_ENABLE, IPS_ENABLE,
-                                           50))
-                       DRM_ERROR("Timed out waiting for IPS enable\n");
-       }
-}
-
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       if (!crtc_state->ips_enabled)
-               return;
-
-       if (IS_BROADWELL(dev_priv)) {
-               WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
-               /*
-                * Wait for PCODE to finish disabling IPS. The BSpec specified
-                * 42ms timeout value leads to occasional timeouts so use 100ms
-                * instead.
-                */
-               if (intel_wait_for_register(&dev_priv->uncore,
-                                           IPS_CTL, IPS_ENABLE, 0,
-                                           100))
-                       DRM_ERROR("Timed out waiting for IPS disable\n");
-       } else {
-               I915_WRITE(IPS_CTL, 0);
-               POSTING_READ(IPS_CTL);
-       }
-
-       /* We need to wait for a vblank before we can disable the plane. */
-       intel_wait_for_vblank(dev_priv, crtc->pipe);
-}
-
-static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
-{
-       if (intel_crtc->overlay) {
-               struct drm_device *dev = intel_crtc->base.dev;
-
-               mutex_lock(&dev->struct_mutex);
-               (void) intel_overlay_switch_off(intel_crtc->overlay);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
-       /* Let userspace switch the overlay on again. In most cases userspace
-        * has to recompute where to put it anyway.
-        */
-}
-
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- * @new_crtc_state: the enabling state
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS.  Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-static void
-intel_post_enable_primary(struct drm_crtc *crtc,
-                         const struct intel_crtc_state *new_crtc_state)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-
-       /*
-        * Gen2 reports pipe underruns whenever all planes are disabled.
-        * So don't enable underrun reporting before at least some planes
-        * are enabled.
-        * FIXME: Need to fix the logic to work when we turn off all planes
-        * but leave the pipe running.
-        */
-       if (IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
-       /* Underruns don't always raise interrupts, so check manually. */
-       intel_check_cpu_fifo_underruns(dev_priv);
-       intel_check_pch_fifo_underruns(dev_priv);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-
-       /*
-        * Gen2 reports pipe underruns whenever all planes are disabled.
-        * So disable underrun reporting before all the planes get disabled.
-        */
-       if (IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
-       hsw_disable_ips(to_intel_crtc_state(crtc->state));
-
-       /*
-        * Vblank time updates from the shadow to live plane control register
-        * are blocked if the memory self-refresh mode is active at that
-        * moment. So to make sure the plane gets truly disabled, first
-        * disable the self-refresh mode. The self-refresh enable bit in turn
-        * will be checked/applied by the HW only at the next frame start
-        * event which is after the vblank start event, so we need to have a
-        * wait-for-vblank between disabling the plane and the pipe.
-        */
-       if (HAS_GMCH(dev_priv) &&
-           intel_set_memory_cxsr(dev_priv, false))
-               intel_wait_for_vblank(dev_priv, pipe);
-}
-
-static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
-                                      const struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!old_crtc_state->ips_enabled)
-               return false;
-
-       if (needs_modeset(&new_crtc_state->base))
-               return true;
-
-       /*
-        * Workaround: Do not read or write the pipe palette/gamma data while
-        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-        *
-        * Disable IPS before we program the LUT.
-        */
-       if (IS_HASWELL(dev_priv) &&
-           (new_crtc_state->base.color_mgmt_changed ||
-            new_crtc_state->update_pipe) &&
-           new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
-               return true;
-
-       return !new_crtc_state->ips_enabled;
-}
-
-static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
-                                      const struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!new_crtc_state->ips_enabled)
-               return false;
-
-       if (needs_modeset(&new_crtc_state->base))
-               return true;
-
-       /*
-        * Workaround: Do not read or write the pipe palette/gamma data while
-        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-        *
-        * Re-enable IPS after the LUT has been programmed.
-        */
-       if (IS_HASWELL(dev_priv) &&
-           (new_crtc_state->base.color_mgmt_changed ||
-            new_crtc_state->update_pipe) &&
-           new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
-               return true;
-
-       /*
-        * We can't read out IPS on broadwell, assume the worst and
-        * forcibly enable IPS on the first fastset.
-        */
-       if (new_crtc_state->update_pipe &&
-           old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
-               return true;
-
-       return !old_crtc_state->ips_enabled;
-}
-
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
-                         const struct intel_crtc_state *crtc_state)
-{
-       if (!crtc_state->nv12_planes)
-               return false;
-
-       /* WA Display #0827: Gen9:all */
-       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
-               return true;
-
-       return false;
-}
-
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
-                              const struct intel_crtc_state *crtc_state)
-{
-       /* Wa_2006604312:icl */
-       if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
-               return true;
-
-       return false;
-}
-
-static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *old_state = old_crtc_state->base.state;
-       struct intel_crtc_state *pipe_config =
-               intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
-                                               crtc);
-       struct drm_plane *primary = crtc->base.primary;
-       struct drm_plane_state *old_primary_state =
-               drm_atomic_get_old_plane_state(old_state, primary);
-
-       intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
-
-       if (pipe_config->update_wm_post && pipe_config->base.active)
-               intel_update_watermarks(crtc);
-
-       if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
-               hsw_enable_ips(pipe_config);
-
-       if (old_primary_state) {
-               struct drm_plane_state *new_primary_state =
-                       drm_atomic_get_new_plane_state(old_state, primary);
-
-               intel_fbc_post_update(crtc);
-
-               if (new_primary_state->visible &&
-                   (needs_modeset(&pipe_config->base) ||
-                    !old_primary_state->visible))
-                       intel_post_enable_primary(&crtc->base, pipe_config);
-       }
-
-       if (needs_nv12_wa(dev_priv, old_crtc_state) &&
-           !needs_nv12_wa(dev_priv, pipe_config))
-               skl_wa_827(dev_priv, crtc->pipe, false);
-
-       if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-           !needs_scalerclk_wa(dev_priv, pipe_config))
-               icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
-}
-
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
-                                  struct intel_crtc_state *pipe_config)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *old_state = old_crtc_state->base.state;
-       struct drm_plane *primary = crtc->base.primary;
-       struct drm_plane_state *old_primary_state =
-               drm_atomic_get_old_plane_state(old_state, primary);
-       bool modeset = needs_modeset(&pipe_config->base);
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-
-       if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
-               hsw_disable_ips(old_crtc_state);
-
-       if (old_primary_state) {
-               struct intel_plane_state *new_primary_state =
-                       intel_atomic_get_new_plane_state(old_intel_state,
-                                                        to_intel_plane(primary));
-
-               intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
-               /*
-                * Gen2 reports pipe underruns whenever all planes are disabled.
-                * So disable underrun reporting before all the planes get disabled.
-                */
-               if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
-                   (modeset || !new_primary_state->base.visible))
-                       intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-       }
-
-       /* Display WA 827 */
-       if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
-           needs_nv12_wa(dev_priv, pipe_config))
-               skl_wa_827(dev_priv, crtc->pipe, true);
-
-       /* Wa_2006604312:icl */
-       if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-           needs_scalerclk_wa(dev_priv, pipe_config))
-               icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
-
-       /*
-        * Vblank time updates from the shadow to live plane control register
-        * are blocked if the memory self-refresh mode is active at that
-        * moment. So to make sure the plane gets truly disabled, first
-        * disable the self-refresh mode. The self-refresh enable bit in turn
-        * will be checked/applied by the HW only at the next frame start
-        * event which is after the vblank start event, so we need to have a
-        * wait-for-vblank between disabling the plane and the pipe.
-        */
-       if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
-           pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
-               intel_wait_for_vblank(dev_priv, crtc->pipe);
-
-       /*
-        * IVB workaround: must disable low power watermarks for at least
-        * one frame before enabling scaling.  LP watermarks can be re-enabled
-        * when scaling is disabled.
-        *
-        * WaCxSRDisabledForSpriteScaling:ivb
-        */
-       if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
-           old_crtc_state->base.active)
-               intel_wait_for_vblank(dev_priv, crtc->pipe);
-
-       /*
-        * If we're doing a modeset, we're done.  No need to do any pre-vblank
-        * watermark programming here.
-        */
-       if (needs_modeset(&pipe_config->base))
-               return;
-
-       /*
-        * For platforms that support atomic watermarks, program the
-        * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
-        * will be the intermediate values that are safe for both pre- and
-        * post- vblank; when vblank happens, the 'active' values will be set
-        * to the final 'target' values and we'll do this again to get the
-        * optimal watermarks.  For gen9+ platforms, the values we program here
-        * will be the final target values which will get automatically latched
-        * at vblank time; no further programming will be necessary.
-        *
-        * If a platform hasn't been transitioned to atomic watermarks yet,
-        * we'll continue to update watermarks the old way, if flags tell
-        * us to.
-        */
-       if (dev_priv->display.initial_watermarks != NULL)
-               dev_priv->display.initial_watermarks(old_intel_state,
-                                                    pipe_config);
-       else if (pipe_config->update_wm_pre)
-               intel_update_watermarks(crtc);
-}
-
-static void intel_crtc_disable_planes(struct intel_atomic_state *state,
-                                     struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       unsigned int update_mask = new_crtc_state->update_planes;
-       const struct intel_plane_state *old_plane_state;
-       struct intel_plane *plane;
-       unsigned fb_bits = 0;
-       int i;
-
-       intel_crtc_dpms_overlay_disable(crtc);
-
-       for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
-               if (crtc->pipe != plane->pipe ||
-                   !(update_mask & BIT(plane->id)))
-                       continue;
-
-               intel_disable_plane(plane, new_crtc_state);
-
-               if (old_plane_state->base.visible)
-                       fb_bits |= plane->frontbuffer_bit;
-       }
-
-       intel_frontbuffer_flip(dev_priv, fb_bits);
-}
-
-static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
-                                         struct intel_crtc_state *crtc_state,
-                                         struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(conn_state->best_encoder);
-
-               if (conn_state->crtc != crtc)
-                       continue;
-
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder, crtc_state, conn_state);
-       }
-}
-
-static void intel_encoders_pre_enable(struct drm_crtc *crtc,
-                                     struct intel_crtc_state *crtc_state,
-                                     struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(conn_state->best_encoder);
-
-               if (conn_state->crtc != crtc)
-                       continue;
-
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder, crtc_state, conn_state);
-       }
-}
-
-static void intel_encoders_enable(struct drm_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state,
-                                 struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(conn_state->best_encoder);
-
-               if (conn_state->crtc != crtc)
-                       continue;
-
-               if (encoder->enable)
-                       encoder->enable(encoder, crtc_state, conn_state);
-               intel_opregion_notify_encoder(encoder, true);
-       }
-}
-
-static void intel_encoders_disable(struct drm_crtc *crtc,
-                                  struct intel_crtc_state *old_crtc_state,
-                                  struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *old_conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(old_conn_state->best_encoder);
-
-               if (old_conn_state->crtc != crtc)
-                       continue;
-
-               intel_opregion_notify_encoder(encoder, false);
-               if (encoder->disable)
-                       encoder->disable(encoder, old_crtc_state, old_conn_state);
-       }
-}
-
-static void intel_encoders_post_disable(struct drm_crtc *crtc,
-                                       struct intel_crtc_state *old_crtc_state,
-                                       struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *old_conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(old_conn_state->best_encoder);
-
-               if (old_conn_state->crtc != crtc)
-                       continue;
-
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder, old_crtc_state, old_conn_state);
-       }
-}
-
-static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
-                                           struct intel_crtc_state *old_crtc_state,
-                                           struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *old_conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(old_conn_state->best_encoder);
-
-               if (old_conn_state->crtc != crtc)
-                       continue;
-
-               if (encoder->post_pll_disable)
-                       encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
-       }
-}
-
-static void intel_encoders_update_pipe(struct drm_crtc *crtc,
-                                      struct intel_crtc_state *crtc_state,
-                                      struct drm_atomic_state *old_state)
-{
-       struct drm_connector_state *conn_state;
-       struct drm_connector *conn;
-       int i;
-
-       for_each_new_connector_in_state(old_state, conn, conn_state, i) {
-               struct intel_encoder *encoder =
-                       to_intel_encoder(conn_state->best_encoder);
-
-               if (conn_state->crtc != crtc)
-                       continue;
-
-               if (encoder->update_pipe)
-                       encoder->update_pipe(encoder, crtc_state, conn_state);
-       }
-}
-
-static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-
-       plane->disable_plane(plane, crtc_state);
-}
-
-static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
-                                struct drm_atomic_state *old_state)
-{
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-
-       if (WARN_ON(intel_crtc->active))
-               return;
-
-       /*
-        * Sometimes spurious CPU pipe underruns happen during FDI
-        * training, at least with VGA+HDMI cloning. Suppress them.
-        *
-        * On ILK we get occasional spurious CPU pipe underruns
-        * between eDP port A enable and vdd enable. Also PCH port
-        * enable seems to result in the occasional CPU pipe underrun.
-        *
-        * Spurious PCH underruns also occur during PCH enabling.
-        */
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
-
-       if (pipe_config->has_pch_encoder)
-               intel_prepare_shared_dpll(pipe_config);
-
-       if (intel_crtc_has_dp_encoder(pipe_config))
-               intel_dp_set_m_n(pipe_config, M1_N1);
-
-       intel_set_pipe_timings(pipe_config);
-       intel_set_pipe_src_size(pipe_config);
-
-       if (pipe_config->has_pch_encoder) {
-               intel_cpu_transcoder_set_m_n(pipe_config,
-                                            &pipe_config->fdi_m_n, NULL);
-       }
-
-       ironlake_set_pipeconf(pipe_config);
-
-       intel_crtc->active = true;
-
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
-       if (pipe_config->has_pch_encoder) {
-               /* Note: FDI PLL enabling _must_ be done before we enable the
-                * cpu pipes, hence this is separate from all the other fdi/pch
-                * enabling. */
-               ironlake_fdi_pll_enable(pipe_config);
-       } else {
-               assert_fdi_tx_disabled(dev_priv, pipe);
-               assert_fdi_rx_disabled(dev_priv, pipe);
-       }
-
-       ironlake_pfit_enable(pipe_config);
-
-       /*
-        * On ILK+ LUT must be loaded before the pipe is running but with
-        * clocks enabled
-        */
-       intel_color_load_luts(pipe_config);
-       intel_color_commit(pipe_config);
-       /* update DSPCNTR to configure gamma for pipe bottom color */
-       intel_disable_primary_plane(pipe_config);
-
-       if (dev_priv->display.initial_watermarks != NULL)
-               dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
-       intel_enable_pipe(pipe_config);
-
-       if (pipe_config->has_pch_encoder)
-               ironlake_pch_enable(old_intel_state, pipe_config);
-
-       assert_vblank_disabled(crtc);
-       intel_crtc_vblank_on(pipe_config);
-
-       intel_encoders_enable(crtc, pipe_config, old_state);
-
-       if (HAS_PCH_CPT(dev_priv))
-               cpt_verify_modeset(dev, intel_crtc->pipe);
-
-       /*
-        * Must wait for vblank to avoid spurious PCH FIFO underruns.
-        * And a second vblank wait is needed at least on ILK with
-        * some interlaced HDMI modes. Let's do the double wait always
-        * in case there are more corner cases we don't know about.
-        */
-       if (pipe_config->has_pch_encoder) {
-               intel_wait_for_vblank(dev_priv, pipe);
-               intel_wait_for_vblank(dev_priv, pipe);
-       }
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-}
-
-/* IPS only exists on ULT machines and is tied to pipe A. */
-static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
-{
-       return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
-}
-
-static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
-                                           enum pipe pipe, bool apply)
-{
-       u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
-       u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
-
-       if (apply)
-               val |= mask;
-       else
-               val &= ~mask;
-
-       I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
-}
-
-static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 val;
-
-       val = MBUS_DBOX_A_CREDIT(2);
-       val |= MBUS_DBOX_BW_CREDIT(1);
-       val |= MBUS_DBOX_B_CREDIT(8);
-
-       I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
-}
-
-static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
-                               struct drm_atomic_state *old_state)
-{
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe, hsw_workaround_pipe;
-       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-       bool psl_clkgate_wa;
-
-       if (WARN_ON(intel_crtc->active))
-               return;
-
-       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
-
-       if (pipe_config->shared_dpll)
-               intel_enable_shared_dpll(pipe_config);
-
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
-       if (intel_crtc_has_dp_encoder(pipe_config))
-               intel_dp_set_m_n(pipe_config, M1_N1);
-
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_set_pipe_timings(pipe_config);
-
-       intel_set_pipe_src_size(pipe_config);
-
-       if (cpu_transcoder != TRANSCODER_EDP &&
-           !transcoder_is_dsi(cpu_transcoder)) {
-               I915_WRITE(PIPE_MULT(cpu_transcoder),
-                          pipe_config->pixel_multiplier - 1);
-       }
-
-       if (pipe_config->has_pch_encoder) {
-               intel_cpu_transcoder_set_m_n(pipe_config,
-                                            &pipe_config->fdi_m_n, NULL);
-       }
-
-       if (!transcoder_is_dsi(cpu_transcoder))
-               haswell_set_pipeconf(pipe_config);
-
-       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-               bdw_set_pipemisc(pipe_config);
-
-       intel_crtc->active = true;
-
-       /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
-       psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-                        pipe_config->pch_pfit.enabled;
-       if (psl_clkgate_wa)
-               glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               skylake_pfit_enable(pipe_config);
-       else
-               ironlake_pfit_enable(pipe_config);
-
-       /*
-        * On ILK+ LUT must be loaded before the pipe is running but with
-        * clocks enabled
-        */
-       intel_color_load_luts(pipe_config);
-       intel_color_commit(pipe_config);
-       /* update DSPCNTR to configure gamma/csc for pipe bottom color */
-       if (INTEL_GEN(dev_priv) < 9)
-               intel_disable_primary_plane(pipe_config);
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               icl_set_pipe_chicken(intel_crtc);
-
-       intel_ddi_set_pipe_settings(pipe_config);
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_enable_transcoder_func(pipe_config);
-
-       if (dev_priv->display.initial_watermarks != NULL)
-               dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               icl_pipe_mbus_enable(intel_crtc);
-
-       /* XXX: Do the pipe assertions at the right place for BXT DSI. */
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_enable_pipe(pipe_config);
-
-       if (pipe_config->has_pch_encoder)
-               lpt_pch_enable(old_intel_state, pipe_config);
-
-       if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
-               intel_ddi_set_vc_payload_alloc(pipe_config, true);
-
-       assert_vblank_disabled(crtc);
-       intel_crtc_vblank_on(pipe_config);
-
-       intel_encoders_enable(crtc, pipe_config, old_state);
-
-       if (psl_clkgate_wa) {
-               intel_wait_for_vblank(dev_priv, pipe);
-               glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
-       }
-
-       /* If we change the relative order between pipe/planes enabling, we need
-        * to change the workaround. */
-       hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
-       if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
-               intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
-               intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
-       }
-}
-
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       /* To avoid upsetting the power well on haswell, only disable the pfit if
-        * it's in use. The hw state code will make sure we get this right. */
-       if (old_crtc_state->pch_pfit.enabled) {
-               I915_WRITE(PF_CTL(pipe), 0);
-               I915_WRITE(PF_WIN_POS(pipe), 0);
-               I915_WRITE(PF_WIN_SZ(pipe), 0);
-       }
-}
-
-static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
-                                 struct drm_atomic_state *old_state)
-{
-       struct drm_crtc *crtc = old_crtc_state->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-
-       /*
-        * Sometimes spurious CPU pipe underruns happen when the
-        * pipe is already disabled, but FDI RX/TX is still enabled.
-        * Happens at least with VGA+HDMI cloning. Suppress them.
-        */
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
-
-       intel_encoders_disable(crtc, old_crtc_state, old_state);
-
-       drm_crtc_vblank_off(crtc);
-       assert_vblank_disabled(crtc);
-
-       intel_disable_pipe(old_crtc_state);
-
-       ironlake_pfit_disable(old_crtc_state);
-
-       if (old_crtc_state->has_pch_encoder)
-               ironlake_fdi_disable(crtc);
-
-       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
-       if (old_crtc_state->has_pch_encoder) {
-               ironlake_disable_pch_transcoder(dev_priv, pipe);
-
-               if (HAS_PCH_CPT(dev_priv)) {
-                       i915_reg_t reg;
-                       u32 temp;
-
-                       /* disable TRANS_DP_CTL */
-                       reg = TRANS_DP_CTL(pipe);
-                       temp = I915_READ(reg);
-                       temp &= ~(TRANS_DP_OUTPUT_ENABLE |
-                                 TRANS_DP_PORT_SEL_MASK);
-                       temp |= TRANS_DP_PORT_SEL_NONE;
-                       I915_WRITE(reg, temp);
-
-                       /* disable DPLL_SEL */
-                       temp = I915_READ(PCH_DPLL_SEL);
-                       temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
-                       I915_WRITE(PCH_DPLL_SEL, temp);
-               }
-
-               ironlake_fdi_pll_disable(intel_crtc);
-       }
-
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-}
-
-static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
-                                struct drm_atomic_state *old_state)
-{
-       struct drm_crtc *crtc = old_crtc_state->base.crtc;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-
-       intel_encoders_disable(crtc, old_crtc_state, old_state);
-
-       drm_crtc_vblank_off(crtc);
-       assert_vblank_disabled(crtc);
-
-       /* XXX: Do the pipe assertions at the right place for BXT DSI. */
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_disable_pipe(old_crtc_state);
-
-       if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
-               intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
-
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_transcoder_func(old_crtc_state);
-
-       intel_dsc_disable(old_crtc_state);
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               skylake_scaler_disable(intel_crtc);
-       else
-               ironlake_pfit_disable(old_crtc_state);
-
-       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
-       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
-}
-
-static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!crtc_state->gmch_pfit.control)
-               return;
-
-       /*
-        * The panel fitter should only be adjusted whilst the pipe is disabled,
-        * according to the register description and PRM.
-        */
-       WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
-       assert_pipe_disabled(dev_priv, crtc->pipe);
-
-       I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
-       I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
-
-       /* Border color in case we don't scale up to the full screen. Black by
-        * default, change to something else for debugging. */
-       I915_WRITE(BCLRPAT(crtc->pipe), 0);
-}
-
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
-{
-       if (port == PORT_NONE)
-               return false;
-
-       if (IS_ELKHARTLAKE(dev_priv))
-               return port <= PORT_C;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               return port <= PORT_B;
-
-       return false;
-}
-
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
-{
-       if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
-               return port >= PORT_C && port <= PORT_F;
-
-       return false;
-}
-
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
-{
-       if (!intel_port_is_tc(dev_priv, port))
-               return PORT_TC_NONE;
-
-       return port - PORT_C;
-}
-
-enum intel_display_power_domain intel_port_to_power_domain(enum port port)
-{
-       switch (port) {
-       case PORT_A:
-               return POWER_DOMAIN_PORT_DDI_A_LANES;
-       case PORT_B:
-               return POWER_DOMAIN_PORT_DDI_B_LANES;
-       case PORT_C:
-               return POWER_DOMAIN_PORT_DDI_C_LANES;
-       case PORT_D:
-               return POWER_DOMAIN_PORT_DDI_D_LANES;
-       case PORT_E:
-               return POWER_DOMAIN_PORT_DDI_E_LANES;
-       case PORT_F:
-               return POWER_DOMAIN_PORT_DDI_F_LANES;
-       default:
-               MISSING_CASE(port);
-               return POWER_DOMAIN_PORT_OTHER;
-       }
-}
-
-enum intel_display_power_domain
-intel_aux_power_domain(struct intel_digital_port *dig_port)
-{
-       switch (dig_port->aux_ch) {
-       case AUX_CH_A:
-               return POWER_DOMAIN_AUX_A;
-       case AUX_CH_B:
-               return POWER_DOMAIN_AUX_B;
-       case AUX_CH_C:
-               return POWER_DOMAIN_AUX_C;
-       case AUX_CH_D:
-               return POWER_DOMAIN_AUX_D;
-       case AUX_CH_E:
-               return POWER_DOMAIN_AUX_E;
-       case AUX_CH_F:
-               return POWER_DOMAIN_AUX_F;
-       default:
-               MISSING_CASE(dig_port->aux_ch);
-               return POWER_DOMAIN_AUX_A;
-       }
-}
-
-static u64 get_crtc_power_domains(struct drm_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_encoder *encoder;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       u64 mask;
-       enum transcoder transcoder = crtc_state->cpu_transcoder;
-
-       if (!crtc_state->base.active)
-               return 0;
-
-       mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
-       mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
-       if (crtc_state->pch_pfit.enabled ||
-           crtc_state->pch_pfit.force_thru)
-               mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
-
-       drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
-               mask |= BIT_ULL(intel_encoder->power_domain);
-       }
-
-       if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-               mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
-
-       if (crtc_state->shared_dpll)
-               mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
-
-       return mask;
-}
-
-static u64
-modeset_get_crtc_power_domains(struct drm_crtc *crtc,
-                              struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum intel_display_power_domain domain;
-       u64 domains, new_domains, old_domains;
-
-       old_domains = intel_crtc->enabled_power_domains;
-       intel_crtc->enabled_power_domains = new_domains =
-               get_crtc_power_domains(crtc, crtc_state);
-
-       domains = new_domains & ~old_domains;
-
-       for_each_power_domain(domain, domains)
-               intel_display_power_get(dev_priv, domain);
-
-       return old_domains & ~new_domains;
-}
-
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
-                                     u64 domains)
-{
-       enum intel_display_power_domain domain;
-
-       for_each_power_domain(domain, domains)
-               intel_display_power_put_unchecked(dev_priv, domain);
-}
-
-static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
-                                  struct drm_atomic_state *old_state)
-{
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-
-       if (WARN_ON(intel_crtc->active))
-               return;
-
-       if (intel_crtc_has_dp_encoder(pipe_config))
-               intel_dp_set_m_n(pipe_config, M1_N1);
-
-       intel_set_pipe_timings(pipe_config);
-       intel_set_pipe_src_size(pipe_config);
-
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-               I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
-               I915_WRITE(CHV_CANVAS(pipe), 0);
-       }
-
-       i9xx_set_pipeconf(pipe_config);
-
-       intel_crtc->active = true;
-
-       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
-       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
-
-       if (IS_CHERRYVIEW(dev_priv)) {
-               chv_prepare_pll(intel_crtc, pipe_config);
-               chv_enable_pll(intel_crtc, pipe_config);
-       } else {
-               vlv_prepare_pll(intel_crtc, pipe_config);
-               vlv_enable_pll(intel_crtc, pipe_config);
-       }
-
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
-       i9xx_pfit_enable(pipe_config);
-
-       intel_color_load_luts(pipe_config);
-       intel_color_commit(pipe_config);
-       /* update DSPCNTR to configure gamma for pipe bottom color */
-       intel_disable_primary_plane(pipe_config);
-
-       dev_priv->display.initial_watermarks(old_intel_state,
-                                            pipe_config);
-       intel_enable_pipe(pipe_config);
-
-       assert_vblank_disabled(crtc);
-       intel_crtc_vblank_on(pipe_config);
-
-       intel_encoders_enable(crtc, pipe_config, old_state);
-}
-
-static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
-       I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
-}
-
-static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
-                            struct drm_atomic_state *old_state)
-{
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_state);
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-
-       if (WARN_ON(intel_crtc->active))
-               return;
-
-       i9xx_set_pll_dividers(pipe_config);
-
-       if (intel_crtc_has_dp_encoder(pipe_config))
-               intel_dp_set_m_n(pipe_config, M1_N1);
-
-       intel_set_pipe_timings(pipe_config);
-       intel_set_pipe_src_size(pipe_config);
-
-       i9xx_set_pipeconf(pipe_config);
-
-       intel_crtc->active = true;
-
-       if (!IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
-       i9xx_enable_pll(intel_crtc, pipe_config);
-
-       i9xx_pfit_enable(pipe_config);
-
-       intel_color_load_luts(pipe_config);
-       intel_color_commit(pipe_config);
-       /* update DSPCNTR to configure gamma for pipe bottom color */
-       intel_disable_primary_plane(pipe_config);
-
-       if (dev_priv->display.initial_watermarks != NULL)
-               dev_priv->display.initial_watermarks(old_intel_state,
-                                                    pipe_config);
-       else
-               intel_update_watermarks(intel_crtc);
-       intel_enable_pipe(pipe_config);
-
-       assert_vblank_disabled(crtc);
-       intel_crtc_vblank_on(pipe_config);
-
-       intel_encoders_enable(crtc, pipe_config, old_state);
-}
-
-static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!old_crtc_state->gmch_pfit.control)
-               return;
-
-       assert_pipe_disabled(dev_priv, crtc->pipe);
-
-       DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
-                     I915_READ(PFIT_CONTROL));
-       I915_WRITE(PFIT_CONTROL, 0);
-}
-
-static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
-                             struct drm_atomic_state *old_state)
-{
-       struct drm_crtc *crtc = old_crtc_state->base.crtc;
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-
-       /*
-        * On gen2 planes are double buffered but the pipe isn't, so we must
-        * wait for planes to fully turn off before disabling the pipe.
-        */
-       if (IS_GEN(dev_priv, 2))
-               intel_wait_for_vblank(dev_priv, pipe);
-
-       intel_encoders_disable(crtc, old_crtc_state, old_state);
-
-       drm_crtc_vblank_off(crtc);
-       assert_vblank_disabled(crtc);
-
-       intel_disable_pipe(old_crtc_state);
-
-       i9xx_pfit_disable(old_crtc_state);
-
-       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
-       if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
-               if (IS_CHERRYVIEW(dev_priv))
-                       chv_disable_pll(dev_priv, pipe);
-               else if (IS_VALLEYVIEW(dev_priv))
-                       vlv_disable_pll(dev_priv, pipe);
-               else
-                       i9xx_disable_pll(old_crtc_state);
-       }
-
-       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
-
-       if (!IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
-       if (!dev_priv->display.initial_watermarks)
-               intel_update_watermarks(intel_crtc);
-
-       /* clock the pipe down to 640x480@60 to potentially save power */
-       if (IS_I830(dev_priv))
-               i830_enable_pipe(dev_priv, pipe);
-}
-
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
-                                       struct drm_modeset_acquire_ctx *ctx)
-{
-       struct intel_encoder *encoder;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_bw_state *bw_state =
-               to_intel_bw_state(dev_priv->bw_obj.state);
-       enum intel_display_power_domain domain;
-       struct intel_plane *plane;
-       u64 domains;
-       struct drm_atomic_state *state;
-       struct intel_crtc_state *crtc_state;
-       int ret;
-
-       if (!intel_crtc->active)
-               return;
-
-       for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
-               const struct intel_plane_state *plane_state =
-                       to_intel_plane_state(plane->base.state);
-
-               if (plane_state->base.visible)
-                       intel_plane_disable_noatomic(intel_crtc, plane);
-       }
-
-       state = drm_atomic_state_alloc(crtc->dev);
-       if (!state) {
-               DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
-                             crtc->base.id, crtc->name);
-               return;
-       }
-
-       state->acquire_ctx = ctx;
-
-       /* Everything's already locked, -EDEADLK can't happen. */
-       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-       ret = drm_atomic_add_affected_connectors(state, crtc);
-
-       WARN_ON(IS_ERR(crtc_state) || ret);
-
-       dev_priv->display.crtc_disable(crtc_state, state);
-
-       drm_atomic_state_put(state);
-
-       DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
-                     crtc->base.id, crtc->name);
-
-       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
-       crtc->state->active = false;
-       intel_crtc->active = false;
-       crtc->enabled = false;
-       crtc->state->connector_mask = 0;
-       crtc->state->encoder_mask = 0;
-
-       for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
-               encoder->base.crtc = NULL;
-
-       intel_fbc_disable(intel_crtc);
-       intel_update_watermarks(intel_crtc);
-       intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
-
-       domains = intel_crtc->enabled_power_domains;
-       for_each_power_domain(domain, domains)
-               intel_display_power_put_unchecked(dev_priv, domain);
-       intel_crtc->enabled_power_domains = 0;
-
-       dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
-       dev_priv->min_cdclk[intel_crtc->pipe] = 0;
-       dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
-
-       bw_state->data_rate[intel_crtc->pipe] = 0;
-       bw_state->num_active_planes[intel_crtc->pipe] = 0;
-}
-
-/*
- * Turn all CRTCs off, but do not adjust state.
- * This has to be paired with a call to intel_modeset_setup_hw_state.
- */
-int intel_display_suspend(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *state;
-       int ret;
-
-       state = drm_atomic_helper_suspend(dev);
-       ret = PTR_ERR_OR_ZERO(state);
-       if (ret)
-               DRM_ERROR("Suspending crtc's failed with %i\n", ret);
-       else
-               dev_priv->modeset_restore_state = state;
-       return ret;
-}
-
-void intel_encoder_destroy(struct drm_encoder *encoder)
-{
-       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
-       drm_encoder_cleanup(encoder);
-       kfree(intel_encoder);
-}
-
-/* Cross check the actual hw state with our own modeset state tracking (and its
- * internal consistency). */
-static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
-                                        struct drm_connector_state *conn_state)
-{
-       struct intel_connector *connector = to_intel_connector(conn_state->connector);
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.base.id,
-                     connector->base.name);
-
-       if (connector->get_hw_state(connector)) {
-               struct intel_encoder *encoder = connector->encoder;
-
-               I915_STATE_WARN(!crtc_state,
-                        "connector enabled without attached crtc\n");
-
-               if (!crtc_state)
-                       return;
-
-               I915_STATE_WARN(!crtc_state->active,
-                     "connector is active, but attached crtc isn't\n");
-
-               if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
-                       return;
-
-               I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
-                       "atomic encoder doesn't match attached encoder\n");
-
-               I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
-                       "attached encoder crtc differs from connector crtc\n");
-       } else {
-               I915_STATE_WARN(crtc_state && crtc_state->active,
-                       "attached crtc is active, but connector isn't\n");
-               I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
-                       "best encoder set without crtc!\n");
-       }
-}
-
-static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
-{
-       if (crtc_state->base.enable && crtc_state->has_pch_encoder)
-               return crtc_state->fdi_lanes;
-
-       return 0;
-}
-
-static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
-                                    struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *state = pipe_config->base.state;
-       struct intel_crtc *other_crtc;
-       struct intel_crtc_state *other_crtc_state;
-
-       DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
-                     pipe_name(pipe), pipe_config->fdi_lanes);
-       if (pipe_config->fdi_lanes > 4) {
-               DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
-                             pipe_name(pipe), pipe_config->fdi_lanes);
-               return -EINVAL;
-       }
-
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               if (pipe_config->fdi_lanes > 2) {
-                       DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
-                                     pipe_config->fdi_lanes);
-                       return -EINVAL;
-               } else {
-                       return 0;
-               }
-       }
-
-       if (INTEL_INFO(dev_priv)->num_pipes == 2)
-               return 0;
-
-       /* The Ivybridge 3-pipe case is really complicated */
-       switch (pipe) {
-       case PIPE_A:
-               return 0;
-       case PIPE_B:
-               if (pipe_config->fdi_lanes <= 2)
-                       return 0;
-
-               other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
-               other_crtc_state =
-                       intel_atomic_get_crtc_state(state, other_crtc);
-               if (IS_ERR(other_crtc_state))
-                       return PTR_ERR(other_crtc_state);
-
-               if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
-                       DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
-                                     pipe_name(pipe), pipe_config->fdi_lanes);
-                       return -EINVAL;
-               }
-               return 0;
-       case PIPE_C:
-               if (pipe_config->fdi_lanes > 2) {
-                       DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
-                                     pipe_name(pipe), pipe_config->fdi_lanes);
-                       return -EINVAL;
-               }
-
-               other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
-               other_crtc_state =
-                       intel_atomic_get_crtc_state(state, other_crtc);
-               if (IS_ERR(other_crtc_state))
-                       return PTR_ERR(other_crtc_state);
-
-               if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
-                       DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
-                       return -EINVAL;
-               }
-               return 0;
-       default:
-               BUG();
-       }
-}
-
-#define RETRY 1
-static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
-                                      struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = intel_crtc->base.dev;
-       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int lane, link_bw, fdi_dotclock, ret;
-       bool needs_recompute = false;
-
-retry:
-       /* FDI is a binary signal running at ~2.7GHz, encoding
-        * each output octet as 10 bits. The actual frequency
-        * is stored as a divider into a 100MHz clock, and the
-        * mode pixel clock is stored in units of 1KHz.
-        * Hence the bw of each lane in terms of the mode signal
-        * is:
-        */
-       link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
-
-       fdi_dotclock = adjusted_mode->crtc_clock;
-
-       lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
-                                          pipe_config->pipe_bpp);
-
-       pipe_config->fdi_lanes = lane;
-
-       intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n, false);
-
-       ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
-       if (ret == -EDEADLK)
-               return ret;
-
-       if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
-               pipe_config->pipe_bpp -= 2*3;
-               DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
-                             pipe_config->pipe_bpp);
-               needs_recompute = true;
-               pipe_config->bw_constrained = true;
-
-               goto retry;
-       }
-
-       if (needs_recompute)
-               return RETRY;
-
-       return ret;
-}
-
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       /* IPS only exists on ULT machines and is tied to pipe A. */
-       if (!hsw_crtc_supports_ips(crtc))
-               return false;
-
-       if (!i915_modparams.enable_ips)
-               return false;
-
-       if (crtc_state->pipe_bpp > 24)
-               return false;
-
-       /*
-        * We compare against the max, which means we must take
-        * the increased cdclk requirement into account when
-        * calculating the new cdclk.
-        *
-        * Should measure whether using a lower cdclk w/o IPS would be preferable.
-        */
-       if (IS_BROADWELL(dev_priv) &&
-           crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
-               return false;
-
-       return true;
-}
-
-static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(crtc_state->base.crtc->dev);
-       struct intel_atomic_state *intel_state =
-               to_intel_atomic_state(crtc_state->base.state);
-
-       if (!hsw_crtc_state_ips_capable(crtc_state))
-               return false;
-
-       /*
-        * When IPS gets enabled, the pipe CRC changes. Since IPS gets
-        * enabled and disabled dynamically based on package C states,
-        * user space can't make reliable use of the CRCs, so let's just
-        * completely disable it.
-        */
-       if (crtc_state->crc_enabled)
-               return false;
-
-       /* IPS should be fine as long as at least one plane is enabled. */
-       if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
-               return false;
-
-       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-       if (IS_BROADWELL(dev_priv) &&
-           crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
-               return false;
-
-       return true;
-}
-
-static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
-{
-       const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       /* GDG double wide on either pipe, otherwise pipe A only */
-       return INTEL_GEN(dev_priv) < 4 &&
-               (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
-}
-
-static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
-{
-       u32 pixel_rate;
-
-       pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
-
-       /*
-        * We only use IF-ID interlacing. If we ever use
-        * PF-ID we'll need to adjust the pixel_rate here.
-        */
-
-       if (pipe_config->pch_pfit.enabled) {
-               u64 pipe_w, pipe_h, pfit_w, pfit_h;
-               u32 pfit_size = pipe_config->pch_pfit.size;
-
-               pipe_w = pipe_config->pipe_src_w;
-               pipe_h = pipe_config->pipe_src_h;
-
-               pfit_w = (pfit_size >> 16) & 0xFFFF;
-               pfit_h = pfit_size & 0xFFFF;
-               if (pipe_w < pfit_w)
-                       pipe_w = pfit_w;
-               if (pipe_h < pfit_h)
-                       pipe_h = pfit_h;
-
-               if (WARN_ON(!pfit_w || !pfit_h))
-                       return pixel_rate;
-
-               pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
-                                    pfit_w * pfit_h);
-       }
-
-       return pixel_rate;
-}
-
-static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       if (HAS_GMCH(dev_priv))
-               /* FIXME calculate proper pipe pixel rate for GMCH pfit */
-               crtc_state->pixel_rate =
-                       crtc_state->base.adjusted_mode.crtc_clock;
-       else
-               crtc_state->pixel_rate =
-                       ilk_pipe_pixel_rate(crtc_state);
-}
-
-static int intel_crtc_compute_config(struct intel_crtc *crtc,
-                                    struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int clock_limit = dev_priv->max_dotclk_freq;
-
-       if (INTEL_GEN(dev_priv) < 4) {
-               clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
-
-               /*
-                * Enable double wide mode when the dot clock
-                * is > 90% of the (display) core speed.
-                */
-               if (intel_crtc_supports_double_wide(crtc) &&
-                   adjusted_mode->crtc_clock > clock_limit) {
-                       clock_limit = dev_priv->max_dotclk_freq;
-                       pipe_config->double_wide = true;
-               }
-       }
-
-       if (adjusted_mode->crtc_clock > clock_limit) {
-               DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
-                             adjusted_mode->crtc_clock, clock_limit,
-                             yesno(pipe_config->double_wide));
-               return -EINVAL;
-       }
-
-       if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-            pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
-            pipe_config->base.ctm) {
-               /*
-                * There is only one pipe CSC unit per pipe, and we need that
-                * for output conversion from RGB->YCBCR. So if CTM is already
-                * applied we can't support YCBCR420 output.
-                */
-               DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
-               return -EINVAL;
-       }
-
-       /*
-        * Pipe horizontal size must be even in:
-        * - DVO ganged mode
-        * - LVDS dual channel mode
-        * - Double wide pipe
-        */
-       if (pipe_config->pipe_src_w & 1) {
-               if (pipe_config->double_wide) {
-                       DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
-                       return -EINVAL;
-               }
-
-               if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
-                   intel_is_dual_link_lvds(dev_priv)) {
-                       DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
-                       return -EINVAL;
-               }
-       }
-
-       /* Cantiga+ cannot handle modes with a hsync front porch of 0.
-        * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
-        */
-       if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
-               adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
-               return -EINVAL;
-
-       intel_crtc_compute_pixel_rate(pipe_config);
-
-       if (pipe_config->has_pch_encoder)
-               return ironlake_fdi_compute_config(crtc, pipe_config);
-
-       return 0;
-}
-
-static void
-intel_reduce_m_n_ratio(u32 *num, u32 *den)
-{
-       while (*num > DATA_LINK_M_N_MASK ||
-              *den > DATA_LINK_M_N_MASK) {
-               *num >>= 1;
-               *den >>= 1;
-       }
-}
-
-static void compute_m_n(unsigned int m, unsigned int n,
-                       u32 *ret_m, u32 *ret_n,
-                       bool constant_n)
-{
-       /*
-        * Several DP dongles in particular seem to be fussy about
-        * too large link M/N values. Use N = 0x8000, which such
-        * devices should accept: 0x8000 is the fixed N value
-        * specified for asynchronous clock mode, and the devices
-        * expect it in synchronous clock mode as well.
-        */
-       if (constant_n)
-               *ret_n = 0x8000;
-       else
-               *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
-
-       *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
-       intel_reduce_m_n_ratio(ret_m, ret_n);
-}
-
-void
-intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
-                      int pixel_clock, int link_clock,
-                      struct intel_link_m_n *m_n,
-                      bool constant_n)
-{
-       m_n->tu = 64;
-
-       compute_m_n(bits_per_pixel * pixel_clock,
-                   link_clock * nlanes * 8,
-                   &m_n->gmch_m, &m_n->gmch_n,
-                   constant_n);
-
-       compute_m_n(pixel_clock, link_clock,
-                   &m_n->link_m, &m_n->link_n,
-                   constant_n);
-}
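-
-/*
- * Illustrative example (numbers for illustration only): a 24 bpp
- * 1920x1080@60 stream (pixel_clock 148500 kHz) on 4 lanes at HBR2
- * (link_clock 540000 kHz) with constant_n gives a data M/N of roughly
- * 6758/0x8000 (24 * 148500 / (540000 * 4 * 8) ~= 0.206) and a link M/N of
- * roughly 9011/0x8000 (148500 / 540000 = 0.275).
- */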
-
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
-       if (i915_modparams.panel_use_ssc >= 0)
-               return i915_modparams.panel_use_ssc != 0;
-       return dev_priv->vbt.lvds_use_ssc
-               && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
-{
-       return (1 << dpll->n) << 16 | dpll->m2;
-}
-
-static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
-       return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
-                                    struct intel_crtc_state *crtc_state,
-                                    struct dpll *reduced_clock)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 fp, fp2 = 0;
-
-       if (IS_PINEVIEW(dev_priv)) {
-               fp = pnv_dpll_compute_fp(&crtc_state->dpll);
-               if (reduced_clock)
-                       fp2 = pnv_dpll_compute_fp(reduced_clock);
-       } else {
-               fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-               if (reduced_clock)
-                       fp2 = i9xx_dpll_compute_fp(reduced_clock);
-       }
-
-       crtc_state->dpll_hw_state.fp0 = fp;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-           reduced_clock) {
-               crtc_state->dpll_hw_state.fp1 = fp2;
-       } else {
-               crtc_state->dpll_hw_state.fp1 = fp;
-       }
-}
-
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
-               pipe)
-{
-       u32 reg_val;
-
-       /*
-        * The PLLB opamp always calibrates to the max value of 0x3f;
-        * force-enable it and set it to a reasonable value instead.
-        */
-       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
-       reg_val &= 0xffffff00;
-       reg_val |= 0x00000030;
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
-
-       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
-       reg_val &= 0x00ffffff;
-       reg_val |= 0x8c000000;
-       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
-
-       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
-       reg_val &= 0xffffff00;
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
-
-       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
-       reg_val &= 0x00ffffff;
-       reg_val |= 0xb0000000;
-       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
-}
-
-static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
-                                        const struct intel_link_m_n *m_n)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-       I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
-       I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
-       I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
-}
-
-static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
-                                enum transcoder transcoder)
-{
-       if (IS_HASWELL(dev_priv))
-               return transcoder == TRANSCODER_EDP;
-
-       /*
-        * Strictly speaking some registers are available before
-        * gen7, but we only support DRRS on gen7+
-        */
-       return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
-}
-
-static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
-                                        const struct intel_link_m_n *m_n,
-                                        const struct intel_link_m_n *m2_n2)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       enum transcoder transcoder = crtc_state->cpu_transcoder;
-
-       if (INTEL_GEN(dev_priv) >= 5) {
-               I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
-               I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
-               I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
-               I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
-               /*
-                * M2_N2 registers are set only if DRRS is supported
-                * (to make sure the registers are not unnecessarily accessed).
-                */
-               if (m2_n2 && crtc_state->has_drrs &&
-                   transcoder_has_m2_n2(dev_priv, transcoder)) {
-                       I915_WRITE(PIPE_DATA_M2(transcoder),
-                                       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
-                       I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
-                       I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
-                       I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
-               }
-       } else {
-               I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-               I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
-               I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
-               I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
-       }
-}
-
-void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
-{
-       const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
-
-       if (m_n == M1_N1) {
-               dp_m_n = &crtc_state->dp_m_n;
-               dp_m2_n2 = &crtc_state->dp_m2_n2;
-       } else if (m_n == M2_N2) {
-               /*
-                * M2_N2 registers are not supported, so the m2_n2 divider
-                * values need to be programmed into M1_N1 instead.
-                */
-               dp_m_n = &crtc_state->dp_m2_n2;
-       } else {
-               DRM_ERROR("Unsupported divider value\n");
-               return;
-       }
-
-       if (crtc_state->has_pch_encoder)
-               intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
-       else
-               intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
-}
-
-static void vlv_compute_dpll(struct intel_crtc *crtc,
-                            struct intel_crtc_state *pipe_config)
-{
-       pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-       if (crtc->pipe != PIPE_A)
-               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-       /* DPLL not used with DSI, but still need the rest set up */
-       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
-               pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
-                       DPLL_EXT_BUFFER_ENABLE_VLV;
-
-       pipe_config->dpll_hw_state.dpll_md =
-               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void chv_compute_dpll(struct intel_crtc *crtc,
-                            struct intel_crtc_state *pipe_config)
-{
-       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-       if (crtc->pipe != PIPE_A)
-               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-       /* DPLL not used with DSI, but still need the rest set up */
-       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
-               pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
-
-       pipe_config->dpll_hw_state.dpll_md =
-               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void vlv_prepare_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum pipe pipe = crtc->pipe;
-       u32 mdiv;
-       u32 bestn, bestm1, bestm2, bestp1, bestp2;
-       u32 coreclk, reg_val;
-
-       /* Enable Refclk */
-       I915_WRITE(DPLL(pipe),
-                  pipe_config->dpll_hw_state.dpll &
-                  ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
-
-       /* No need to actually set up the DPLL with DSI */
-       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
-               return;
-
-       vlv_dpio_get(dev_priv);
-
-       bestn = pipe_config->dpll.n;
-       bestm1 = pipe_config->dpll.m1;
-       bestm2 = pipe_config->dpll.m2;
-       bestp1 = pipe_config->dpll.p1;
-       bestp2 = pipe_config->dpll.p2;
-
-       /* See eDP HDMI DPIO driver vbios notes doc */
-
-       /* PLL B needs special handling */
-       if (pipe == PIPE_B)
-               vlv_pllb_recal_opamp(dev_priv, pipe);
-
-       /* Set up Tx target for periodic Rcomp update */
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
-
-       /* Disable target IRef on PLL */
-       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
-       reg_val &= 0x00ffffff;
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
-
-       /* Disable fast lock */
-       vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
-
-       /* Set idtafcrecal before PLL is enabled */
-       mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
-       mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
-       mdiv |= ((bestn << DPIO_N_SHIFT));
-       mdiv |= (1 << DPIO_K_SHIFT);
-
-       /*
-        * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
-        * but we don't support that).
-        * Note: don't use the DAC post divider as it seems unstable.
-        */
-       mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
-
-       mdiv |= DPIO_ENABLE_CALIBRATION;
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
-
-       /* Set HBR and RBR LPF coefficients */
-       if (pipe_config->port_clock == 162000 ||
-           intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
-           intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
-               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
-                                0x009f0003);
-       else
-               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
-                                0x00d0000f);
-
-       if (intel_crtc_has_dp_encoder(pipe_config)) {
-               /* Use SSC source */
-               if (pipe == PIPE_A)
-                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
-                                        0x0df40000);
-               else
-                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
-                                        0x0df70000);
-       } else { /* HDMI or VGA */
-               /* Use bend source */
-               if (pipe == PIPE_A)
-                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
-                                        0x0df70000);
-               else
-                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
-                                        0x0df40000);
-       }
-
-       coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
-       coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-       if (intel_crtc_has_dp_encoder(pipe_config))
-               coreclk |= 0x01000000;
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
-
-       vlv_dpio_put(dev_priv);
-}
-
-static void chv_prepare_pll(struct intel_crtc *crtc,
-                           const struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum pipe pipe = crtc->pipe;
-       enum dpio_channel port = vlv_pipe_to_channel(pipe);
-       u32 loopfilter, tribuf_calcntr;
-       u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
-       u32 dpio_val;
-       int vco;
-
-       /* Enable Refclk and SSC */
-       I915_WRITE(DPLL(pipe),
-                  pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
-       /* No need to actually set up the DPLL with DSI */
-       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
-               return;
-
-       bestn = pipe_config->dpll.n;
-       bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
-       bestm1 = pipe_config->dpll.m1;
-       bestm2 = pipe_config->dpll.m2 >> 22;
-       bestp1 = pipe_config->dpll.p1;
-       bestp2 = pipe_config->dpll.p2;
-       vco = pipe_config->dpll.vco;
-       dpio_val = 0;
-       loopfilter = 0;
-
-       vlv_dpio_get(dev_priv);
-
-       /* p1 and p2 divider */
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
-                       5 << DPIO_CHV_S1_DIV_SHIFT |
-                       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
-                       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
-                       1 << DPIO_CHV_K_DIV_SHIFT);
-
-       /* Feedback post-divider - m2 */
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
-
-       /* Feedback refclk divider - n and m1 */
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
-                       DPIO_CHV_M1_DIV_BY_2 |
-                       1 << DPIO_CHV_N_DIV_SHIFT);
-
-       /* M2 fraction division */
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
-
-       /* M2 fraction division enable */
-       dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
-       dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
-       dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
-       if (bestm2_frac)
-               dpio_val |= DPIO_CHV_FRAC_DIV_EN;
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
-
-       /* Program digital lock detect threshold */
-       dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
-       dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
-                                       DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
-       dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
-       if (!bestm2_frac)
-               dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
-
-       /* Loop filter */
-       if (vco == 5400000) {
-               loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
-               loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
-               loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
-               tribuf_calcntr = 0x9;
-       } else if (vco <= 6200000) {
-               loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
-               loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
-               loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
-               tribuf_calcntr = 0x9;
-       } else if (vco <= 6480000) {
-               loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
-               loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
-               loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
-               tribuf_calcntr = 0x8;
-       } else {
-               /* Not supported. Apply the same limits as in the max case */
-               loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
-               loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
-               loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
-               tribuf_calcntr = 0;
-       }
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
-
-       dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
-       dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
-       dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
-       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
-
-       /* AFC Recal */
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
-                       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
-                       DPIO_AFC_RECAL);
-
-       vlv_dpio_put(dev_priv);
-}
-
-/**
- * vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- * @dpll: PLL configuration
- *
- * Enable the PLL for @pipe using the supplied @dpll config. To be used
- * in cases where we need the PLL enabled even when @pipe is not going to
- * be enabled.
- */
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
-                    const struct dpll *dpll)
-{
-       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-       struct intel_crtc_state *pipe_config;
-
-       pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
-       if (!pipe_config)
-               return -ENOMEM;
-
-       pipe_config->base.crtc = &crtc->base;
-       pipe_config->pixel_multiplier = 1;
-       pipe_config->dpll = *dpll;
-
-       if (IS_CHERRYVIEW(dev_priv)) {
-               chv_compute_dpll(crtc, pipe_config);
-               chv_prepare_pll(crtc, pipe_config);
-               chv_enable_pll(crtc, pipe_config);
-       } else {
-               vlv_compute_dpll(crtc, pipe_config);
-               vlv_prepare_pll(crtc, pipe_config);
-               vlv_enable_pll(crtc, pipe_config);
-       }
-
-       kfree(pipe_config);
-
-       return 0;
-}
-
-/**
- * vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to disable
- *
- * Disable the PLL for @pipe. To be used in cases where the PLL was
- * force-enabled even though @pipe was not going to be enabled.
- */
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       if (IS_CHERRYVIEW(dev_priv))
-               chv_disable_pll(dev_priv, pipe);
-       else
-               vlv_disable_pll(dev_priv, pipe);
-}
-
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
-                             struct intel_crtc_state *crtc_state,
-                             struct dpll *reduced_clock)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 dpll;
-       struct dpll *clock = &crtc_state->dpll;
-
-       i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
-       dpll = DPLL_VGA_MODE_DIS;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
-               dpll |= DPLLB_MODE_LVDS;
-       else
-               dpll |= DPLLB_MODE_DAC_SERIAL;
-
-       if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-           IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
-               dpll |= (crtc_state->pixel_multiplier - 1)
-                       << SDVO_MULTIPLIER_SHIFT_HIRES;
-       }
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               dpll |= DPLL_SDVO_HIGH_SPEED;
-
-       if (intel_crtc_has_dp_encoder(crtc_state))
-               dpll |= DPLL_SDVO_HIGH_SPEED;
-
-       /* compute bitmask from p1 value */
-       if (IS_PINEVIEW(dev_priv))
-               dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
-       else {
-               dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-               if (IS_G4X(dev_priv) && reduced_clock)
-                       dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-       }
-       switch (clock->p2) {
-       case 5:
-               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
-               break;
-       case 7:
-               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
-               break;
-       case 10:
-               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
-               break;
-       case 14:
-               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
-               break;
-       }
-       if (INTEL_GEN(dev_priv) >= 4)
-               dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-
-       if (crtc_state->sdvo_tv_clock)
-               dpll |= PLL_REF_INPUT_TVCLKINBC;
-       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-                intel_panel_use_ssc(dev_priv))
-               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-       else
-               dpll |= PLL_REF_INPUT_DREFCLK;
-
-       dpll |= DPLL_VCO_ENABLE;
-       crtc_state->dpll_hw_state.dpll = dpll;
-
-       if (INTEL_GEN(dev_priv) >= 4) {
-               u32 dpll_md = (crtc_state->pixel_multiplier - 1)
-                       << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-               crtc_state->dpll_hw_state.dpll_md = dpll_md;
-       }
-}
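-
-/*
- * Note on the P1 encoding above: the field is a one-hot bitmask rather than
- * a binary value, e.g. an illustrative p1 of 3 is programmed as
- * (1 << (3 - 1)) = 0x4 shifted into the P1 post divider field.
- */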
-
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
-                             struct intel_crtc_state *crtc_state,
-                             struct dpll *reduced_clock)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 dpll;
-       struct dpll *clock = &crtc_state->dpll;
-
-       i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
-       dpll = DPLL_VGA_MODE_DIS;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-       } else {
-               if (clock->p1 == 2)
-                       dpll |= PLL_P1_DIVIDE_BY_TWO;
-               else
-                       dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-               if (clock->p2 == 4)
-                       dpll |= PLL_P2_DIVIDE_BY_4;
-       }
-
-       /*
-        * Bspec:
-        * "[Almador Errata]: For the correct operation of the muxed DVO pins
-        *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
-        *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
-        *  Enable) must be set to “1” in both the DPLL A Control Register
-        *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
-        *
-        * For simplicity we simply keep both bits always enabled in
-        * both DPLLs. The spec says we should disable the DVO 2X clock
-        * when not needed, but this seems to work fine in practice.
-        */
-       if (IS_I830(dev_priv) ||
-           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
-               dpll |= DPLL_DVO_2X_MODE;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-           intel_panel_use_ssc(dev_priv))
-               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-       else
-               dpll |= PLL_REF_INPUT_DREFCLK;
-
-       dpll |= DPLL_VCO_ENABLE;
-       crtc_state->dpll_hw_state.dpll = dpll;
-}
-
-static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
-       u32 crtc_vtotal, crtc_vblank_end;
-       int vsyncshift = 0;
-
-       /* We need to be careful not to change the adjusted mode, as otherwise
-        * the hw state checker will get angry at the mismatch. */
-       crtc_vtotal = adjusted_mode->crtc_vtotal;
-       crtc_vblank_end = adjusted_mode->crtc_vblank_end;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               /* the chip adds 2 halflines automatically */
-               crtc_vtotal -= 1;
-               crtc_vblank_end -= 1;
-
-               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
-                       vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
-               else
-                       vsyncshift = adjusted_mode->crtc_hsync_start -
-                               adjusted_mode->crtc_htotal / 2;
-               if (vsyncshift < 0)
-                       vsyncshift += adjusted_mode->crtc_htotal;
-       }
-
-       if (INTEL_GEN(dev_priv) > 3)
-               I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
-
-       I915_WRITE(HTOTAL(cpu_transcoder),
-                  (adjusted_mode->crtc_hdisplay - 1) |
-                  ((adjusted_mode->crtc_htotal - 1) << 16));
-       I915_WRITE(HBLANK(cpu_transcoder),
-                  (adjusted_mode->crtc_hblank_start - 1) |
-                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
-       I915_WRITE(HSYNC(cpu_transcoder),
-                  (adjusted_mode->crtc_hsync_start - 1) |
-                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
-       I915_WRITE(VTOTAL(cpu_transcoder),
-                  (adjusted_mode->crtc_vdisplay - 1) |
-                  ((crtc_vtotal - 1) << 16));
-       I915_WRITE(VBLANK(cpu_transcoder),
-                  (adjusted_mode->crtc_vblank_start - 1) |
-                  ((crtc_vblank_end - 1) << 16));
-       I915_WRITE(VSYNC(cpu_transcoder),
-                  (adjusted_mode->crtc_vsync_start - 1) |
-                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
-
-       /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
-        * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
-        * documented in the DDI_FUNC_CTL register description, EDP Input Select
-        * bits. */
-       if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
-           (pipe == PIPE_B || pipe == PIPE_C))
-               I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
-}
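-
-/*
- * Illustrative example (values not tied to any particular platform): a
- * 1920x1080 mode with htotal 2200 and vtotal 1125 results in
- * HTOTAL = (1920 - 1) | ((2200 - 1) << 16) and
- * VTOTAL = (1080 - 1) | ((1125 - 1) << 16), i.e. the registers hold
- * "minus one" encoded active/total counts packed into the low/high 16 bits.
- */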
-
-static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       /* pipesrc controls the size that is scaled from, which should
-        * always be the user's requested size.
-        */
-       I915_WRITE(PIPESRC(pipe),
-                  ((crtc_state->pipe_src_w - 1) << 16) |
-                  (crtc_state->pipe_src_h - 1));
-}
-
-static void intel_get_pipe_timings(struct intel_crtc *crtc,
-                                  struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-       u32 tmp;
-
-       tmp = I915_READ(HTOTAL(cpu_transcoder));
-       pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
-       pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
-
-       if (!transcoder_is_dsi(cpu_transcoder)) {
-               tmp = I915_READ(HBLANK(cpu_transcoder));
-               pipe_config->base.adjusted_mode.crtc_hblank_start =
-                                                       (tmp & 0xffff) + 1;
-               pipe_config->base.adjusted_mode.crtc_hblank_end =
-                                               ((tmp >> 16) & 0xffff) + 1;
-       }
-       tmp = I915_READ(HSYNC(cpu_transcoder));
-       pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
-       pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
-
-       tmp = I915_READ(VTOTAL(cpu_transcoder));
-       pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
-       pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
-
-       if (!transcoder_is_dsi(cpu_transcoder)) {
-               tmp = I915_READ(VBLANK(cpu_transcoder));
-               pipe_config->base.adjusted_mode.crtc_vblank_start =
-                                                       (tmp & 0xffff) + 1;
-               pipe_config->base.adjusted_mode.crtc_vblank_end =
-                                               ((tmp >> 16) & 0xffff) + 1;
-       }
-       tmp = I915_READ(VSYNC(cpu_transcoder));
-       pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
-       pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
-
-       if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
-               pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
-               pipe_config->base.adjusted_mode.crtc_vtotal += 1;
-               pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
-       }
-}
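-
-/*
- * The "+ 1" adjustments above undo the "minus one" register encoding used by
- * intel_set_pipe_timings(), recovering the original mode timings.
- */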
-
-static void intel_get_pipe_src_size(struct intel_crtc *crtc,
-                                   struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 tmp;
-
-       tmp = I915_READ(PIPESRC(crtc->pipe));
-       pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
-       pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
-
-       pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
-       pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
-}
-
-void intel_mode_from_pipe_config(struct drm_display_mode *mode,
-                                struct intel_crtc_state *pipe_config)
-{
-       mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
-       mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
-       mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
-       mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
-
-       mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
-       mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
-       mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
-       mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
-
-       mode->flags = pipe_config->base.adjusted_mode.flags;
-       mode->type = DRM_MODE_TYPE_DRIVER;
-
-       mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
-
-       mode->hsync = drm_mode_hsync(mode);
-       mode->vrefresh = drm_mode_vrefresh(mode);
-       drm_mode_set_name(mode);
-}
-
-static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 pipeconf;
-
-       pipeconf = 0;
-
-       /* we keep both pipes enabled on 830 */
-       if (IS_I830(dev_priv))
-               pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
-
-       if (crtc_state->double_wide)
-               pipeconf |= PIPECONF_DOUBLE_WIDE;
-
-       /* only g4x and later have fancy bpc/dither controls */
-       if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-           IS_CHERRYVIEW(dev_priv)) {
-               /* Bspec claims that we can't use dithering for 30bpp pipes. */
-               if (crtc_state->dither && crtc_state->pipe_bpp != 30)
-                       pipeconf |= PIPECONF_DITHER_EN |
-                                   PIPECONF_DITHER_TYPE_SP;
-
-               switch (crtc_state->pipe_bpp) {
-               case 18:
-                       pipeconf |= PIPECONF_6BPC;
-                       break;
-               case 24:
-                       pipeconf |= PIPECONF_8BPC;
-                       break;
-               case 30:
-                       pipeconf |= PIPECONF_10BPC;
-                       break;
-               default:
-                       /* Case prevented by intel_choose_pipe_bpp_dither. */
-                       BUG();
-               }
-       }
-
-       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
-               if (INTEL_GEN(dev_priv) < 4 ||
-                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
-                       pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
-               else
-                       pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
-       } else {
-               pipeconf |= PIPECONF_PROGRESSIVE;
-       }
-
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-            crtc_state->limited_color_range)
-               pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
-
-       pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
-
-       I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
-       POSTING_READ(PIPECONF(crtc->pipe));
-}
-
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
-                                  struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       const struct intel_limit *limit;
-       int refclk = 48000;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if (intel_panel_use_ssc(dev_priv)) {
-                       refclk = dev_priv->vbt.lvds_ssc_freq;
-                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
-               }
-
-               limit = &intel_limits_i8xx_lvds;
-       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
-               limit = &intel_limits_i8xx_dvo;
-       } else {
-               limit = &intel_limits_i8xx_dac;
-       }
-
-       if (!crtc_state->clock_set &&
-           !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                                refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       i8xx_compute_dpll(crtc, crtc_state, NULL);
-
-       return 0;
-}
-
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct intel_limit *limit;
-       int refclk = 96000;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if (intel_panel_use_ssc(dev_priv)) {
-                       refclk = dev_priv->vbt.lvds_ssc_freq;
-                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
-               }
-
-               if (intel_is_dual_link_lvds(dev_priv))
-                       limit = &intel_limits_g4x_dual_channel_lvds;
-               else
-                       limit = &intel_limits_g4x_single_channel_lvds;
-       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
-               limit = &intel_limits_g4x_hdmi;
-       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
-               limit = &intel_limits_g4x_sdvo;
-       } else {
-               /* Use the generic i9xx SDVO limits for any other output type */
-               limit = &intel_limits_i9xx_sdvo;
-       }
-
-       if (!crtc_state->clock_set &&
-           !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                               refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       i9xx_compute_dpll(crtc, crtc_state, NULL);
-
-       return 0;
-}
-
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       const struct intel_limit *limit;
-       int refclk = 96000;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if (intel_panel_use_ssc(dev_priv)) {
-                       refclk = dev_priv->vbt.lvds_ssc_freq;
-                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
-               }
-
-               limit = &intel_limits_pineview_lvds;
-       } else {
-               limit = &intel_limits_pineview_sdvo;
-       }
-
-       if (!crtc_state->clock_set &&
-           !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                               refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       i9xx_compute_dpll(crtc, crtc_state, NULL);
-
-       return 0;
-}
-
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
-                                  struct intel_crtc_state *crtc_state)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       const struct intel_limit *limit;
-       int refclk = 96000;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if (intel_panel_use_ssc(dev_priv)) {
-                       refclk = dev_priv->vbt.lvds_ssc_freq;
-                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
-               }
-
-               limit = &intel_limits_i9xx_lvds;
-       } else {
-               limit = &intel_limits_i9xx_sdvo;
-       }
-
-       if (!crtc_state->clock_set &&
-           !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                                refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       i9xx_compute_dpll(crtc, crtc_state, NULL);
-
-       return 0;
-}
-
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
-{
-       int refclk = 100000;
-       const struct intel_limit *limit = &intel_limits_chv;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (!crtc_state->clock_set &&
-           !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                               refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       chv_compute_dpll(crtc, crtc_state);
-
-       return 0;
-}
-
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
-{
-       int refclk = 100000;
-       const struct intel_limit *limit = &intel_limits_vlv;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (!crtc_state->clock_set &&
-           !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                               refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       vlv_compute_dpll(crtc, crtc_state);
-
-       return 0;
-}
-
-static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
-{
-       if (IS_I830(dev_priv))
-               return false;
-
-       return INTEL_GEN(dev_priv) >= 4 ||
-               IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_get_pfit_config(struct intel_crtc *crtc,
-                                struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 tmp;
-
-       if (!i9xx_has_pfit(dev_priv))
-               return;
-
-       tmp = I915_READ(PFIT_CONTROL);
-       if (!(tmp & PFIT_ENABLE))
-               return;
-
-       /* Check whether the pfit is attached to our pipe. */
-       if (INTEL_GEN(dev_priv) < 4) {
-               if (crtc->pipe != PIPE_B)
-                       return;
-       } else {
-               if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
-                       return;
-       }
-
-       pipe_config->gmch_pfit.control = tmp;
-       pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
-}
-
-static void vlv_crtc_clock_get(struct intel_crtc *crtc,
-                              struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = pipe_config->cpu_transcoder;
-       struct dpll clock;
-       u32 mdiv;
-       int refclk = 100000;
-
-       /* In case of DSI, DPLL will not be used */
-       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
-               return;
-
-       vlv_dpio_get(dev_priv);
-       mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
-       vlv_dpio_put(dev_priv);
-
-       clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
-       clock.m2 = mdiv & DPIO_M2DIV_MASK;
-       clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
-       clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
-       clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
-
-       pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
-}
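-
-/*
- * The mdiv fields decoded above are the same DPIO N/M1/M2/P1/P2 fields that
- * vlv_prepare_pll() packed into VLV_PLL_DW3 earlier in this file, so this
- * readout is the inverse of that programming step.
- */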
-
-static void
-i9xx_get_initial_plane_config(struct intel_crtc *crtc,
-                             struct intel_initial_plane_config *plane_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       enum pipe pipe;
-       u32 val, base, offset;
-       int fourcc, pixel_format;
-       unsigned int aligned_height;
-       struct drm_framebuffer *fb;
-       struct intel_framebuffer *intel_fb;
-
-       if (!plane->get_hw_state(plane, &pipe))
-               return;
-
-       WARN_ON(pipe != crtc->pipe);
-
-       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-       if (!intel_fb) {
-               DRM_DEBUG_KMS("failed to alloc fb\n");
-               return;
-       }
-
-       fb = &intel_fb->base;
-
-       fb->dev = dev;
-
-       val = I915_READ(DSPCNTR(i9xx_plane));
-
-       if (INTEL_GEN(dev_priv) >= 4) {
-               if (val & DISPPLANE_TILED) {
-                       plane_config->tiling = I915_TILING_X;
-                       fb->modifier = I915_FORMAT_MOD_X_TILED;
-               }
-
-               if (val & DISPPLANE_ROTATE_180)
-                       plane_config->rotation = DRM_MODE_ROTATE_180;
-       }
-
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
-           val & DISPPLANE_MIRROR)
-               plane_config->rotation |= DRM_MODE_REFLECT_X;
-
-       pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
-       fourcc = i9xx_format_to_fourcc(pixel_format);
-       fb->format = drm_format_info(fourcc);
-
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               offset = I915_READ(DSPOFFSET(i9xx_plane));
-               base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
-       } else if (INTEL_GEN(dev_priv) >= 4) {
-               if (plane_config->tiling)
-                       offset = I915_READ(DSPTILEOFF(i9xx_plane));
-               else
-                       offset = I915_READ(DSPLINOFF(i9xx_plane));
-               base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
-       } else {
-               base = I915_READ(DSPADDR(i9xx_plane));
-       }
-       plane_config->base = base;
-
-       val = I915_READ(PIPESRC(pipe));
-       fb->width = ((val >> 16) & 0xfff) + 1;
-       fb->height = ((val >> 0) & 0xfff) + 1;
-
-       val = I915_READ(DSPSTRIDE(i9xx_plane));
-       fb->pitches[0] = val & 0xffffffc0;
-
-       aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
-       plane_config->size = fb->pitches[0] * aligned_height;
-
-       DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
-                     crtc->base.name, plane->base.name, fb->width, fb->height,
-                     fb->format->cpp[0] * 8, base, fb->pitches[0],
-                     plane_config->size);
-
-       plane_config->fb = intel_fb;
-}
-
-static void chv_crtc_clock_get(struct intel_crtc *crtc,
-                              struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = pipe_config->cpu_transcoder;
-       enum dpio_channel port = vlv_pipe_to_channel(pipe);
-       struct dpll clock;
-       u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
-       int refclk = 100000;
-
-       /* In case of DSI, DPLL will not be used */
-       if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
-               return;
-
-       vlv_dpio_get(dev_priv);
-       cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
-       pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
-       pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
-       pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
-       pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
-       vlv_dpio_put(dev_priv);
-
-       clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
-       clock.m2 = (pll_dw0 & 0xff) << 22;
-       if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
-               clock.m2 |= pll_dw2 & 0x3fffff;
-       clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
-       clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
-       clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
-
-       pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
-}
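-
-/*
- * Note that clock.m2 above is reassembled as a fixed point value with a
- * 22-bit fractional part: the integer portion comes from CHV_PLL_DW0 and,
- * when fractional division is enabled, the fraction from CHV_PLL_DW2,
- * mirroring how chv_prepare_pll() split pipe_config->dpll.m2.
- */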
-
-static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
-                                       struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
-
-       pipe_config->lspcon_downsampling = false;
-
-       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
-               u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
-
-               if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
-                       bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
-                       bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
-
-                       if (ycbcr420_enabled) {
-                               /* We support 4:2:0 in full blend mode only */
-                               if (!blend)
-                                       output = INTEL_OUTPUT_FORMAT_INVALID;
-                               else if (!(IS_GEMINILAKE(dev_priv) ||
-                                          INTEL_GEN(dev_priv) >= 10))
-                                       output = INTEL_OUTPUT_FORMAT_INVALID;
-                               else
-                                       output = INTEL_OUTPUT_FORMAT_YCBCR420;
-                       } else {
-                               /*
-                                * Currently there is no interface defined to
-                                * check the user preference between RGB/YCBCR444
-                                * or YCBCR420. So the only possible case for
-                                * YCBCR444 usage is driving a YCBCR420 sink
-                                * through LSPCON: the pipe is configured for
-                                * YCBCR444 output and LSPCON takes care of
-                                * downsampling it.
-                                */
-                               pipe_config->lspcon_downsampling = true;
-                               output = INTEL_OUTPUT_FORMAT_YCBCR444;
-                       }
-               }
-       }
-
-       pipe_config->output_format = output;
-}
-
-static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       u32 tmp;
-
-       tmp = I915_READ(DSPCNTR(i9xx_plane));
-
-       if (tmp & DISPPLANE_GAMMA_ENABLE)
-               crtc_state->gamma_enable = true;
-
-       if (!HAS_GMCH(dev_priv) &&
-           tmp & DISPPLANE_PIPE_CSC_ENABLE)
-               crtc_state->csc_enable = true;
-}
-
-static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
-                                struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-       u32 tmp;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
-       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-       pipe_config->shared_dpll = NULL;
-
-       ret = false;
-
-       tmp = I915_READ(PIPECONF(crtc->pipe));
-       if (!(tmp & PIPECONF_ENABLE))
-               goto out;
-
-       if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-           IS_CHERRYVIEW(dev_priv)) {
-               switch (tmp & PIPECONF_BPC_MASK) {
-               case PIPECONF_6BPC:
-                       pipe_config->pipe_bpp = 18;
-                       break;
-               case PIPECONF_8BPC:
-                       pipe_config->pipe_bpp = 24;
-                       break;
-               case PIPECONF_10BPC:
-                       pipe_config->pipe_bpp = 30;
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-           (tmp & PIPECONF_COLOR_RANGE_SELECT))
-               pipe_config->limited_color_range = true;
-
-       pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
-               PIPECONF_GAMMA_MODE_SHIFT;
-
-       if (IS_CHERRYVIEW(dev_priv))
-               pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
-
-       i9xx_get_pipe_color_config(pipe_config);
-       intel_color_get_config(pipe_config);
-
-       if (INTEL_GEN(dev_priv) < 4)
-               pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
-
-       intel_get_pipe_timings(crtc, pipe_config);
-       intel_get_pipe_src_size(crtc, pipe_config);
-
-       i9xx_get_pfit_config(crtc, pipe_config);
-
-       if (INTEL_GEN(dev_priv) >= 4) {
-               /* No way to read it out on pipes B and C */
-               if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
-                       tmp = dev_priv->chv_dpll_md[crtc->pipe];
-               else
-                       tmp = I915_READ(DPLL_MD(crtc->pipe));
-               pipe_config->pixel_multiplier =
-                       ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
-                        >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
-               pipe_config->dpll_hw_state.dpll_md = tmp;
-       } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
-                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
-               tmp = I915_READ(DPLL(crtc->pipe));
-               pipe_config->pixel_multiplier =
-                       ((tmp & SDVO_MULTIPLIER_MASK)
-                        >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
-       } else {
-               /* Note that on i915G/GM the pixel multiplier is in the sdvo
-                * port and will be fixed up in the encoder->get_config
-                * function. */
-               pipe_config->pixel_multiplier = 1;
-       }
-       pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
-               pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
-               pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
-       } else {
-               /* Mask out read-only status bits. */
-               pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
-                                                    DPLL_PORTC_READY_MASK |
-                                                    DPLL_PORTB_READY_MASK);
-       }
-
-       if (IS_CHERRYVIEW(dev_priv))
-               chv_crtc_clock_get(crtc, pipe_config);
-       else if (IS_VALLEYVIEW(dev_priv))
-               vlv_crtc_clock_get(crtc, pipe_config);
-       else
-               i9xx_crtc_clock_get(crtc, pipe_config);
-
-       /*
-        * Normally the dotclock is filled in by the encoder .get_config()
-        * but in case the pipe is enabled w/o any ports we need a sane
-        * default.
-        */
-       pipe_config->base.adjusted_mode.crtc_clock =
-               pipe_config->port_clock / pipe_config->pixel_multiplier;
-
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_encoder *encoder;
-       int i;
-       u32 val, final;
-       bool has_lvds = false;
-       bool has_cpu_edp = false;
-       bool has_panel = false;
-       bool has_ck505 = false;
-       bool can_ssc = false;
-       bool using_ssc_source = false;
-
-       /* We need to take the global config into account */
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       has_panel = true;
-                       has_lvds = true;
-                       break;
-               case INTEL_OUTPUT_EDP:
-                       has_panel = true;
-                       if (encoder->port == PORT_A)
-                               has_cpu_edp = true;
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       if (HAS_PCH_IBX(dev_priv)) {
-               has_ck505 = dev_priv->vbt.display_clock_mode;
-               can_ssc = has_ck505;
-       } else {
-               has_ck505 = false;
-               can_ssc = true;
-       }
-
-       /* Check if any DPLLs are using the SSC source */
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               u32 temp = I915_READ(PCH_DPLL(i));
-
-               if (!(temp & DPLL_VCO_ENABLE))
-                       continue;
-
-               if ((temp & PLL_REF_INPUT_MASK) ==
-                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-                       using_ssc_source = true;
-                       break;
-               }
-       }
-
-       DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
-                     has_panel, has_lvds, has_ck505, using_ssc_source);
-
-       /* Ironlake: try to set up the display ref clock before DPLL
-        * enabling. This is only under the driver's control after
-        * PCH B stepping; earlier chipset steppings should ignore
-        * this setting.
-        */
-       val = I915_READ(PCH_DREF_CONTROL);
-
-       /* As we must carefully and slowly disable/enable each source in turn,
-        * compute the final state we want first and check if we need to
-        * make any changes at all.
-        */
-       final = val;
-       final &= ~DREF_NONSPREAD_SOURCE_MASK;
-       if (has_ck505)
-               final |= DREF_NONSPREAD_CK505_ENABLE;
-       else
-               final |= DREF_NONSPREAD_SOURCE_ENABLE;
-
-       final &= ~DREF_SSC_SOURCE_MASK;
-       final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-       final &= ~DREF_SSC1_ENABLE;
-
-       if (has_panel) {
-               final |= DREF_SSC_SOURCE_ENABLE;
-
-               if (intel_panel_use_ssc(dev_priv) && can_ssc)
-                       final |= DREF_SSC1_ENABLE;
-
-               if (has_cpu_edp) {
-                       if (intel_panel_use_ssc(dev_priv) && can_ssc)
-                               final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
-                       else
-                               final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
-               } else
-                       final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-       } else if (using_ssc_source) {
-               final |= DREF_SSC_SOURCE_ENABLE;
-               final |= DREF_SSC1_ENABLE;
-       }
-
-       if (final == val)
-               return;
-
-       /* Always enable nonspread source */
-       val &= ~DREF_NONSPREAD_SOURCE_MASK;
-
-       if (has_ck505)
-               val |= DREF_NONSPREAD_CK505_ENABLE;
-       else
-               val |= DREF_NONSPREAD_SOURCE_ENABLE;
-
-       if (has_panel) {
-               val &= ~DREF_SSC_SOURCE_MASK;
-               val |= DREF_SSC_SOURCE_ENABLE;
-
-               /* SSC must be turned on before enabling the CPU output */
-               if (intel_panel_use_ssc(dev_priv) && can_ssc) {
-                       DRM_DEBUG_KMS("Using SSC on panel\n");
-                       val |= DREF_SSC1_ENABLE;
-               } else
-                       val &= ~DREF_SSC1_ENABLE;
-
-               /* Get SSC going before enabling the outputs */
-               I915_WRITE(PCH_DREF_CONTROL, val);
-               POSTING_READ(PCH_DREF_CONTROL);
-               udelay(200);
-
-               val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
-               /* Enable CPU source on CPU attached eDP */
-               if (has_cpu_edp) {
-                       if (intel_panel_use_ssc(dev_priv) && can_ssc) {
-                               DRM_DEBUG_KMS("Using SSC on eDP\n");
-                               val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
-                       } else
-                               val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
-               } else
-                       val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
-               I915_WRITE(PCH_DREF_CONTROL, val);
-               POSTING_READ(PCH_DREF_CONTROL);
-               udelay(200);
-       } else {
-               DRM_DEBUG_KMS("Disabling CPU source output\n");
-
-               val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
-               /* Turn off CPU output */
-               val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
-               I915_WRITE(PCH_DREF_CONTROL, val);
-               POSTING_READ(PCH_DREF_CONTROL);
-               udelay(200);
-
-               if (!using_ssc_source) {
-                       DRM_DEBUG_KMS("Disabling SSC source\n");
-
-                       /* Turn off the SSC source */
-                       val &= ~DREF_SSC_SOURCE_MASK;
-                       val |= DREF_SSC_SOURCE_DISABLE;
-
-                       /* Turn off SSC1 */
-                       val &= ~DREF_SSC1_ENABLE;
-
-                       I915_WRITE(PCH_DREF_CONTROL, val);
-                       POSTING_READ(PCH_DREF_CONTROL);
-                       udelay(200);
-               }
-       }
-
-       BUG_ON(val != final);
-}
-
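ironlake_init_pch_refclk() above is a good example of a compute-then-transition pattern: build the complete target value of PCH_DREF_CONTROL (final) first, return early if it already matches, then step the hardware towards it one field at a time with a posting read and delay after each write. A minimal stand-alone sketch of that shape, with an invented register layout (SRC_*/OUT_* and write_and_settle() are illustrative names, not i915 ones):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical register layout, purely to illustrate the pattern. */
#define SRC_MASK   (3u << 0)
#define SRC_SSC    (1u << 0)
#define OUT_MASK   (3u << 2)
#define OUT_ENABLE (1u << 2)

static uint32_t reg;                     /* stands in for PCH_DREF_CONTROL */

static void write_and_settle(uint32_t v) /* write + posting read + delay */
{
        reg = v;
}

static void program_refclk(bool want_ssc, bool want_out)
{
        uint32_t val = reg, final = reg;

        /* 1) compute the desired end state up front */
        final &= ~(SRC_MASK | OUT_MASK);
        if (want_ssc)
                final |= SRC_SSC;
        if (want_out)
                final |= OUT_ENABLE;

        if (final == val)
                return;                  /* nothing to change */

        /* 2) walk the hardware to it one field at a time, source first */
        val &= ~SRC_MASK;
        val |= final & SRC_MASK;
        write_and_settle(val);

        val &= ~OUT_MASK;
        val |= final & OUT_MASK;
        write_and_settle(val);
}

int main(void)
{
        program_refclk(true, true);
        printf("final reg = 0x%02x\n", (unsigned int)reg);   /* -> 0x05 */
        return 0;
}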
-static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
-{
-       u32 tmp;
-
-       tmp = I915_READ(SOUTH_CHICKEN2);
-       tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
-       I915_WRITE(SOUTH_CHICKEN2, tmp);
-
-       if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
-                       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
-               DRM_ERROR("FDI mPHY reset assert timeout\n");
-
-       tmp = I915_READ(SOUTH_CHICKEN2);
-       tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
-       I915_WRITE(SOUTH_CHICKEN2, tmp);
-
-       if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
-                        FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
-               DRM_ERROR("FDI mPHY reset de-assert timeout\n");
-}
-
-/* WaMPhyProgramming:hsw */
-static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
-{
-       u32 tmp;
-
-       tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
-       tmp &= ~(0xFF << 24);
-       tmp |= (0x12 << 24);
-       intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
-       tmp |= (1 << 11);
-       intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
-       tmp |= (1 << 11);
-       intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
-       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
-       intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
-       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
-       intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
-       tmp &= ~(7 << 13);
-       tmp |= (5 << 13);
-       intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
-       tmp &= ~(7 << 13);
-       tmp |= (5 << 13);
-       intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
-       tmp &= ~0xFF;
-       tmp |= 0x1C;
-       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
-       tmp &= ~0xFF;
-       tmp |= 0x1C;
-       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
-       tmp &= ~(0xFF << 16);
-       tmp |= (0x1C << 16);
-       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
-       tmp &= ~(0xFF << 16);
-       tmp |= (0x1C << 16);
-       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
-       tmp |= (1 << 27);
-       intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
-       tmp |= (1 << 27);
-       intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
-       tmp &= ~(0xF << 28);
-       tmp |= (4 << 28);
-       intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
-
-       tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
-       tmp &= ~(0xF << 28);
-       tmp |= (4 << 28);
-       intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
-}
-
-/* Implements 3 different sequences from BSpec chapter "Display iCLK
- * Programming" based on the parameters passed:
- * - Sequence to enable CLKOUT_DP
- * - Sequence to enable CLKOUT_DP without spread
- * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
- */
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
-                                bool with_spread, bool with_fdi)
-{
-       u32 reg, tmp;
-
-       if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
-               with_spread = true;
-       if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
-           with_fdi, "LP PCH doesn't have FDI\n"))
-               with_fdi = false;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-       tmp &= ~SBI_SSCCTL_DISABLE;
-       tmp |= SBI_SSCCTL_PATHALT;
-       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
-       udelay(24);
-
-       if (with_spread) {
-               tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-               tmp &= ~SBI_SSCCTL_PATHALT;
-               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
-               if (with_fdi) {
-                       lpt_reset_fdi_mphy(dev_priv);
-                       lpt_program_fdi_mphy(dev_priv);
-               }
-       }
-
-       reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
-       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
-       tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
-       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
-
-       mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
-{
-       u32 reg, tmp;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
-       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
-       tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
-       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
-
-       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-       if (!(tmp & SBI_SSCCTL_DISABLE)) {
-               if (!(tmp & SBI_SSCCTL_PATHALT)) {
-                       tmp |= SBI_SSCCTL_PATHALT;
-                       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-                       udelay(32);
-               }
-               tmp |= SBI_SSCCTL_DISABLE;
-               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-       }
-
-       mutex_unlock(&dev_priv->sb_lock);
-}
-
-#define BEND_IDX(steps) ((50 + (steps)) / 5)
-
-static const u16 sscdivintphase[] = {
-       [BEND_IDX( 50)] = 0x3B23,
-       [BEND_IDX( 45)] = 0x3B23,
-       [BEND_IDX( 40)] = 0x3C23,
-       [BEND_IDX( 35)] = 0x3C23,
-       [BEND_IDX( 30)] = 0x3D23,
-       [BEND_IDX( 25)] = 0x3D23,
-       [BEND_IDX( 20)] = 0x3E23,
-       [BEND_IDX( 15)] = 0x3E23,
-       [BEND_IDX( 10)] = 0x3F23,
-       [BEND_IDX(  5)] = 0x3F23,
-       [BEND_IDX(  0)] = 0x0025,
-       [BEND_IDX( -5)] = 0x0025,
-       [BEND_IDX(-10)] = 0x0125,
-       [BEND_IDX(-15)] = 0x0125,
-       [BEND_IDX(-20)] = 0x0225,
-       [BEND_IDX(-25)] = 0x0225,
-       [BEND_IDX(-30)] = 0x0325,
-       [BEND_IDX(-35)] = 0x0325,
-       [BEND_IDX(-40)] = 0x0425,
-       [BEND_IDX(-45)] = 0x0425,
-       [BEND_IDX(-50)] = 0x0525,
-};
-
-/*
- * Bend CLKOUT_DP
- * steps -50 to 50 inclusive, in steps of 5
- * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
- * change in clock period = -(steps / 10) * 5.787 ps
- */
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
-{
-       u32 tmp;
-       int idx = BEND_IDX(steps);
-
-       if (WARN_ON(steps % 5 != 0))
-               return;
-
-       if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
-               return;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       if (steps % 10 != 0)
-               tmp = 0xAAAAAAAB;
-       else
-               tmp = 0x00000000;
-       intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
-
-       tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
-       tmp &= 0xffff0000;
-       tmp |= sscdivintphase[idx];
-       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
-
-       mutex_unlock(&dev_priv->sb_lock);
-}
-
-#undef BEND_IDX
-
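To put numbers on the bend comment above: at the 135 MHz nominal CLKOUT_DP (period ~7407.4 ps), each 10 steps moves the period by 5.787 ps, so the full +50 steps shortens it by ~28.9 ps, roughly +0.5 MHz (about +0.4%). A small stand-alone calculation of the same relation (plain C with example values, not driver code):

#include <stdio.h>

/* Illustrative only: change in period = -(steps / 10) * 5.787 ps,
 * around the 135 MHz nominal clock stated in the comment above. */
int main(void)
{
        const double nominal_mhz = 135.0;
        const double nominal_ps = 1e6 / nominal_mhz;        /* ~7407.4 ps */

        for (int steps = -50; steps <= 50; steps += 25) {
                double period_ps = nominal_ps - (steps / 10.0) * 5.787;
                double mhz = 1e6 / period_ps;

                printf("steps=%+3d  period=%8.2f ps  clock=%7.3f MHz\n",
                       steps, period_ps, mhz);
        }
        return 0;
}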
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
-{
-       u32 fuse_strap = I915_READ(FUSE_STRAP);
-       u32 ctl = I915_READ(SPLL_CTL);
-
-       if ((ctl & SPLL_PLL_ENABLE) == 0)
-               return false;
-
-       if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
-           (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
-               return true;
-
-       if (IS_BROADWELL(dev_priv) &&
-           (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
-               return true;
-
-       return false;
-}
-
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
-                              enum intel_dpll_id id)
-{
-       u32 fuse_strap = I915_READ(FUSE_STRAP);
-       u32 ctl = I915_READ(WRPLL_CTL(id));
-
-       if ((ctl & WRPLL_PLL_ENABLE) == 0)
-               return false;
-
-       if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
-               return true;
-
-       if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
-           (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
-           (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
-               return true;
-
-       return false;
-}
-
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
-       struct intel_encoder *encoder;
-       bool pch_ssc_in_use = false;
-       bool has_fdi = false;
-
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               switch (encoder->type) {
-               case INTEL_OUTPUT_ANALOG:
-                       has_fdi = true;
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       /*
-        * The BIOS may have decided to use the PCH SSC
-        * reference so we must not disable it until the
-        * relevant PLLs have stopped relying on it. We'll
-        * just leave the PCH SSC reference enabled in case
-        * any active PLL is using it. It will get disabled
-        * after runtime suspend if we don't have FDI.
-        *
-        * TODO: Move the whole reference clock handling
-        * to the modeset sequence proper so that we can
-        * actually enable/disable/reconfigure these things
-        * safely. To do that we need to introduce a real
-        * clock hierarchy. That would also allow us to do
-        * clock bending finally.
-        */
-       if (spll_uses_pch_ssc(dev_priv)) {
-               DRM_DEBUG_KMS("SPLL using PCH SSC\n");
-               pch_ssc_in_use = true;
-       }
-
-       if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
-               DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
-               pch_ssc_in_use = true;
-       }
-
-       if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
-               DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
-               pch_ssc_in_use = true;
-       }
-
-       if (pch_ssc_in_use)
-               return;
-
-       if (has_fdi) {
-               lpt_bend_clkout_dp(dev_priv, 0);
-               lpt_enable_clkout_dp(dev_priv, true, true);
-       } else {
-               lpt_disable_clkout_dp(dev_priv);
-       }
-}
-
-/*
- * Initialize reference clocks when the driver loads
- */
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
-       if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
-               ironlake_init_pch_refclk(dev_priv);
-       else if (HAS_PCH_LPT(dev_priv))
-               lpt_init_pch_refclk(dev_priv);
-}
-
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 val;
-
-       val = 0;
-
-       switch (crtc_state->pipe_bpp) {
-       case 18:
-               val |= PIPECONF_6BPC;
-               break;
-       case 24:
-               val |= PIPECONF_8BPC;
-               break;
-       case 30:
-               val |= PIPECONF_10BPC;
-               break;
-       case 36:
-               val |= PIPECONF_12BPC;
-               break;
-       default:
-               /* Case prevented by intel_choose_pipe_bpp_dither. */
-               BUG();
-       }
-
-       if (crtc_state->dither)
-               val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
-
-       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
-               val |= PIPECONF_INTERLACED_ILK;
-       else
-               val |= PIPECONF_PROGRESSIVE;
-
-       if (crtc_state->limited_color_range)
-               val |= PIPECONF_COLOR_RANGE_SELECT;
-
-       val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
-
-       I915_WRITE(PIPECONF(pipe), val);
-       POSTING_READ(PIPECONF(pipe));
-}
-
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       u32 val = 0;
-
-       if (IS_HASWELL(dev_priv) && crtc_state->dither)
-               val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
-
-       if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
-               val |= PIPECONF_INTERLACED_ILK;
-       else
-               val |= PIPECONF_PROGRESSIVE;
-
-       I915_WRITE(PIPECONF(cpu_transcoder), val);
-       POSTING_READ(PIPECONF(cpu_transcoder));
-}
-
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 val = 0;
-
-       switch (crtc_state->pipe_bpp) {
-       case 18:
-               val |= PIPEMISC_DITHER_6_BPC;
-               break;
-       case 24:
-               val |= PIPEMISC_DITHER_8_BPC;
-               break;
-       case 30:
-               val |= PIPEMISC_DITHER_10_BPC;
-               break;
-       case 36:
-               val |= PIPEMISC_DITHER_12_BPC;
-               break;
-       default:
-               MISSING_CASE(crtc_state->pipe_bpp);
-               break;
-       }
-
-       if (crtc_state->dither)
-               val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
-
-       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-           crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
-               val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
-       if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
-               val |= PIPEMISC_YUV420_ENABLE |
-                       PIPEMISC_YUV420_MODE_FULL_BLEND;
-
-       if (INTEL_GEN(dev_priv) >= 11 &&
-           (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
-                                          BIT(PLANE_CURSOR))) == 0)
-               val |= PIPEMISC_HDR_MODE_PRECISION;
-
-       I915_WRITE(PIPEMISC(crtc->pipe), val);
-}
-
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 tmp;
-
-       tmp = I915_READ(PIPEMISC(crtc->pipe));
-
-       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-       case PIPEMISC_DITHER_6_BPC:
-               return 18;
-       case PIPEMISC_DITHER_8_BPC:
-               return 24;
-       case PIPEMISC_DITHER_10_BPC:
-               return 30;
-       case PIPEMISC_DITHER_12_BPC:
-               return 36;
-       default:
-               MISSING_CASE(tmp);
-               return 0;
-       }
-}
-
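The two switch statements above are the same mapping in both directions: pipe_bpp is three color channels times the per-channel depth encoded in the PIPEMISC dither field, which is why 6/8/10/12 bpc shows up as 18/24/30/36. A trivial sketch of that relation (illustrative helper, not the driver's):

#include <stdio.h>

/* Illustrative only: pipe_bpp is 3 color channels * bits per channel. */
static int bpc_to_pipe_bpp(int bpc)
{
        return 3 * bpc;
}

int main(void)
{
        for (int bpc = 6; bpc <= 12; bpc += 2)
                printf("%2d bpc -> pipe_bpp %d\n", bpc, bpc_to_pipe_bpp(bpc));
        return 0;
}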
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
-{
-       /*
-        * Account for spread spectrum to avoid
-        * oversubscribing the link. Max center spread
-        * is 2.5%; use 5% for safety's sake.
-        */
-       u32 bps = target_clock * bpp * 21 / 20;
-       return DIV_ROUND_UP(bps, link_bw * 8);
-}
-
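As a worked example of the helper above, assume a ~148,500 kHz pipe clock at 24 bpp over a 270,000 kHz FDI link: bps = 148500 * 24 * 21 / 20 = 3,742,200, and DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes. The same arithmetic stand-alone (example values only):

#include <stdio.h>

/* Same formula as ironlake_get_lanes_required(), reproduced stand-alone:
 * add 5% headroom for spread spectrum, then divide by per-lane bandwidth. */
static int lanes_required(int target_clock, int link_bw, int bpp)
{
        unsigned int bps = target_clock * bpp * 21 / 20;

        return (bps + link_bw * 8 - 1) / (link_bw * 8);   /* DIV_ROUND_UP */
}

int main(void)
{
        /* example values: ~148.5 MHz pipe clock, 24 bpp, 270 MHz link */
        printf("lanes = %d\n", lanes_required(148500, 270000, 24));  /* -> 2 */
        return 0;
}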
-static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
-{
-       return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
-}
-
-static void ironlake_compute_dpll(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state,
-                                 struct dpll *reduced_clock)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 dpll, fp, fp2;
-       int factor;
-
-       /* Enable autotuning of the PLL clock (if permissible) */
-       factor = 21;
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if ((intel_panel_use_ssc(dev_priv) &&
-                    dev_priv->vbt.lvds_ssc_freq == 100000) ||
-                   (HAS_PCH_IBX(dev_priv) &&
-                    intel_is_dual_link_lvds(dev_priv)))
-                       factor = 25;
-       } else if (crtc_state->sdvo_tv_clock) {
-               factor = 20;
-       }
-
-       fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
-       if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
-               fp |= FP_CB_TUNE;
-
-       if (reduced_clock) {
-               fp2 = i9xx_dpll_compute_fp(reduced_clock);
-
-               if (reduced_clock->m < factor * reduced_clock->n)
-                       fp2 |= FP_CB_TUNE;
-       } else {
-               fp2 = fp;
-       }
-
-       dpll = 0;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
-               dpll |= DPLLB_MODE_LVDS;
-       else
-               dpll |= DPLLB_MODE_DAC_SERIAL;
-
-       dpll |= (crtc_state->pixel_multiplier - 1)
-               << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               dpll |= DPLL_SDVO_HIGH_SPEED;
-
-       if (intel_crtc_has_dp_encoder(crtc_state))
-               dpll |= DPLL_SDVO_HIGH_SPEED;
-
-       /*
-        * The high speed IO clock is only really required for
-        * SDVO/HDMI/DP, but we also enable it for CRT to make it
-        * possible to share the DPLL between CRT and HDMI. Enabling
-        * the clock needlessly does no real harm, except use up a
-        * bit of power potentially.
-        *
-        * We'll limit this to IVB with 3 pipes, since it has only two
-        * DPLLs and so DPLL sharing is the only way to get three pipes
-        * driving PCH ports at the same time. On SNB we could do this,
-        * and potentially avoid enabling the second DPLL, but it's not
-        * clear if it's a win or loss power-wise. No point in doing
-        * this on ILK at all since it has a fixed DPLL<->pipe mapping.
-        */
-       if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
-           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
-               dpll |= DPLL_SDVO_HIGH_SPEED;
-
-       /* compute bitmask from p1 value */
-       dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-       /* also FPA1 */
-       dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
-       switch (crtc_state->dpll.p2) {
-       case 5:
-               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
-               break;
-       case 7:
-               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
-               break;
-       case 10:
-               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
-               break;
-       case 14:
-               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
-               break;
-       }
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-           intel_panel_use_ssc(dev_priv))
-               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-       else
-               dpll |= PLL_REF_INPUT_DREFCLK;
-
-       dpll |= DPLL_VCO_ENABLE;
-
-       crtc_state->dpll_hw_state.dpll = dpll;
-       crtc_state->dpll_hw_state.fp0 = fp;
-       crtc_state->dpll_hw_state.fp1 = fp2;
-}
-
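One detail of the DPLL value built above that is easy to miss: the P1 post divider is programmed as a one-hot bitmask rather than a binary number, so p1 = N sets bit N-1 of the field, and the same mask is mirrored into the FPA1 field. A tiny illustration of that encoding (the shift values are assumptions for the example, not the real register layout):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field offsets, to illustrate the encoding only. */
#define P1_POST_DIV_SHIFT   16
#define P1_POST_DIV_SHIFT_B 0

static uint32_t encode_p1(int p1)
{
        /* p1 = 1..8 becomes a one-hot mask: p1 = 3 -> 0b100 in the field */
        uint32_t mask = 1u << (p1 - 1);

        return (mask << P1_POST_DIV_SHIFT) | (mask << P1_POST_DIV_SHIFT_B);
}

int main(void)
{
        printf("p1=3 -> 0x%08x\n", (unsigned int)encode_p1(3));  /* 0x00040004 */
        return 0;
}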
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
-                                      struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct intel_limit *limit;
-       int refclk = 120000;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
-       if (!crtc_state->has_pch_encoder)
-               return 0;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-               if (intel_panel_use_ssc(dev_priv)) {
-                       DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
-                                     dev_priv->vbt.lvds_ssc_freq);
-                       refclk = dev_priv->vbt.lvds_ssc_freq;
-               }
-
-               if (intel_is_dual_link_lvds(dev_priv)) {
-                       if (refclk == 100000)
-                               limit = &intel_limits_ironlake_dual_lvds_100m;
-                       else
-                               limit = &intel_limits_ironlake_dual_lvds;
-               } else {
-                       if (refclk == 100000)
-                               limit = &intel_limits_ironlake_single_lvds_100m;
-                       else
-                               limit = &intel_limits_ironlake_single_lvds;
-               }
-       } else {
-               limit = &intel_limits_ironlake_dac;
-       }
-
-       if (!crtc_state->clock_set &&
-           !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
-                               refclk, NULL, &crtc_state->dpll)) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
-
-       ironlake_compute_dpll(crtc, crtc_state, NULL);
-
-       if (!intel_get_shared_dpll(crtc_state, NULL)) {
-               DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
-                             pipe_name(crtc->pipe));
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
-                                        struct intel_link_m_n *m_n)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum pipe pipe = crtc->pipe;
-
-       m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
-       m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
-       m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
-               & ~TU_SIZE_MASK;
-       m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
-       m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
-                   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-}
-
-static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
-                                        enum transcoder transcoder,
-                                        struct intel_link_m_n *m_n,
-                                        struct intel_link_m_n *m2_n2)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-
-       if (INTEL_GEN(dev_priv) >= 5) {
-               m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
-               m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
-               m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
-                       & ~TU_SIZE_MASK;
-               m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
-               m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
-                           & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-
-               if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
-                       m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
-                       m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
-                       m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
-                                       & ~TU_SIZE_MASK;
-                       m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
-                       m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
-                                       & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-               }
-       } else {
-               m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
-               m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
-               m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
-                       & ~TU_SIZE_MASK;
-               m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
-               m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
-                           & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-       }
-}
-
-void intel_dp_get_m_n(struct intel_crtc *crtc,
-                     struct intel_crtc_state *pipe_config)
-{
-       if (pipe_config->has_pch_encoder)
-               intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
-       else
-               intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-                                            &pipe_config->dp_m_n,
-                                            &pipe_config->dp_m2_n2);
-}
-
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
-                                       struct intel_crtc_state *pipe_config)
-{
-       intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-                                    &pipe_config->fdi_m_n, NULL);
-}
-
-static void skylake_get_pfit_config(struct intel_crtc *crtc,
-                                   struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
-       u32 ps_ctrl = 0;
-       int id = -1;
-       int i;
-
-       /* find scaler attached to this pipe */
-       for (i = 0; i < crtc->num_scalers; i++) {
-               ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
-               if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
-                       id = i;
-                       pipe_config->pch_pfit.enabled = true;
-                       pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
-                       pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
-                       scaler_state->scalers[i].in_use = true;
-                       break;
-               }
-       }
-
-       scaler_state->scaler_id = id;
-       if (id >= 0) {
-               scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
-       } else {
-               scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
-       }
-}
-
-static void
-skylake_get_initial_plane_config(struct intel_crtc *crtc,
-                                struct intel_initial_plane_config *plane_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-       enum plane_id plane_id = plane->id;
-       enum pipe pipe;
-       u32 val, base, offset, stride_mult, tiling, alpha;
-       int fourcc, pixel_format;
-       unsigned int aligned_height;
-       struct drm_framebuffer *fb;
-       struct intel_framebuffer *intel_fb;
-
-       if (!plane->get_hw_state(plane, &pipe))
-               return;
-
-       WARN_ON(pipe != crtc->pipe);
-
-       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-       if (!intel_fb) {
-               DRM_DEBUG_KMS("failed to alloc fb\n");
-               return;
-       }
-
-       fb = &intel_fb->base;
-
-       fb->dev = dev;
-
-       val = I915_READ(PLANE_CTL(pipe, plane_id));
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
-       else
-               pixel_format = val & PLANE_CTL_FORMAT_MASK;
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
-               alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
-               alpha &= PLANE_COLOR_ALPHA_MASK;
-       } else {
-               alpha = val & PLANE_CTL_ALPHA_MASK;
-       }
-
-       fourcc = skl_format_to_fourcc(pixel_format,
-                                     val & PLANE_CTL_ORDER_RGBX, alpha);
-       fb->format = drm_format_info(fourcc);
-
-       tiling = val & PLANE_CTL_TILED_MASK;
-       switch (tiling) {
-       case PLANE_CTL_TILED_LINEAR:
-               fb->modifier = DRM_FORMAT_MOD_LINEAR;
-               break;
-       case PLANE_CTL_TILED_X:
-               plane_config->tiling = I915_TILING_X;
-               fb->modifier = I915_FORMAT_MOD_X_TILED;
-               break;
-       case PLANE_CTL_TILED_Y:
-               plane_config->tiling = I915_TILING_Y;
-               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
-                       fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
-               else
-                       fb->modifier = I915_FORMAT_MOD_Y_TILED;
-               break;
-       case PLANE_CTL_TILED_YF:
-               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
-                       fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
-               else
-                       fb->modifier = I915_FORMAT_MOD_Yf_TILED;
-               break;
-       default:
-               MISSING_CASE(tiling);
-               goto error;
-       }
-
-       /*
-        * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with Xrandr,
-        * while i915 HW rotation is clockwise; hence the swapped values here.
-        */
-       switch (val & PLANE_CTL_ROTATE_MASK) {
-       case PLANE_CTL_ROTATE_0:
-               plane_config->rotation = DRM_MODE_ROTATE_0;
-               break;
-       case PLANE_CTL_ROTATE_90:
-               plane_config->rotation = DRM_MODE_ROTATE_270;
-               break;
-       case PLANE_CTL_ROTATE_180:
-               plane_config->rotation = DRM_MODE_ROTATE_180;
-               break;
-       case PLANE_CTL_ROTATE_270:
-               plane_config->rotation = DRM_MODE_ROTATE_90;
-               break;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 10 &&
-           val & PLANE_CTL_FLIP_HORIZONTAL)
-               plane_config->rotation |= DRM_MODE_REFLECT_X;
-
-       base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
-       plane_config->base = base;
-
-       offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
-
-       val = I915_READ(PLANE_SIZE(pipe, plane_id));
-       fb->height = ((val >> 16) & 0xfff) + 1;
-       fb->width = ((val >> 0) & 0x1fff) + 1;
-
-       val = I915_READ(PLANE_STRIDE(pipe, plane_id));
-       stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
-       fb->pitches[0] = (val & 0x3ff) * stride_mult;
-
-       aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
-       plane_config->size = fb->pitches[0] * aligned_height;
-
-       DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
-                     crtc->base.name, plane->base.name, fb->width, fb->height,
-                     fb->format->cpp[0] * 8, base, fb->pitches[0],
-                     plane_config->size);
-
-       plane_config->fb = intel_fb;
-       return;
-
-error:
-       kfree(intel_fb);
-}
-
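The geometry readback above boils down to: width and height come from the minus-one fields of PLANE_SIZE, the byte pitch is the PLANE_STRIDE value times a tiling-dependent multiplier, and the estimated allocation is pitch times the tile-aligned height. A rough worked example assuming an X-tiled 1920x1080 XRGB8888 plane (the 512-byte stride unit, 8-row tile height and register values are assumptions made for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t plane_size = ((1080u - 1) << 16) | (1920u - 1); /* HW stores size minus one */
        uint32_t plane_stride = 15;                              /* assumed, in 512-byte units */
        unsigned int stride_mult = 512, tile_rows = 8;           /* assumed X-tile parameters */

        unsigned int height = ((plane_size >> 16) & 0xfff) + 1;
        unsigned int width  = (plane_size & 0x1fff) + 1;
        unsigned int pitch  = (plane_stride & 0x3ff) * stride_mult;
        unsigned int aligned_height = (height + tile_rows - 1) / tile_rows * tile_rows;

        printf("%ux%u, pitch %u bytes, size %u bytes\n",
               width, height, pitch, pitch * aligned_height);
        /* -> 1920x1080, pitch 7680 bytes, size 8294400 bytes */
        return 0;
}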
-static void ironlake_get_pfit_config(struct intel_crtc *crtc,
-                                    struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 tmp;
-
-       tmp = I915_READ(PF_CTL(crtc->pipe));
-
-       if (tmp & PF_ENABLE) {
-               pipe_config->pch_pfit.enabled = true;
-               pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
-               pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
-
-               /* We currently do not free assignments of panel fitters on
-                * ivb/hsw (since we don't use the higher upscaling modes which
-                * differentiate them), so just WARN about this case for now. */
-               if (IS_GEN(dev_priv, 7)) {
-                       WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
-                               PF_PIPE_SEL_IVB(crtc->pipe));
-               }
-       }
-}
-
-static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
-                                    struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-       u32 tmp;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
-       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-       pipe_config->shared_dpll = NULL;
-
-       ret = false;
-       tmp = I915_READ(PIPECONF(crtc->pipe));
-       if (!(tmp & PIPECONF_ENABLE))
-               goto out;
-
-       switch (tmp & PIPECONF_BPC_MASK) {
-       case PIPECONF_6BPC:
-               pipe_config->pipe_bpp = 18;
-               break;
-       case PIPECONF_8BPC:
-               pipe_config->pipe_bpp = 24;
-               break;
-       case PIPECONF_10BPC:
-               pipe_config->pipe_bpp = 30;
-               break;
-       case PIPECONF_12BPC:
-               pipe_config->pipe_bpp = 36;
-               break;
-       default:
-               break;
-       }
-
-       if (tmp & PIPECONF_COLOR_RANGE_SELECT)
-               pipe_config->limited_color_range = true;
-
-       pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
-               PIPECONF_GAMMA_MODE_SHIFT;
-
-       pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
-
-       i9xx_get_pipe_color_config(pipe_config);
-       intel_color_get_config(pipe_config);
-
-       if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
-               struct intel_shared_dpll *pll;
-               enum intel_dpll_id pll_id;
-
-               pipe_config->has_pch_encoder = true;
-
-               tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
-               pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
-                                         FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
-               ironlake_get_fdi_m_n_config(crtc, pipe_config);
-
-               if (HAS_PCH_IBX(dev_priv)) {
-                       /*
-                        * The pipe->pch transcoder and pch transcoder->pll
-                        * mapping is fixed.
-                        */
-                       pll_id = (enum intel_dpll_id) crtc->pipe;
-               } else {
-                       tmp = I915_READ(PCH_DPLL_SEL);
-                       if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
-                               pll_id = DPLL_ID_PCH_PLL_B;
-                       else
-                               pll_id = DPLL_ID_PCH_PLL_A;
-               }
-
-               pipe_config->shared_dpll =
-                       intel_get_shared_dpll_by_id(dev_priv, pll_id);
-               pll = pipe_config->shared_dpll;
-
-               WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
-                                               &pipe_config->dpll_hw_state));
-
-               tmp = pipe_config->dpll_hw_state.dpll;
-               pipe_config->pixel_multiplier =
-                       ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
-                        >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
-
-               ironlake_pch_clock_get(crtc, pipe_config);
-       } else {
-               pipe_config->pixel_multiplier = 1;
-       }
-
-       intel_get_pipe_timings(crtc, pipe_config);
-       intel_get_pipe_src_size(crtc, pipe_config);
-
-       ironlake_get_pfit_config(crtc, pipe_config);
-
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
-                                     struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_atomic_state *state =
-               to_intel_atomic_state(crtc_state->base.state);
-
-       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
-           INTEL_GEN(dev_priv) >= 11) {
-               struct intel_encoder *encoder =
-                       intel_get_crtc_new_encoder(state, crtc_state);
-
-               if (!intel_get_shared_dpll(crtc_state, encoder)) {
-                       DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
-                                     pipe_name(crtc->pipe));
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
-                                  enum port port,
-                                  struct intel_crtc_state *pipe_config)
-{
-       enum intel_dpll_id id;
-       u32 temp;
-
-       temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
-       id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
-       if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
-               return;
-
-       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
-}
-
-static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
-                               enum port port,
-                               struct intel_crtc_state *pipe_config)
-{
-       enum intel_dpll_id id;
-       u32 temp;
-
-       /* TODO: TBT pll not implemented. */
-       if (intel_port_is_combophy(dev_priv, port)) {
-               temp = I915_READ(DPCLKA_CFGCR0_ICL) &
-                      DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
-               id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-       } else if (intel_port_is_tc(dev_priv, port)) {
-               id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
-       } else {
-               WARN(1, "Invalid port %x\n", port);
-               return;
-       }
-
-       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
-}
-
-static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
-                               enum port port,
-                               struct intel_crtc_state *pipe_config)
-{
-       enum intel_dpll_id id;
-
-       switch (port) {
-       case PORT_A:
-               id = DPLL_ID_SKL_DPLL0;
-               break;
-       case PORT_B:
-               id = DPLL_ID_SKL_DPLL1;
-               break;
-       case PORT_C:
-               id = DPLL_ID_SKL_DPLL2;
-               break;
-       default:
-               DRM_ERROR("Incorrect port type\n");
-               return;
-       }
-
-       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
-}
-
-static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
-                               enum port port,
-                               struct intel_crtc_state *pipe_config)
-{
-       enum intel_dpll_id id;
-       u32 temp;
-
-       temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
-       id = temp >> (port * 3 + 1);
-
-       if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
-               return;
-
-       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
-}
-
-static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
-                               enum port port,
-                               struct intel_crtc_state *pipe_config)
-{
-       enum intel_dpll_id id;
-       u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
-       switch (ddi_pll_sel) {
-       case PORT_CLK_SEL_WRPLL1:
-               id = DPLL_ID_WRPLL1;
-               break;
-       case PORT_CLK_SEL_WRPLL2:
-               id = DPLL_ID_WRPLL2;
-               break;
-       case PORT_CLK_SEL_SPLL:
-               id = DPLL_ID_SPLL;
-               break;
-       case PORT_CLK_SEL_LCPLL_810:
-               id = DPLL_ID_LCPLL_810;
-               break;
-       case PORT_CLK_SEL_LCPLL_1350:
-               id = DPLL_ID_LCPLL_1350;
-               break;
-       case PORT_CLK_SEL_LCPLL_2700:
-               id = DPLL_ID_LCPLL_2700;
-               break;
-       default:
-               MISSING_CASE(ddi_pll_sel);
-               /* fall through */
-       case PORT_CLK_SEL_NONE:
-               return;
-       }
-
-       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
-}
-
-static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
-                                    struct intel_crtc_state *pipe_config,
-                                    u64 *power_domain_mask,
-                                    intel_wakeref_t *wakerefs)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum intel_display_power_domain power_domain;
-       unsigned long panel_transcoder_mask = 0;
-       unsigned long enabled_panel_transcoders = 0;
-       enum transcoder panel_transcoder;
-       intel_wakeref_t wf;
-       u32 tmp;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               panel_transcoder_mask |=
-                       BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
-
-       if (HAS_TRANSCODER_EDP(dev_priv))
-               panel_transcoder_mask |= BIT(TRANSCODER_EDP);
-
-       /*
-        * The pipe->transcoder mapping is fixed with the exception of the eDP
-        * and DSI transcoders handled below.
-        */
-       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-
-       /*
-        * XXX: Do intel_display_power_get_if_enabled before reading this (for
-        * consistency and less surprising code; it's in an always-on power well).
-        */
-       for_each_set_bit(panel_transcoder,
-                        &panel_transcoder_mask,
-                        ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
-               bool force_thru = false;
-               enum pipe trans_pipe;
-
-               tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
-               if (!(tmp & TRANS_DDI_FUNC_ENABLE))
-                       continue;
-
-               /*
-                * Log all enabled ones; only use the first one.
-                *
-                * FIXME: This won't work for two separate DSI displays.
-                */
-               enabled_panel_transcoders |= BIT(panel_transcoder);
-               if (enabled_panel_transcoders != BIT(panel_transcoder))
-                       continue;
-
-               switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
-               default:
-                       WARN(1, "unknown pipe linked to transcoder %s\n",
-                            transcoder_name(panel_transcoder));
-                       /* fall through */
-               case TRANS_DDI_EDP_INPUT_A_ONOFF:
-                       force_thru = true;
-                       /* fall through */
-               case TRANS_DDI_EDP_INPUT_A_ON:
-                       trans_pipe = PIPE_A;
-                       break;
-               case TRANS_DDI_EDP_INPUT_B_ONOFF:
-                       trans_pipe = PIPE_B;
-                       break;
-               case TRANS_DDI_EDP_INPUT_C_ONOFF:
-                       trans_pipe = PIPE_C;
-                       break;
-               }
-
-               if (trans_pipe == crtc->pipe) {
-                       pipe_config->cpu_transcoder = panel_transcoder;
-                       pipe_config->pch_pfit.force_thru = force_thru;
-               }
-       }
-
-       /*
-        * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
-        */
-       WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
-               enabled_panel_transcoders != BIT(TRANSCODER_EDP));
-
-       power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
-       WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
-
-       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wf)
-               return false;
-
-       wakerefs[power_domain] = wf;
-       *power_domain_mask |= BIT_ULL(power_domain);
-
-       tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
-
-       return tmp & PIPECONF_ENABLE;
-}
-
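The bookkeeping in hsw_get_transcoder_state() above (and in the DSI variant below) follows one pattern: for every power domain it probes, it stashes the wakeref in wakerefs[] indexed by domain and sets the matching bit in *power_domain_mask, so the caller can later release exactly what was acquired. A minimal sketch of that shape with invented stand-in types (not the i915 ones):

#include <stdint.h>
#include <stdbool.h>

/* Invented stand-ins for the illustration, not the i915 types. */
enum domain { DOM_PIPE_A, DOM_TRANSCODER_A, DOM_NUM };
typedef int wakeref_t;                        /* 0 means "not held" */

static wakeref_t try_get(enum domain d)       /* pretend acquire */
{
        return (int)d + 1;
}

static void put(enum domain d, wakeref_t wf)  /* pretend release */
{
        (void)d;
        (void)wf;
}

static bool probe_domain(enum domain d, uint64_t *mask, wakeref_t *wakerefs)
{
        wakeref_t wf = try_get(d);

        if (!wf)
                return false;

        wakerefs[d] = wf;                     /* remember which ref we hold */
        *mask |= 1ull << d;                   /* and which domains are held */
        return true;
}

static void release_all(uint64_t mask, wakeref_t *wakerefs)
{
        for (int d = 0; d < DOM_NUM; d++)
                if (mask & (1ull << d))
                        put((enum domain)d, wakerefs[d]);
}

int main(void)
{
        uint64_t mask = 0;
        wakeref_t wakerefs[DOM_NUM] = { 0 };

        if (probe_domain(DOM_PIPE_A, &mask, wakerefs))
                probe_domain(DOM_TRANSCODER_A, &mask, wakerefs);

        release_all(mask, wakerefs);
        return 0;
}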
-static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
-                                        struct intel_crtc_state *pipe_config,
-                                        u64 *power_domain_mask,
-                                        intel_wakeref_t *wakerefs)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum intel_display_power_domain power_domain;
-       enum transcoder cpu_transcoder;
-       intel_wakeref_t wf;
-       enum port port;
-       u32 tmp;
-
-       for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
-               if (port == PORT_A)
-                       cpu_transcoder = TRANSCODER_DSI_A;
-               else
-                       cpu_transcoder = TRANSCODER_DSI_C;
-
-               power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-               WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
-
-               wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-               if (!wf)
-                       continue;
-
-               wakerefs[power_domain] = wf;
-               *power_domain_mask |= BIT_ULL(power_domain);
-
-               /*
-                * The PLL needs to be enabled with a valid divider
-                * configuration, otherwise accessing DSI registers will hang
-                * the machine. See BSpec North Display Engine
-                * registers/MIPI[BXT]. We can break out here early, since we
-                * need the same DSI PLL to be enabled for both DSI ports.
-                */
-               if (!bxt_dsi_pll_is_enabled(dev_priv))
-                       break;
-
-               /* XXX: this works for video mode only */
-               tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
-               if (!(tmp & DPI_ENABLE))
-                       continue;
-
-               tmp = I915_READ(MIPI_CTRL(port));
-               if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
-                       continue;
-
-               pipe_config->cpu_transcoder = cpu_transcoder;
-               break;
-       }
-
-       return transcoder_is_dsi(pipe_config->cpu_transcoder);
-}
-
-static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
-                                      struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll;
-       enum port port;
-       u32 tmp;
-
-       tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-
-       port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               icelake_get_ddi_pll(dev_priv, port, pipe_config);
-       else if (IS_CANNONLAKE(dev_priv))
-               cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
-       else if (IS_GEN9_BC(dev_priv))
-               skylake_get_ddi_pll(dev_priv, port, pipe_config);
-       else if (IS_GEN9_LP(dev_priv))
-               bxt_get_ddi_pll(dev_priv, port, pipe_config);
-       else
-               haswell_get_ddi_pll(dev_priv, port, pipe_config);
-
-       pll = pipe_config->shared_dpll;
-       if (pll) {
-               WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
-                                               &pipe_config->dpll_hw_state));
-       }
-
-       /*
-        * Haswell has only FDI/PCH transcoder A, which is connected to
-        * DDI E. So just check whether this pipe is wired to DDI E and whether
-        * the PCH transcoder is on.
-        */
-       if (INTEL_GEN(dev_priv) < 9 &&
-           (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
-               pipe_config->has_pch_encoder = true;
-
-               tmp = I915_READ(FDI_RX_CTL(PIPE_A));
-               pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
-                                         FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
-               ironlake_get_fdi_m_n_config(crtc, pipe_config);
-       }
-}
-
-static bool haswell_get_pipe_config(struct intel_crtc *crtc,
-                                   struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
-       enum intel_display_power_domain power_domain;
-       u64 power_domain_mask;
-       bool active;
-
-       intel_crtc_init_scalers(crtc, pipe_config);
-
-       power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wf)
-               return false;
-
-       wakerefs[power_domain] = wf;
-       power_domain_mask = BIT_ULL(power_domain);
-
-       pipe_config->shared_dpll = NULL;
-
-       active = hsw_get_transcoder_state(crtc, pipe_config,
-                                         &power_domain_mask, wakerefs);
-
-       if (IS_GEN9_LP(dev_priv) &&
-           bxt_get_dsi_transcoder_state(crtc, pipe_config,
-                                        &power_domain_mask, wakerefs)) {
-               WARN_ON(active);
-               active = true;
-       }
-
-       if (!active)
-               goto out;
-
-       if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
-           INTEL_GEN(dev_priv) >= 11) {
-               haswell_get_ddi_port_state(crtc, pipe_config);
-               intel_get_pipe_timings(crtc, pipe_config);
-       }
-
-       intel_get_pipe_src_size(crtc, pipe_config);
-       intel_get_crtc_ycbcr_config(crtc, pipe_config);
-
-       pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
-
-       pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
-
-       if (INTEL_GEN(dev_priv) >= 9) {
-               u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
-
-               if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
-                       pipe_config->gamma_enable = true;
-
-               if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
-                       pipe_config->csc_enable = true;
-       } else {
-               i9xx_get_pipe_color_config(pipe_config);
-       }
-
-       intel_color_get_config(pipe_config);
-
-       power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-       WARN_ON(power_domain_mask & BIT_ULL(power_domain));
-
-       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (wf) {
-               wakerefs[power_domain] = wf;
-               power_domain_mask |= BIT_ULL(power_domain);
-
-               if (INTEL_GEN(dev_priv) >= 9)
-                       skylake_get_pfit_config(crtc, pipe_config);
-               else
-                       ironlake_get_pfit_config(crtc, pipe_config);
-       }
-
-       if (hsw_crtc_supports_ips(crtc)) {
-               if (IS_HASWELL(dev_priv))
-                       pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
-               else {
-                       /*
-                        * We cannot read out the IPS state on Broadwell; set it to
-                        * true so we can bring it to a defined state on the first
-                        * commit.
-                        */
-                       pipe_config->ips_enabled = true;
-               }
-       }
-
-       if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
-           !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
-               pipe_config->pixel_multiplier =
-                       I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
-       } else {
-               pipe_config->pixel_multiplier = 1;
-       }
-
-out:
-       for_each_power_domain(power_domain, power_domain_mask)
-               intel_display_power_put(dev_priv,
-                                       power_domain, wakerefs[power_domain]);
-
-       return active;
-}
-
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       u32 base;
-
-       if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
-               base = obj->phys_handle->busaddr;
-       else
-               base = intel_plane_ggtt_offset(plane_state);
-
-       base += plane_state->color_plane[0].offset;
-
-       /* ILK+ do this automagically */
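-       /*
-        * i.e. on GMCH parts the base is advanced by (crtc_h * crtc_w - 1)
-        * pixels worth of bytes so that it points at the last pixel of the
-        * cursor when a 180 degree rotation is requested; ILK+ hardware
-        * derives that offset on its own.
-        */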
-       if (HAS_GMCH(dev_priv) &&
-           plane_state->base.rotation & DRM_MODE_ROTATE_180)
-               base += (plane_state->base.crtc_h *
-                        plane_state->base.crtc_w - 1) * fb->format->cpp[0];
-
-       return base;
-}
-
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
-{
-       int x = plane_state->base.crtc_x;
-       int y = plane_state->base.crtc_y;
-       u32 pos = 0;
-
-       if (x < 0) {
-               pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-               x = -x;
-       }
-       pos |= x << CURSOR_X_SHIFT;
-
-       if (y < 0) {
-               pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-               y = -y;
-       }
-       pos |= y << CURSOR_Y_SHIFT;
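-       /*
-        * The register wants sign+magnitude per axis: e.g. a cursor at
-        * (-16, 8) ends up as (CURSOR_POS_SIGN | 16) in the X field and
-        * a plain 8 in the Y field.
-        */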
-
-       return pos;
-}
-
-static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
-       const struct drm_mode_config *config =
-               &plane_state->base.plane->dev->mode_config;
-       int width = plane_state->base.crtc_w;
-       int height = plane_state->base.crtc_h;
-
-       return width > 0 && width <= config->cursor_width &&
-               height > 0 && height <= config->cursor_height;
-}
-
-static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
-{
-       int src_x, src_y;
-       u32 offset;
-       int ret;
-
-       ret = intel_plane_compute_gtt(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
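-       /* The src rectangle is in 16.16 fixed point; drop the fraction. */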
-       src_x = plane_state->base.src_x >> 16;
-       src_y = plane_state->base.src_y >> 16;
-
-       intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-       offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
-                                                   plane_state, 0);
-
-       if (src_x != 0 || src_y != 0) {
-               DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
-               return -EINVAL;
-       }
-
-       plane_state->color_plane[0].offset = offset;
-
-       return 0;
-}
-
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
-                             struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int ret;
-
-       if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
-               DRM_DEBUG_KMS("cursor cannot be tiled\n");
-               return -EINVAL;
-       }
-
-       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-                                                 &crtc_state->base,
-                                                 DRM_PLANE_HELPER_NO_SCALING,
-                                                 DRM_PLANE_HELPER_NO_SCALING,
-                                                 true, true);
-       if (ret)
-               return ret;
-
-       ret = intel_cursor_check_surface(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       ret = intel_plane_check_src_coordinates(plane_state);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static unsigned int
-i845_cursor_max_stride(struct intel_plane *plane,
-                      u32 pixel_format, u64 modifier,
-                      unsigned int rotation)
-{
-       return 2048;
-}
-
-static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       u32 cntl = 0;
-
-       if (crtc_state->gamma_enable)
-               cntl |= CURSOR_GAMMA_ENABLE;
-
-       return cntl;
-}
-
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
-                          const struct intel_plane_state *plane_state)
-{
-       return CURSOR_ENABLE |
-               CURSOR_FORMAT_ARGB |
-               CURSOR_STRIDE(plane_state->color_plane[0].stride);
-}
-
-static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
-       int width = plane_state->base.crtc_w;
-
-       /*
-        * 845g/865g are only limited by the width of their cursors,
-        * the height is arbitrary up to the precision of the register.
-        */
-       return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
-}
-
-static int i845_check_cursor(struct intel_crtc_state *crtc_state,
-                            struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int ret;
-
-       ret = intel_check_cursor(crtc_state, plane_state);
-       if (ret)
-               return ret;
-
-       /* If we want to turn off the cursor, ignore width and height. */
-       if (!fb)
-               return 0;
-
-       /* Check for which cursor types we support */
-       if (!i845_cursor_size_ok(plane_state)) {
-               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-                         plane_state->base.crtc_w,
-                         plane_state->base.crtc_h);
-               return -EINVAL;
-       }
-
-       WARN_ON(plane_state->base.visible &&
-               plane_state->color_plane[0].stride != fb->pitches[0]);
-
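-       /* Only these stride values can be programmed via CURSOR_STRIDE(). */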
-       switch (fb->pitches[0]) {
-       case 256:
-       case 512:
-       case 1024:
-       case 2048:
-               break;
-       default:
-               DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
-                             fb->pitches[0]);
-               return -EINVAL;
-       }
-
-       plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
-
-       return 0;
-}
-
-static void i845_update_cursor(struct intel_plane *plane,
-                              const struct intel_crtc_state *crtc_state,
-                              const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       u32 cntl = 0, base = 0, pos = 0, size = 0;
-       unsigned long irqflags;
-
-       if (plane_state && plane_state->base.visible) {
-               unsigned int width = plane_state->base.crtc_w;
-               unsigned int height = plane_state->base.crtc_h;
-
-               cntl = plane_state->ctl |
-                       i845_cursor_ctl_crtc(crtc_state);
-
-               size = (height << 12) | width;
-
-               base = intel_cursor_base(plane_state);
-               pos = intel_cursor_position(plane_state);
-       }
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       /* On these chipsets we can only modify the base/size/stride
-        * whilst the cursor is disabled.
-        */
-       if (plane->cursor.base != base ||
-           plane->cursor.size != size ||
-           plane->cursor.cntl != cntl) {
-               I915_WRITE_FW(CURCNTR(PIPE_A), 0);
-               I915_WRITE_FW(CURBASE(PIPE_A), base);
-               I915_WRITE_FW(CURSIZE, size);
-               I915_WRITE_FW(CURPOS(PIPE_A), pos);
-               I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
-
-               plane->cursor.base = base;
-               plane->cursor.size = size;
-               plane->cursor.cntl = cntl;
-       } else {
-               I915_WRITE_FW(CURPOS(PIPE_A), pos);
-       }
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i845_disable_cursor(struct intel_plane *plane,
-                               const struct intel_crtc_state *crtc_state)
-{
-       i845_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i845_cursor_get_hw_state(struct intel_plane *plane,
-                                    enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(PIPE_A);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-
-       *pipe = PIPE_A;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static unsigned int
-i9xx_cursor_max_stride(struct intel_plane *plane,
-                      u32 pixel_format, u64 modifier,
-                      unsigned int rotation)
-{
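-       /* Cursors are always ARGB8888, hence 4 bytes per pixel of max width. */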
-       return plane->base.dev->mode_config.cursor_width * 4;
-}
-
-static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 cntl = 0;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               return cntl;
-
-       if (crtc_state->gamma_enable)
-               cntl = MCURSOR_GAMMA_ENABLE;
-
-       if (crtc_state->csc_enable)
-               cntl |= MCURSOR_PIPE_CSC_ENABLE;
-
-       if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-               cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
-
-       return cntl;
-}
-
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
-                          const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       u32 cntl = 0;
-
-       if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
-               cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
-
-       switch (plane_state->base.crtc_w) {
-       case 64:
-               cntl |= MCURSOR_MODE_64_ARGB_AX;
-               break;
-       case 128:
-               cntl |= MCURSOR_MODE_128_ARGB_AX;
-               break;
-       case 256:
-               cntl |= MCURSOR_MODE_256_ARGB_AX;
-               break;
-       default:
-               MISSING_CASE(plane_state->base.crtc_w);
-               return 0;
-       }
-
-       if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
-               cntl |= MCURSOR_ROTATE_180;
-
-       return cntl;
-}
-
-static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       int width = plane_state->base.crtc_w;
-       int height = plane_state->base.crtc_h;
-
-       if (!intel_cursor_size_ok(plane_state))
-               return false;
-
-       /* Cursor width is limited to a few power-of-two sizes */
-       switch (width) {
-       case 256:
-       case 128:
-       case 64:
-               break;
-       default:
-               return false;
-       }
-
-       /*
-        * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
-        * height from 8 lines up to the cursor width, when the
-        * cursor is not rotated. Everything else requires square
-        * cursors.
-        */
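-       /*
-        * E.g. a 64x16 cursor is accepted here on CUR_FBC capable hardware
-        * as long as it is not rotated, whereas otherwise only square sizes
-        * such as 64x64, 128x128 or 256x256 pass.
-        */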
-       if (HAS_CUR_FBC(dev_priv) &&
-           plane_state->base.rotation & DRM_MODE_ROTATE_0) {
-               if (height < 8 || height > width)
-                       return false;
-       } else {
-               if (height != width)
-                       return false;
-       }
-
-       return true;
-}
-
-static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
-                            struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       enum pipe pipe = plane->pipe;
-       int ret;
-
-       ret = intel_check_cursor(crtc_state, plane_state);
-       if (ret)
-               return ret;
-
-       /* If we want to turn off the cursor, ignore width and height. */
-       if (!fb)
-               return 0;
-
-       /* Check for which cursor types we support */
-       if (!i9xx_cursor_size_ok(plane_state)) {
-               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-                         plane_state->base.crtc_w,
-                         plane_state->base.crtc_h);
-               return -EINVAL;
-       }
-
-       WARN_ON(plane_state->base.visible &&
-               plane_state->color_plane[0].stride != fb->pitches[0]);
-
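-       /* The cursor fb must be packed: stride == cursor width * cpp. */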
-       if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
-               DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
-                             fb->pitches[0], plane_state->base.crtc_w);
-               return -EINVAL;
-       }
-
-       /*
-        * There's something wrong with the cursor on CHV pipe C.
-        * If it straddles the left edge of the screen then
-        * moving it away from the edge or disabling it often
-        * results in a pipe underrun, and often that can lead to
-        * dead pipe (constant underrun reported, and it scans
-        * out just a solid color). To recover from that, the
-        * display power well must be turned off and on again.
-        * Refuse to put the cursor into that compromised position.
-        */
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
-           plane_state->base.visible && plane_state->base.crtc_x < 0) {
-               DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
-               return -EINVAL;
-       }
-
-       plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
-
-       return 0;
-}
-
-static void i9xx_update_cursor(struct intel_plane *plane,
-                              const struct intel_crtc_state *crtc_state,
-                              const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
-       unsigned long irqflags;
-
-       if (plane_state && plane_state->base.visible) {
-               cntl = plane_state->ctl |
-                       i9xx_cursor_ctl_crtc(crtc_state);
-
-               if (plane_state->base.crtc_h != plane_state->base.crtc_w)
-                       fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
-
-               base = intel_cursor_base(plane_state);
-               pos = intel_cursor_position(plane_state);
-       }
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       /*
-        * On some platforms writing CURCNTR first will also
-        * cause CURPOS to be armed by the CURBASE write.
-        * Without the CURCNTR write the CURPOS write would
-        * arm itself. Thus we always update CURCNTR before
-        * CURPOS.
-        *
-        * On other platforms CURPOS always requires the
-        * CURBASE write to arm the update. Additionally,
-        * a write to any of the cursor registers will cancel
-        * an already armed cursor update. Thus leaving out
-        * the CURBASE write after CURPOS could lead to a
-        * cursor that doesn't appear to move, or even change
-        * shape. Thus we always write CURBASE.
-        *
-        * The other registers are armed by the CURBASE write
-        * except when the plane is getting enabled at which time
-        * the CURCNTR write arms the update.
-        */
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               skl_write_cursor_wm(plane, crtc_state);
-
-       if (plane->cursor.base != base ||
-           plane->cursor.size != fbc_ctl ||
-           plane->cursor.cntl != cntl) {
-               if (HAS_CUR_FBC(dev_priv))
-                       I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
-               I915_WRITE_FW(CURCNTR(pipe), cntl);
-               I915_WRITE_FW(CURPOS(pipe), pos);
-               I915_WRITE_FW(CURBASE(pipe), base);
-
-               plane->cursor.base = base;
-               plane->cursor.size = fbc_ctl;
-               plane->cursor.cntl = cntl;
-       } else {
-               I915_WRITE_FW(CURPOS(pipe), pos);
-               I915_WRITE_FW(CURBASE(pipe), base);
-       }
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_cursor(struct intel_plane *plane,
-                               const struct intel_crtc_state *crtc_state)
-{
-       i9xx_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
-                                    enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-       bool ret;
-       u32 val;
-
-       /*
-        * Not 100% correct for planes that can move between pipes,
-        * but that's only the case for gen2-3 which don't have any
-        * display power wells.
-        */
-       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(CURCNTR(plane->pipe));
-
-       ret = val & MCURSOR_MODE;
-
-       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-               *pipe = plane->pipe;
-       else
-               *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
-                       MCURSOR_PIPE_SELECT_SHIFT;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-/* VESA 640x480x72Hz mode to set on the pipe */
-static const struct drm_display_mode load_detect_mode = {
-       DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
-                704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-};
-
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
-                        struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       struct intel_framebuffer *intel_fb;
-       int ret;
-
-       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-       if (!intel_fb)
-               return ERR_PTR(-ENOMEM);
-
-       ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
-       if (ret)
-               goto err;
-
-       return &intel_fb->base;
-
-err:
-       kfree(intel_fb);
-       return ERR_PTR(ret);
-}
-
-static int intel_modeset_disable_planes(struct drm_atomic_state *state,
-                                       struct drm_crtc *crtc)
-{
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
-       int ret, i;
-
-       ret = drm_atomic_add_affected_planes(state, crtc);
-       if (ret)
-               return ret;
-
-       for_each_new_plane_in_state(state, plane, plane_state, i) {
-               if (plane_state->crtc != crtc)
-                       continue;
-
-               ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
-               if (ret)
-                       return ret;
-
-               drm_atomic_set_fb_for_plane(plane_state, NULL);
-       }
-
-       return 0;
-}
-
-int intel_get_load_detect_pipe(struct drm_connector *connector,
-                              const struct drm_display_mode *mode,
-                              struct intel_load_detect_pipe *old,
-                              struct drm_modeset_acquire_ctx *ctx)
-{
-       struct intel_crtc *intel_crtc;
-       struct intel_encoder *intel_encoder =
-               intel_attached_encoder(connector);
-       struct drm_crtc *possible_crtc;
-       struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_crtc *crtc = NULL;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_atomic_state *state = NULL, *restore_state = NULL;
-       struct drm_connector_state *connector_state;
-       struct intel_crtc_state *crtc_state;
-       int ret, i = -1;
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-                     connector->base.id, connector->name,
-                     encoder->base.id, encoder->name);
-
-       old->restore_state = NULL;
-
-       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
-       /*
-        * Algorithm gets a little messy:
-        *
-        *   - if the connector already has an assigned crtc, use it (but make
-        *     sure it's on first)
-        *
-        *   - try to find the first unused crtc that can drive this connector,
-        *     and use that if we find one
-        */
-
-       /* See if we already have a CRTC for this connector */
-       if (connector->state->crtc) {
-               crtc = connector->state->crtc;
-
-               ret = drm_modeset_lock(&crtc->mutex, ctx);
-               if (ret)
-                       goto fail;
-
-               /* Make sure the crtc and connector are running */
-               goto found;
-       }
-
-       /* Find an unused one (if possible) */
-       for_each_crtc(dev, possible_crtc) {
-               i++;
-               if (!(encoder->possible_crtcs & (1 << i)))
-                       continue;
-
-               ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
-               if (ret)
-                       goto fail;
-
-               if (possible_crtc->state->enable) {
-                       drm_modeset_unlock(&possible_crtc->mutex);
-                       continue;
-               }
-
-               crtc = possible_crtc;
-               break;
-       }
-
-       /*
-        * If we didn't find an unused CRTC, don't use any.
-        */
-       if (!crtc) {
-               DRM_DEBUG_KMS("no pipe available for load-detect\n");
-               ret = -ENODEV;
-               goto fail;
-       }
-
-found:
-       intel_crtc = to_intel_crtc(crtc);
-
-       state = drm_atomic_state_alloc(dev);
-       restore_state = drm_atomic_state_alloc(dev);
-       if (!state || !restore_state) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       state->acquire_ctx = ctx;
-       restore_state->acquire_ctx = ctx;
-
-       connector_state = drm_atomic_get_connector_state(state, connector);
-       if (IS_ERR(connector_state)) {
-               ret = PTR_ERR(connector_state);
-               goto fail;
-       }
-
-       ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
-       if (ret)
-               goto fail;
-
-       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-       if (IS_ERR(crtc_state)) {
-               ret = PTR_ERR(crtc_state);
-               goto fail;
-       }
-
-       crtc_state->base.active = crtc_state->base.enable = true;
-
-       if (!mode)
-               mode = &load_detect_mode;
-
-       ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
-       if (ret)
-               goto fail;
-
-       ret = intel_modeset_disable_planes(state, crtc);
-       if (ret)
-               goto fail;
-
-       ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
-       if (!ret)
-               ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
-       if (!ret)
-               ret = drm_atomic_add_affected_planes(restore_state, crtc);
-       if (ret) {
-               DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
-               goto fail;
-       }
-
-       ret = drm_atomic_commit(state);
-       if (ret) {
-               DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
-               goto fail;
-       }
-
-       old->restore_state = restore_state;
-       drm_atomic_state_put(state);
-
-       /* let the connector get through one full cycle before testing */
-       intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-       return true;
-
-fail:
-       if (state) {
-               drm_atomic_state_put(state);
-               state = NULL;
-       }
-       if (restore_state) {
-               drm_atomic_state_put(restore_state);
-               restore_state = NULL;
-       }
-
-       if (ret == -EDEADLK)
-               return ret;
-
-       return false;
-}
-
-void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                   struct intel_load_detect_pipe *old,
-                                   struct drm_modeset_acquire_ctx *ctx)
-{
-       struct intel_encoder *intel_encoder =
-               intel_attached_encoder(connector);
-       struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_atomic_state *state = old->restore_state;
-       int ret;
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-                     connector->base.id, connector->name,
-                     encoder->base.id, encoder->name);
-
-       if (!state)
-               return;
-
-       ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
-       if (ret)
-               DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
-       drm_atomic_state_put(state);
-}
-
-static int i9xx_pll_refclk(struct drm_device *dev,
-                          const struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 dpll = pipe_config->dpll_hw_state.dpll;
-
-       if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
-               return dev_priv->vbt.lvds_ssc_freq;
-       else if (HAS_PCH_SPLIT(dev_priv))
-               return 120000;
-       else if (!IS_GEN(dev_priv, 2))
-               return 96000;
-       else
-               return 48000;
-}
-
-/* Returns the clock of the currently programmed mode of the given pipe. */
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
-                               struct intel_crtc_state *pipe_config)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = pipe_config->cpu_transcoder;
-       u32 dpll = pipe_config->dpll_hw_state.dpll;
-       u32 fp;
-       struct dpll clock;
-       int port_clock;
-       int refclk = i9xx_pll_refclk(dev, pipe_config);
-
-       if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-               fp = pipe_config->dpll_hw_state.fp0;
-       else
-               fp = pipe_config->dpll_hw_state.fp1;
-
-       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-       if (IS_PINEVIEW(dev_priv)) {
-               clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
-               clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
-       } else {
-               clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
-               clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
-       }
-
-       if (!IS_GEN(dev_priv, 2)) {
-               if (IS_PINEVIEW(dev_priv))
-                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
-                               DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
-               else
-                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
-                              DPLL_FPA01_P1_POST_DIV_SHIFT);
-
-               switch (dpll & DPLL_MODE_MASK) {
-               case DPLLB_MODE_DAC_SERIAL:
-                       clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
-                               5 : 10;
-                       break;
-               case DPLLB_MODE_LVDS:
-                       clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
-                               7 : 14;
-                       break;
-               default:
-                       DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
-                                 "mode\n", (int)(dpll & DPLL_MODE_MASK));
-                       return;
-               }
-
-               if (IS_PINEVIEW(dev_priv))
-                       port_clock = pnv_calc_dpll_params(refclk, &clock);
-               else
-                       port_clock = i9xx_calc_dpll_params(refclk, &clock);
-       } else {
-               u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
-               bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
-
-               if (is_lvds) {
-                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
-                                      DPLL_FPA01_P1_POST_DIV_SHIFT);
-
-                       if (lvds & LVDS_CLKB_POWER_UP)
-                               clock.p2 = 7;
-                       else
-                               clock.p2 = 14;
-               } else {
-                       if (dpll & PLL_P1_DIVIDE_BY_TWO)
-                               clock.p1 = 2;
-                       else {
-                               clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
-                                           DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
-                       }
-                       if (dpll & PLL_P2_DIVIDE_BY_4)
-                               clock.p2 = 4;
-                       else
-                               clock.p2 = 2;
-               }
-
-               port_clock = i9xx_calc_dpll_params(refclk, &clock);
-       }
-
-       /*
-        * This value includes pixel_multiplier. We will use
-        * port_clock to compute adjusted_mode.crtc_clock in the
-        * encoder's get_config() function.
-        */
-       pipe_config->port_clock = port_clock;
-}
-
-int intel_dotclock_calculate(int link_freq,
-                            const struct intel_link_m_n *m_n)
-{
-       /*
-        * The calculation for the data clock is:
-        * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
-        * But we want to avoid losing precision if possible, so:
-        * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
-        *
-        * and the dot clock derived from the link M/N is simpler:
-        * dot_clock = (link_m * link_clock) / link_n
-        */
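-       /*
-        * For example, link_m/link_n = 1/2 with a 270000 kHz link clock
-        * yields a 135000 kHz dot clock.
-        */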
-
-       if (!m_n->link_n)
-               return 0;
-
-       return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
-}
-
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
-                                  struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       /* read out port_clock from the DPLL */
-       i9xx_crtc_clock_get(crtc, pipe_config);
-
-       /*
-        * In case there is an active pipe without active ports,
-        * we still need an estimate of the dotclock anyway.
-        * Calculate one based on the FDI configuration.
-        */
-       pipe_config->base.adjusted_mode.crtc_clock =
-               intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
-                                        &pipe_config->fdi_m_n);
-}
-
-/* Returns the currently programmed mode of the given encoder. */
-struct drm_display_mode *
-intel_encoder_current_mode(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc_state *crtc_state;
-       struct drm_display_mode *mode;
-       struct intel_crtc *crtc;
-       enum pipe pipe;
-
-       if (!encoder->get_hw_state(encoder, &pipe))
-               return NULL;
-
-       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
-       if (!mode)
-               return NULL;
-
-       crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-       if (!crtc_state) {
-               kfree(mode);
-               return NULL;
-       }
-
-       crtc_state->base.crtc = &crtc->base;
-
-       if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
-               kfree(crtc_state);
-               kfree(mode);
-               return NULL;
-       }
-
-       encoder->get_config(encoder, crtc_state);
-
-       intel_mode_from_pipe_config(mode, crtc_state);
-
-       kfree(crtc_state);
-
-       return mode;
-}
-
-static void intel_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       drm_crtc_cleanup(crtc);
-       kfree(intel_crtc);
-}
-
-/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @cur: current plane state
- * @new: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-static bool intel_wm_need_update(struct intel_plane_state *cur,
-                                struct intel_plane_state *new)
-{
-       /* Update watermarks on tiling or size changes. */
-       if (new->base.visible != cur->base.visible)
-               return true;
-
-       if (!cur->base.fb || !new->base.fb)
-               return false;
-
-       if (cur->base.fb->modifier != new->base.fb->modifier ||
-           cur->base.rotation != new->base.rotation ||
-           drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
-           drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
-           drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
-           drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
-               return true;
-
-       return false;
-}
-
-static bool needs_scaling(const struct intel_plane_state *state)
-{
-       int src_w = drm_rect_width(&state->base.src) >> 16;
-       int src_h = drm_rect_height(&state->base.src) >> 16;
-       int dst_w = drm_rect_width(&state->base.dst);
-       int dst_h = drm_rect_height(&state->base.dst);
-
-       return (src_w != dst_w || src_h != dst_h);
-}
-
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
-                                   struct drm_crtc_state *crtc_state,
-                                   const struct intel_plane_state *old_plane_state,
-                                   struct drm_plane_state *plane_state)
-{
-       struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
-       struct drm_crtc *crtc = crtc_state->crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_plane *plane = to_intel_plane(plane_state->plane);
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       bool mode_changed = needs_modeset(crtc_state);
-       bool was_crtc_enabled = old_crtc_state->base.active;
-       bool is_crtc_enabled = crtc_state->active;
-       bool turn_off, turn_on, visible, was_visible;
-       struct drm_framebuffer *fb = plane_state->fb;
-       int ret;
-
-       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
-               ret = skl_update_scaler_plane(
-                       to_intel_crtc_state(crtc_state),
-                       to_intel_plane_state(plane_state));
-               if (ret)
-                       return ret;
-       }
-
-       was_visible = old_plane_state->base.visible;
-       visible = plane_state->visible;
-
-       if (!was_crtc_enabled && WARN_ON(was_visible))
-               was_visible = false;
-
-       /*
-        * Visibility is calculated as if the crtc was on, but
-        * after scaler setup everything depends on it being off
-        * when the crtc isn't active.
-        *
-        * FIXME this is wrong for watermarks. Watermarks should also
-        * be computed as if the pipe would be active. Perhaps move
-        * per-plane wm computation to the .check_plane() hook, and
-        * only combine the results from all planes in the current place?
-        */
-       if (!is_crtc_enabled) {
-               plane_state->visible = visible = false;
-               to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
-               to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
-       }
-
-       if (!was_visible && !visible)
-               return 0;
-
-       if (fb != old_plane_state->base.fb)
-               pipe_config->fb_changed = true;
-
-       turn_off = was_visible && (!visible || mode_changed);
-       turn_on = visible && (!was_visible || mode_changed);
-
-       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
-                        intel_crtc->base.base.id, intel_crtc->base.name,
-                        plane->base.base.id, plane->base.name,
-                        fb ? fb->base.id : -1);
-
-       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
-                        plane->base.base.id, plane->base.name,
-                        was_visible, visible,
-                        turn_off, turn_on, mode_changed);
-
-       if (turn_on) {
-               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-                       pipe_config->update_wm_pre = true;
-
-               /* must disable cxsr around plane enable/disable */
-               if (plane->id != PLANE_CURSOR)
-                       pipe_config->disable_cxsr = true;
-       } else if (turn_off) {
-               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-                       pipe_config->update_wm_post = true;
-
-               /* must disable cxsr around plane enable/disable */
-               if (plane->id != PLANE_CURSOR)
-                       pipe_config->disable_cxsr = true;
-       } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
-                                       to_intel_plane_state(plane_state))) {
-               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
-                       /* FIXME bollocks */
-                       pipe_config->update_wm_pre = true;
-                       pipe_config->update_wm_post = true;
-               }
-       }
-
-       if (visible || was_visible)
-               pipe_config->fb_bits |= plane->frontbuffer_bit;
-
-       /*
-        * ILK/SNB DVSACNTR/Sprite Enable
-        * IVB SPR_CTL/Sprite Enable
-        * "When in Self Refresh Big FIFO mode, a write to enable the
-        *  plane will be internally buffered and delayed while Big FIFO
-        *  mode is exiting."
-        *
-        * Which means that enabling the sprite can take an extra frame
-        * when we start in big FIFO mode (LP1+). Thus we need to drop
-        * down to LP0 and wait for vblank in order to make sure the
-        * sprite gets enabled on the next vblank after the register write.
-        * Doing otherwise would risk enabling the sprite one frame after
-        * we've already signalled flip completion. We can resume LP1+
-        * once the sprite has been enabled.
-        *
-        *
-        * WaCxSRDisabledForSpriteScaling:ivb
-        * IVB SPR_SCALE/Scaling Enable
-        * "Low Power watermarks must be disabled for at least one
-        *  frame before enabling sprite scaling, and kept disabled
-        *  until sprite scaling is disabled."
-        *
-        * ILK/SNB DVSASCALE/Scaling Enable
-        * "When in Self Refresh Big FIFO mode, scaling enable will be
-        *  masked off while Big FIFO mode is exiting."
-        *
-        * Despite the w/a only being listed for IVB we assume that
-        * the ILK/SNB note has similar ramifications, hence we apply
-        * the w/a on all three platforms.
-        *
-        * Experimental results suggest this is also needed for the primary
-        * plane, not only the sprite plane.
-        */
-       if (plane->id != PLANE_CURSOR &&
-           (IS_GEN_RANGE(dev_priv, 5, 6) ||
-            IS_IVYBRIDGE(dev_priv)) &&
-           (turn_on || (!needs_scaling(old_plane_state) &&
-                        needs_scaling(to_intel_plane_state(plane_state)))))
-               pipe_config->disable_lp_wm = true;
-
-       return 0;
-}
-
-static bool encoders_cloneable(const struct intel_encoder *a,
-                              const struct intel_encoder *b)
-{
-       /* masks could be asymmetric, so check both ways */
-       return a == b || (a->cloneable & (1 << b->type) &&
-                         b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
-                                        struct intel_crtc *crtc,
-                                        struct intel_encoder *encoder)
-{
-       struct intel_encoder *source_encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int i;
-
-       for_each_new_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               source_encoder =
-                       to_intel_encoder(connector_state->best_encoder);
-               if (!encoders_cloneable(encoder, source_encoder))
-                       return false;
-       }
-
-       return true;
-}
-
-static int icl_add_linked_planes(struct intel_atomic_state *state)
-{
-       struct intel_plane *plane, *linked;
-       struct intel_plane_state *plane_state, *linked_plane_state;
-       int i;
-
-       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-               linked = plane_state->linked_plane;
-
-               if (!linked)
-                       continue;
-
-               linked_plane_state = intel_atomic_get_plane_state(state, linked);
-               if (IS_ERR(linked_plane_state))
-                       return PTR_ERR(linked_plane_state);
-
-               WARN_ON(linked_plane_state->linked_plane != plane);
-               WARN_ON(linked_plane_state->slave == plane_state->slave);
-       }
-
-       return 0;
-}
-
-static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
-       struct intel_plane *plane, *linked;
-       struct intel_plane_state *plane_state;
-       int i;
-
-       if (INTEL_GEN(dev_priv) < 11)
-               return 0;
-
-       /*
-        * Destroy all old plane links and make the slave plane invisible
-        * in the crtc_state->active_planes mask.
-        */
-       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-               if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
-                       continue;
-
-               plane_state->linked_plane = NULL;
-               if (plane_state->slave && !plane_state->base.visible) {
-                       crtc_state->active_planes &= ~BIT(plane->id);
-                       crtc_state->update_planes |= BIT(plane->id);
-               }
-
-               plane_state->slave = false;
-       }
-
-       if (!crtc_state->nv12_planes)
-               return 0;
-
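-       /*
-        * For every plane scanning out the UV part of an NV12 fb, pick a
-        * currently unused Y-capable plane on the same pipe and link the
-        * two, with the Y plane acting as the slave.
-        */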
-       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-               struct intel_plane_state *linked_state = NULL;
-
-               if (plane->pipe != crtc->pipe ||
-                   !(crtc_state->nv12_planes & BIT(plane->id)))
-                       continue;
-
-               for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
-                       if (!icl_is_nv12_y_plane(linked->id))
-                               continue;
-
-                       if (crtc_state->active_planes & BIT(linked->id))
-                               continue;
-
-                       linked_state = intel_atomic_get_plane_state(state, linked);
-                       if (IS_ERR(linked_state))
-                               return PTR_ERR(linked_state);
-
-                       break;
-               }
-
-               if (!linked_state) {
-                       DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
-                                     hweight8(crtc_state->nv12_planes));
-
-                       return -EINVAL;
-               }
-
-               plane_state->linked_plane = linked;
-
-               linked_state->slave = true;
-               linked_state->linked_plane = plane;
-               crtc_state->active_planes |= BIT(linked->id);
-               crtc_state->update_planes |= BIT(linked->id);
-               DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
-       }
-
-       return 0;
-}
-
-static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct intel_atomic_state *state =
-               to_intel_atomic_state(new_crtc_state->base.state);
-       const struct intel_crtc_state *old_crtc_state =
-               intel_atomic_get_old_crtc_state(state, crtc);
-
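-       /* i.e. did the set of C8 planes go from empty to non-empty, or back? */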
-       return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
-}
-
-static int intel_crtc_atomic_check(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc_state);
-       int ret;
-       bool mode_changed = needs_modeset(crtc_state);
-
-       if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
-           mode_changed && !crtc_state->active)
-               pipe_config->update_wm_post = true;
-
-       if (mode_changed && crtc_state->enable &&
-           dev_priv->display.crtc_compute_clock &&
-           !WARN_ON(pipe_config->shared_dpll)) {
-               ret = dev_priv->display.crtc_compute_clock(intel_crtc,
-                                                          pipe_config);
-               if (ret)
-                       return ret;
-       }
-
-       /*
-        * May need to update pipe gamma enable bits
-        * when C8 planes are getting enabled/disabled.
-        */
-       if (c8_planes_changed(pipe_config))
-               crtc_state->color_mgmt_changed = true;
-
-       if (mode_changed || pipe_config->update_pipe ||
-           crtc_state->color_mgmt_changed) {
-               ret = intel_color_check(pipe_config);
-               if (ret)
-                       return ret;
-       }
-
-       ret = 0;
-       if (dev_priv->display.compute_pipe_wm) {
-               ret = dev_priv->display.compute_pipe_wm(pipe_config);
-               if (ret) {
-                       DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
-                       return ret;
-               }
-       }
-
-       if (dev_priv->display.compute_intermediate_wm) {
-               if (WARN_ON(!dev_priv->display.compute_pipe_wm))
-                       return 0;
-
-               /*
-                * Calculate 'intermediate' watermarks that satisfy both the
-                * old state and the new state.  We can program these
-                * immediately.
-                */
-               ret = dev_priv->display.compute_intermediate_wm(pipe_config);
-               if (ret) {
-                       DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
-                       return ret;
-               }
-       }
-
-       if (INTEL_GEN(dev_priv) >= 9) {
-               if (mode_changed || pipe_config->update_pipe)
-                       ret = skl_update_scaler_crtc(pipe_config);
-
-               if (!ret)
-                       ret = icl_check_nv12_planes(pipe_config);
-               if (!ret)
-                       ret = skl_check_pipe_max_pixel_rate(intel_crtc,
-                                                           pipe_config);
-               if (!ret)
-                       ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
-                                                        pipe_config);
-       }
-
-       if (HAS_IPS(dev_priv))
-               pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
-
-       return ret;
-}
-
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-       .atomic_check = intel_crtc_atomic_check,
-};
-
-static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
-{
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter) {
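-               /*
-                * Drop the reference held for the stale ->crtc assignment,
-                * then take a fresh one below if the connector is still
-                * routed through an encoder to a crtc.
-                */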
-               if (connector->base.state->crtc)
-                       drm_connector_put(&connector->base);
-
-               if (connector->base.encoder) {
-                       connector->base.state->best_encoder =
-                               connector->base.encoder;
-                       connector->base.state->crtc =
-                               connector->base.encoder->crtc;
-
-                       drm_connector_get(&connector->base);
-               } else {
-                       connector->base.state->best_encoder = NULL;
-                       connector->base.state->crtc = NULL;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-}
-
-static int
-compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
-                     struct intel_crtc_state *pipe_config)
-{
-       struct drm_connector *connector = conn_state->connector;
-       const struct drm_display_info *info = &connector->display_info;
-       int bpp;
-
-       switch (conn_state->max_bpc) {
-       case 6 ... 7:
-               bpp = 6 * 3;
-               break;
-       case 8 ... 9:
-               bpp = 8 * 3;
-               break;
-       case 10 ... 11:
-               bpp = 10 * 3;
-               break;
-       case 12:
-               bpp = 12 * 3;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (bpp < pipe_config->pipe_bpp) {
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
-                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
-                             connector->base.id, connector->name,
-                             bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
-                             pipe_config->pipe_bpp);
-
-               pipe_config->pipe_bpp = bpp;
-       }
-
-       return 0;
-}
-
-static int
-compute_baseline_pipe_bpp(struct intel_crtc *crtc,
-                         struct intel_crtc_state *pipe_config)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct drm_atomic_state *state = pipe_config->base.state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int bpp, i;
-
-       if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
-           IS_CHERRYVIEW(dev_priv)))
-               bpp = 10*3;
-       else if (INTEL_GEN(dev_priv) >= 5)
-               bpp = 12*3;
-       else
-               bpp = 8*3;
-
-       pipe_config->pipe_bpp = bpp;
-
-       /* Clamp display bpp to connector max bpp */
-       for_each_new_connector_in_state(state, connector, connector_state, i) {
-               int ret;
-
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               ret = compute_sink_pipe_bpp(connector_state, pipe_config);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
-{
-       DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
-                     "type: 0x%x flags: 0x%x\n",
-                     mode->crtc_clock,
-                     mode->crtc_hdisplay, mode->crtc_hsync_start,
-                     mode->crtc_hsync_end, mode->crtc_htotal,
-                     mode->crtc_vdisplay, mode->crtc_vsync_start,
-                     mode->crtc_vsync_end, mode->crtc_vtotal,
-                     mode->type, mode->flags);
-}
-
-static inline void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
-                     const char *id, unsigned int lane_count,
-                     const struct intel_link_m_n *m_n)
-{
-       DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
-                     id, lane_count,
-                     m_n->gmch_m, m_n->gmch_n,
-                     m_n->link_m, m_n->link_n, m_n->tu);
-}
-
-static void
-intel_dump_infoframe(struct drm_i915_private *dev_priv,
-                    const union hdmi_infoframe *frame)
-{
-       if ((drm_debug & DRM_UT_KMS) == 0)
-               return;
-
-       hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
-}
-
-#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
-
-static const char * const output_type_str[] = {
-       OUTPUT_TYPE(UNUSED),
-       OUTPUT_TYPE(ANALOG),
-       OUTPUT_TYPE(DVO),
-       OUTPUT_TYPE(SDVO),
-       OUTPUT_TYPE(LVDS),
-       OUTPUT_TYPE(TVOUT),
-       OUTPUT_TYPE(HDMI),
-       OUTPUT_TYPE(DP),
-       OUTPUT_TYPE(EDP),
-       OUTPUT_TYPE(DSI),
-       OUTPUT_TYPE(DDI),
-       OUTPUT_TYPE(DP_MST),
-};
-
-#undef OUTPUT_TYPE
-
-static void snprintf_output_types(char *buf, size_t len,
-                                 unsigned int output_types)
-{
-       char *str = buf;
-       int i;
-
-       str[0] = '\0';
-
-       for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
-               int r;
-
-               if ((output_types & BIT(i)) == 0)
-                       continue;
-
-               r = snprintf(str, len, "%s%s",
-                            str != buf ? "," : "", output_type_str[i]);
-               if (r >= len)
-                       break;
-               str += r;
-               len -= r;
-
-               output_types &= ~BIT(i);
-       }
-
-       WARN_ON_ONCE(output_types != 0);
-}
-
-static const char * const output_format_str[] = {
-       [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
-       [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
-       [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
-       [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
-};
-
-static const char *output_formats(enum intel_output_format format)
-{
-       if (format >= ARRAY_SIZE(output_format_str))
-               format = INTEL_OUTPUT_FORMAT_INVALID;
-       return output_format_str[format];
-}
-
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_format_name_buf format_name;
-
-       if (!fb) {
-               DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
-                             plane->base.base.id, plane->base.name,
-                             yesno(plane_state->base.visible));
-               return;
-       }
-
-       DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
-                     plane->base.base.id, plane->base.name,
-                     fb->base.id, fb->width, fb->height,
-                     drm_get_format_name(fb->format->format, &format_name),
-                     yesno(plane_state->base.visible));
-       DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
-                     plane_state->base.rotation, plane_state->scaler_id);
-       if (plane_state->base.visible)
-               DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
-                             DRM_RECT_FP_ARG(&plane_state->base.src),
-                             DRM_RECT_ARG(&plane_state->base.dst));
-}
-
-static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
-                                  struct intel_atomic_state *state,
-                                  const char *context)
-{
-       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct intel_plane_state *plane_state;
-       struct intel_plane *plane;
-       char buf[64];
-       int i;
-
-       DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
-                     crtc->base.base.id, crtc->base.name,
-                     yesno(pipe_config->base.enable), context);
-
-       if (!pipe_config->base.enable)
-               goto dump_planes;
-
-       snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
-       DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
-                     yesno(pipe_config->base.active),
-                     buf, pipe_config->output_types,
-                     output_formats(pipe_config->output_format));
-
-       DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
-                     transcoder_name(pipe_config->cpu_transcoder),
-                     pipe_config->pipe_bpp, pipe_config->dither);
-
-       if (pipe_config->has_pch_encoder)
-               intel_dump_m_n_config(pipe_config, "fdi",
-                                     pipe_config->fdi_lanes,
-                                     &pipe_config->fdi_m_n);
-
-       if (intel_crtc_has_dp_encoder(pipe_config)) {
-               intel_dump_m_n_config(pipe_config, "dp m_n",
-                               pipe_config->lane_count, &pipe_config->dp_m_n);
-               if (pipe_config->has_drrs)
-                       intel_dump_m_n_config(pipe_config, "dp m2_n2",
-                                             pipe_config->lane_count,
-                                             &pipe_config->dp_m2_n2);
-       }
-
-       DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
-                     pipe_config->has_audio, pipe_config->has_infoframe,
-                     pipe_config->infoframes.enable);
-
-       if (pipe_config->infoframes.enable &
-           intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
-               DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
-       if (pipe_config->infoframes.enable &
-           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
-               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
-       if (pipe_config->infoframes.enable &
-           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
-               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
-       if (pipe_config->infoframes.enable &
-           intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
-               intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
-
-       DRM_DEBUG_KMS("requested mode:\n");
-       drm_mode_debug_printmodeline(&pipe_config->base.mode);
-       DRM_DEBUG_KMS("adjusted mode:\n");
-       drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
-       intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
-       DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
-                     pipe_config->port_clock,
-                     pipe_config->pipe_src_w, pipe_config->pipe_src_h,
-                     pipe_config->pixel_rate);
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
-                             crtc->num_scalers,
-                             pipe_config->scaler_state.scaler_users,
-                             pipe_config->scaler_state.scaler_id);
-
-       if (HAS_GMCH(dev_priv))
-               DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
-                             pipe_config->gmch_pfit.control,
-                             pipe_config->gmch_pfit.pgm_ratios,
-                             pipe_config->gmch_pfit.lvds_border_bits);
-       else
-               DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
-                             pipe_config->pch_pfit.pos,
-                             pipe_config->pch_pfit.size,
-                             enableddisabled(pipe_config->pch_pfit.enabled),
-                             yesno(pipe_config->pch_pfit.force_thru));
-
-       DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
-                     pipe_config->ips_enabled, pipe_config->double_wide);
-
-       intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
-
-dump_planes:
-       if (!state)
-               return;
-
-       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-               if (plane->pipe == crtc->pipe)
-                       intel_dump_plane_state(plane_state);
-       }
-}
-
-static bool check_digital_port_conflicts(struct intel_atomic_state *state)
-{
-       struct drm_device *dev = state->base.dev;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       unsigned int used_ports = 0;
-       unsigned int used_mst_ports = 0;
-       bool ret = true;
-
-       /*
-        * Walk the connector list instead of the encoder
-        * list to detect the problem on ddi platforms
-        * where there's just one encoder per digital port.
-        */
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               struct drm_connector_state *connector_state;
-               struct intel_encoder *encoder;
-
-               connector_state =
-                       drm_atomic_get_new_connector_state(&state->base,
-                                                          connector);
-               if (!connector_state)
-                       connector_state = connector->state;
-
-               if (!connector_state->best_encoder)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-
-               WARN_ON(!connector_state->crtc);
-
-               switch (encoder->type) {
-                       unsigned int port_mask;
-               case INTEL_OUTPUT_DDI:
-                       if (WARN_ON(!HAS_DDI(to_i915(dev))))
-                               break;
-                       /* else: fall through */
-               case INTEL_OUTPUT_DP:
-               case INTEL_OUTPUT_HDMI:
-               case INTEL_OUTPUT_EDP:
-                       port_mask = 1 << encoder->port;
-
-                       /* the same port mustn't appear more than once */
-                       if (used_ports & port_mask)
-                               ret = false;
-
-                       used_ports |= port_mask;
-                       break;
-               case INTEL_OUTPUT_DP_MST:
-                       used_mst_ports |=
-                               1 << encoder->port;
-                       break;
-               default:
-                       break;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       /* can't mix MST and SST/HDMI on the same port */
-       if (used_ports & used_mst_ports)
-               return false;
-
-       return ret;
-}
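/*
 * Illustrative sketch, not part of the moved file: the conflict check above
 * boils down to per-port bitmask bookkeeping. The helper and port numbers
 * below are invented for the example; only the masking logic mirrors
 * check_digital_port_conflicts(). Builds standalone with a C compiler.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ports_conflict(const int *sst_ports, int n_sst,
			   const int *mst_ports, int n_mst)
{
	unsigned int used_ports = 0, used_mst_ports = 0;
	int i;

	for (i = 0; i < n_sst; i++) {
		unsigned int mask = 1u << sst_ports[i];

		/* the same SST/HDMI port mustn't appear more than once */
		if (used_ports & mask)
			return true;
		used_ports |= mask;
	}

	for (i = 0; i < n_mst; i++)
		used_mst_ports |= 1u << mst_ports[i];

	/* MST and SST/HDMI can't share a port */
	return (used_ports & used_mst_ports) != 0;
}

int main(void)
{
	int sst[] = { 1, 2 };
	int mst[] = { 2 };

	/* port 2 is claimed both as SST and MST, so this prints "conflict: 1" */
	printf("conflict: %d\n", ports_conflict(sst, 2, mst, 1));
	return 0;
}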
-
-static int
-clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(crtc_state->base.crtc->dev);
-       struct intel_crtc_state *saved_state;
-
-       saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
-       if (!saved_state)
-               return -ENOMEM;
-
-       /* FIXME: before the switch to atomic started, a new pipe_config was
-        * kzalloc'd. Code that depends on any field being zero should be
-        * fixed, so that the crtc_state can be safely duplicated. For now,
-        * only fields that are known to not cause problems are preserved. */
-
-       saved_state->scaler_state = crtc_state->scaler_state;
-       saved_state->shared_dpll = crtc_state->shared_dpll;
-       saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
-       saved_state->crc_enabled = crtc_state->crc_enabled;
-       if (IS_G4X(dev_priv) ||
-           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               saved_state->wm = crtc_state->wm;
-
-       /* Keep base drm_crtc_state intact, only clear our extended struct */
-       BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
-       memcpy(&crtc_state->base + 1, &saved_state->base + 1,
-              sizeof(*crtc_state) - sizeof(crtc_state->base));
-
-       kfree(saved_state);
-       return 0;
-}
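/*
 * Illustrative sketch, not part of the moved file: the memcpy above works
 * because the base state is the first member, so "&base + 1" addresses the
 * start of the extended fields. The struct and field names below are made
 * up for the example; only the pointer arithmetic mirrors
 * clear_intel_crtc_state(). Builds standalone with a C compiler.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct base_state { int id; };

struct ext_state {
	struct base_state base;	/* must stay the first member */
	int scaler_id;
	int pipe_bpp;
};

static void clear_past_base(struct ext_state *s)
{
	struct ext_state zeroed = { 0 };

	/* keep s->base intact, wipe only the extended members */
	assert(offsetof(struct ext_state, base) == 0);
	memcpy(&s->base + 1, &zeroed.base + 1,
	       sizeof(*s) - sizeof(s->base));
}

int main(void)
{
	struct ext_state s = {
		.base = { .id = 3 }, .scaler_id = 7, .pipe_bpp = 24,
	};

	clear_past_base(&s);
	/* prints "id=3 scaler=0 bpp=0": base survives, the rest is zeroed */
	printf("id=%d scaler=%d bpp=%d\n", s.base.id, s.scaler_id, s.pipe_bpp);
	return 0;
}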
-
-static int
-intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
-{
-       struct drm_crtc *crtc = pipe_config->base.crtc;
-       struct drm_atomic_state *state = pipe_config->base.state;
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int base_bpp, ret;
-       int i;
-       bool retry = true;
-
-       ret = clear_intel_crtc_state(pipe_config);
-       if (ret)
-               return ret;
-
-       pipe_config->cpu_transcoder =
-               (enum transcoder) to_intel_crtc(crtc)->pipe;
-
-       /*
-        * Sanitize sync polarity flags based on requested ones. If neither
-        * positive nor negative polarity is requested, treat this as meaning
-        * negative polarity.
-        */
-       if (!(pipe_config->base.adjusted_mode.flags &
-             (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
-               pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
-
-       if (!(pipe_config->base.adjusted_mode.flags &
-             (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
-               pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
-
-       ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
-                                       pipe_config);
-       if (ret)
-               return ret;
-
-       base_bpp = pipe_config->pipe_bpp;
-
-       /*
-        * Determine the real pipe dimensions. Note that stereo modes can
-        * increase the actual pipe size due to the frame doubling and
-        * insertion of additional space for blanks between the frames. This
-        * is stored in the crtc timings. We use the requested mode to do this
-        * computation to clearly distinguish it from the adjusted mode, which
-        * can be changed by the connectors in the retry loop below.
-        */
-       drm_mode_get_hv_timing(&pipe_config->base.mode,
-                              &pipe_config->pipe_src_w,
-                              &pipe_config->pipe_src_h);
-
-       for_each_new_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-
-               if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
-                       DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * Determine output_types before calling the .compute_config()
-                * hooks so that the hooks can use this information safely.
-                */
-               if (encoder->compute_output_type)
-                       pipe_config->output_types |=
-                               BIT(encoder->compute_output_type(encoder, pipe_config,
-                                                                connector_state));
-               else
-                       pipe_config->output_types |= BIT(encoder->type);
-       }
-
-encoder_retry:
-       /* Ensure the port clock defaults are reset when retrying. */
-       pipe_config->port_clock = 0;
-       pipe_config->pixel_multiplier = 1;
-
-       /* Fill in default crtc timings, allow encoders to overwrite them. */
-       drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
-                             CRTC_STEREO_DOUBLE);
-
-       /* Pass our mode to the connectors and the CRTC to give them a chance to
-        * adjust it according to limitations or connector properties, and also
-        * a chance to reject the mode entirely.
-        */
-       for_each_new_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               ret = encoder->compute_config(encoder, pipe_config,
-                                             connector_state);
-               if (ret < 0) {
-                       if (ret != -EDEADLK)
-                               DRM_DEBUG_KMS("Encoder config failure: %d\n",
-                                             ret);
-                       return ret;
-               }
-       }
-
-       /* Set default port clock if not overwritten by the encoder. Needs to be
-        * done afterwards in case the encoder adjusts the mode. */
-       if (!pipe_config->port_clock)
-               pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
-                       * pipe_config->pixel_multiplier;
-
-       ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
-       if (ret == -EDEADLK)
-               return ret;
-       if (ret < 0) {
-               DRM_DEBUG_KMS("CRTC fixup failed\n");
-               return ret;
-       }
-
-       if (ret == RETRY) {
-               if (WARN(!retry, "loop in pipe configuration computation\n"))
-                       return -EINVAL;
-
-               DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
-               retry = false;
-               goto encoder_retry;
-       }
-
-       /* Dithering does not seem to pass bits through correctly when it
-        * should, so only enable it on 6bpc panels and when it's not a
-        * compliance test requesting a 6bpc video pattern.
-        */
-       pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
-               !pipe_config->dither_force_disable;
-       DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
-                     base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
-
-       return 0;
-}
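/*
 * Worked example, illustrative only (the numbers are not from the patch):
 * when no encoder sets port_clock, the fallback above multiplies the
 * adjusted mode's crtc_clock by pixel_multiplier. For a 148500 kHz mode
 * with pixel_multiplier = 1 that yields port_clock = 148500; a
 * pixel_multiplier of 2 would give 297000.
 */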
-
-bool intel_fuzzy_clock_check(int clock1, int clock2)
-{
-       int diff;
-
-       if (clock1 == clock2)
-               return true;
-
-       if (!clock1 || !clock2)
-               return false;
-
-       diff = abs(clock1 - clock2);
-
-       if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
-               return true;
-
-       return false;
-}
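/*
 * Worked example, illustrative only: the check above passes when
 * |clock1 - clock2| is below 5% of (clock1 + clock2), i.e. roughly within
 * 10% of the average clock. With clock1 = 100000 and clock2 = 108000,
 * diff = 8000 and (8000 + 208000) * 100 / 208000 = 103 < 105, so the
 * clocks match; with clock2 = 112000 the expression evaluates to 105,
 * which is not < 105, so they don't.
 */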
-
-static bool
-intel_compare_m_n(unsigned int m, unsigned int n,
-                 unsigned int m2, unsigned int n2,
-                 bool exact)
-{
-       if (m == m2 && n == n2)
-               return true;
-
-       if (exact || !m || !n || !m2 || !n2)
-               return false;
-
-       BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
-
-       if (n > n2) {
-               while (n > n2) {
-                       m2 <<= 1;
-                       n2 <<= 1;
-               }
-       } else if (n < n2) {
-               while (n < n2) {
-                       m <<= 1;
-                       n <<= 1;
-               }
-       }
-
-       if (n != n2)
-               return false;
-
-       return intel_fuzzy_clock_check(m, m2);
-}
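/*
 * Worked example, illustrative only: intel_compare_m_n() only scales by
 * powers of two, so two ratios compare equal when one n is a power-of-two
 * multiple of the other and the scaled m values pass the fuzzy clock
 * check. For (m, n) = (100, 200) vs (m2, n2) = (201, 400): n < n2, so m
 * and n are doubled to (200, 400); n now equals n2 and
 * intel_fuzzy_clock_check(200, 201) accepts the ~0.5% difference.
 */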
-
-static bool
-intel_compare_link_m_n(const struct intel_link_m_n *m_n,
-                      struct intel_link_m_n *m2_n2,
-                      bool adjust)
-{
-       if (m_n->tu == m2_n2->tu &&
-           intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
-                             m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
-           intel_compare_m_n(m_n->link_m, m_n->link_n,
-                             m2_n2->link_m, m2_n2->link_n, !adjust)) {
-               if (adjust)
-                       *m2_n2 = *m_n;
-
-               return true;
-       }
-
-       return false;
-}
-
-static bool
-intel_compare_infoframe(const union hdmi_infoframe *a,
-                       const union hdmi_infoframe *b)
-{
-       return memcmp(a, b, sizeof(*a)) == 0;
-}
-
-static void
-pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
-                         bool adjust, const char *name,
-                         const union hdmi_infoframe *a,
-                         const union hdmi_infoframe *b)
-{
-       if (adjust) {
-               if ((drm_debug & DRM_UT_KMS) == 0)
-                       return;
-
-               drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
-               drm_dbg(DRM_UT_KMS, "expected:");
-               hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
-               drm_dbg(DRM_UT_KMS, "found");
-               hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
-       } else {
-               drm_err("mismatch in %s infoframe", name);
-               drm_err("expected:");
-               hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
-               drm_err("found");
-               hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
-       }
-}
-
-static void __printf(3, 4)
-pipe_config_err(bool adjust, const char *name, const char *format, ...)
-{
-       struct va_format vaf;
-       va_list args;
-
-       va_start(args, format);
-       vaf.fmt = format;
-       vaf.va = &args;
-
-       if (adjust)
-               drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
-       else
-               drm_err("mismatch in %s %pV", name, &vaf);
-
-       va_end(args);
-}
-
-static bool fastboot_enabled(struct drm_i915_private *dev_priv)
-{
-       if (i915_modparams.fastboot != -1)
-               return i915_modparams.fastboot;
-
-       /* Enable fastboot by default on Skylake and newer */
-       if (INTEL_GEN(dev_priv) >= 9)
-               return true;
-
-       /* Enable fastboot by default on VLV and CHV */
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return true;
-
-       /* Disabled by default on all others */
-       return false;
-}
-
-static bool
-intel_pipe_config_compare(struct drm_i915_private *dev_priv,
-                         struct intel_crtc_state *current_config,
-                         struct intel_crtc_state *pipe_config,
-                         bool adjust)
-{
-       bool ret = true;
-       bool fixup_inherited = adjust &&
-               (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
-               !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
-
-       if (fixup_inherited && !fastboot_enabled(dev_priv)) {
-               DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
-               ret = false;
-       }
-
-#define PIPE_CONF_CHECK_X(name) do { \
-       if (current_config->name != pipe_config->name) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected 0x%08x, found 0x%08x)\n", \
-                         current_config->name, \
-                         pipe_config->name); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_I(name) do { \
-       if (current_config->name != pipe_config->name) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected %i, found %i)\n", \
-                         current_config->name, \
-                         pipe_config->name); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_BOOL(name) do { \
-       if (current_config->name != pipe_config->name) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected %s, found %s)\n", \
-                         yesno(current_config->name), \
-                         yesno(pipe_config->name)); \
-               ret = false; \
-       } \
-} while (0)
-
-/*
- * Checks state where we only read out whether the feature is enabled, but not
- * the entire state itself (like full infoframes or ELD for audio). These states
- * require a full modeset on bootup to fix up.
- */
-#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
-       if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
-               PIPE_CONF_CHECK_BOOL(name); \
-       } else { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
-                         yesno(current_config->name), \
-                         yesno(pipe_config->name)); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_P(name) do { \
-       if (current_config->name != pipe_config->name) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected %p, found %p)\n", \
-                         current_config->name, \
-                         pipe_config->name); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_M_N(name) do { \
-       if (!intel_compare_link_m_n(&current_config->name, \
-                                   &pipe_config->name,\
-                                   adjust)) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected tu %i gmch %i/%i link %i/%i, " \
-                         "found tu %i, gmch %i/%i link %i/%i)\n", \
-                         current_config->name.tu, \
-                         current_config->name.gmch_m, \
-                         current_config->name.gmch_n, \
-                         current_config->name.link_m, \
-                         current_config->name.link_n, \
-                         pipe_config->name.tu, \
-                         pipe_config->name.gmch_m, \
-                         pipe_config->name.gmch_n, \
-                         pipe_config->name.link_m, \
-                         pipe_config->name.link_n); \
-               ret = false; \
-       } \
-} while (0)
-
-/* This is required for BDW+ where there is only one set of registers for
- * switching between the high and low refresh rate (RR).
- * This macro can be used whenever a comparison has to be made between one
- * hw state and multiple sw state variables.
- */
-#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
-       if (!intel_compare_link_m_n(&current_config->name, \
-                                   &pipe_config->name, adjust) && \
-           !intel_compare_link_m_n(&current_config->alt_name, \
-                                   &pipe_config->name, adjust)) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected tu %i gmch %i/%i link %i/%i, " \
-                         "or tu %i gmch %i/%i link %i/%i, " \
-                         "found tu %i, gmch %i/%i link %i/%i)\n", \
-                         current_config->name.tu, \
-                         current_config->name.gmch_m, \
-                         current_config->name.gmch_n, \
-                         current_config->name.link_m, \
-                         current_config->name.link_n, \
-                         current_config->alt_name.tu, \
-                         current_config->alt_name.gmch_m, \
-                         current_config->alt_name.gmch_n, \
-                         current_config->alt_name.link_m, \
-                         current_config->alt_name.link_n, \
-                         pipe_config->name.tu, \
-                         pipe_config->name.gmch_m, \
-                         pipe_config->name.gmch_n, \
-                         pipe_config->name.link_m, \
-                         pipe_config->name.link_n); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
-       if ((current_config->name ^ pipe_config->name) & (mask)) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(%x) (expected %i, found %i)\n", \
-                         (mask), \
-                         current_config->name & (mask), \
-                         pipe_config->name & (mask)); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
-       if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
-               pipe_config_err(adjust, __stringify(name), \
-                         "(expected %i, found %i)\n", \
-                         current_config->name, \
-                         pipe_config->name); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
-       if (!intel_compare_infoframe(&current_config->infoframes.name, \
-                                    &pipe_config->infoframes.name)) { \
-               pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
-                                         &current_config->infoframes.name, \
-                                         &pipe_config->infoframes.name); \
-               ret = false; \
-       } \
-} while (0)
-
-#define PIPE_CONF_QUIRK(quirk) \
-       ((current_config->quirks | pipe_config->quirks) & (quirk))
-
-       PIPE_CONF_CHECK_I(cpu_transcoder);
-
-       PIPE_CONF_CHECK_BOOL(has_pch_encoder);
-       PIPE_CONF_CHECK_I(fdi_lanes);
-       PIPE_CONF_CHECK_M_N(fdi_m_n);
-
-       PIPE_CONF_CHECK_I(lane_count);
-       PIPE_CONF_CHECK_X(lane_lat_optim_mask);
-
-       if (INTEL_GEN(dev_priv) < 8) {
-               PIPE_CONF_CHECK_M_N(dp_m_n);
-
-               if (current_config->has_drrs)
-                       PIPE_CONF_CHECK_M_N(dp_m2_n2);
-       } else
-               PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
-
-       PIPE_CONF_CHECK_X(output_types);
-
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
-
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
-       PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
-
-       PIPE_CONF_CHECK_I(pixel_multiplier);
-       PIPE_CONF_CHECK_I(output_format);
-       PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
-       if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
-           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               PIPE_CONF_CHECK_BOOL(limited_color_range);
-
-       PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
-       PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
-       PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
-
-       PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
-
-       PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
-                             DRM_MODE_FLAG_INTERLACE);
-
-       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
-               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
-                                     DRM_MODE_FLAG_PHSYNC);
-               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
-                                     DRM_MODE_FLAG_NHSYNC);
-               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
-                                     DRM_MODE_FLAG_PVSYNC);
-               PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
-                                     DRM_MODE_FLAG_NVSYNC);
-       }
-
-       PIPE_CONF_CHECK_X(gmch_pfit.control);
-       /* pfit ratios are autocomputed by the hw on gen4+ */
-       if (INTEL_GEN(dev_priv) < 4)
-               PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
-       PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
-
-       /*
-        * Changing the EDP transcoder input mux
-        * (A_ONOFF vs. A_ON) requires a full modeset.
-        */
-       PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
-
-       if (!adjust) {
-               PIPE_CONF_CHECK_I(pipe_src_w);
-               PIPE_CONF_CHECK_I(pipe_src_h);
-
-               PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
-               if (current_config->pch_pfit.enabled) {
-                       PIPE_CONF_CHECK_X(pch_pfit.pos);
-                       PIPE_CONF_CHECK_X(pch_pfit.size);
-               }
-
-               PIPE_CONF_CHECK_I(scaler_state.scaler_id);
-               PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
-
-               PIPE_CONF_CHECK_X(gamma_mode);
-               if (IS_CHERRYVIEW(dev_priv))
-                       PIPE_CONF_CHECK_X(cgm_mode);
-               else
-                       PIPE_CONF_CHECK_X(csc_mode);
-               PIPE_CONF_CHECK_BOOL(gamma_enable);
-               PIPE_CONF_CHECK_BOOL(csc_enable);
-       }
-
-       PIPE_CONF_CHECK_BOOL(double_wide);
-
-       PIPE_CONF_CHECK_P(shared_dpll);
-       PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
-       PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
-       PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
-       PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
-       PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
-       PIPE_CONF_CHECK_X(dpll_hw_state.spll);
-       PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
-       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
-       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
-       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
-       PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
-       PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
-       PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
-       PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
-
-       PIPE_CONF_CHECK_X(dsi_pll.ctrl);
-       PIPE_CONF_CHECK_X(dsi_pll.div);
-
-       if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
-               PIPE_CONF_CHECK_I(pipe_bpp);
-
-       PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
-       PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
-
-       PIPE_CONF_CHECK_I(min_voltage_level);
-
-       PIPE_CONF_CHECK_X(infoframes.enable);
-       PIPE_CONF_CHECK_X(infoframes.gcp);
-       PIPE_CONF_CHECK_INFOFRAME(avi);
-       PIPE_CONF_CHECK_INFOFRAME(spd);
-       PIPE_CONF_CHECK_INFOFRAME(hdmi);
-       PIPE_CONF_CHECK_INFOFRAME(drm);
-
-#undef PIPE_CONF_CHECK_X
-#undef PIPE_CONF_CHECK_I
-#undef PIPE_CONF_CHECK_BOOL
-#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
-#undef PIPE_CONF_CHECK_P
-#undef PIPE_CONF_CHECK_FLAGS
-#undef PIPE_CONF_CHECK_CLOCK_FUZZY
-#undef PIPE_CONF_QUIRK
-
-       return ret;
-}
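/*
 * Illustrative expansion, not generated output: a call such as
 * PIPE_CONF_CHECK_I(pipe_bpp) above expands to roughly the following,
 * with __stringify(pipe_bpp) turning into the literal "pipe_bpp":
 *
 *	do {
 *		if (current_config->pipe_bpp != pipe_config->pipe_bpp) {
 *			pipe_config_err(adjust, "pipe_bpp",
 *					"(expected %i, found %i)\n",
 *					current_config->pipe_bpp,
 *					pipe_config->pipe_bpp);
 *			ret = false;
 *		}
 *	} while (0);
 */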
-
-static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
-                                          const struct intel_crtc_state *pipe_config)
-{
-       if (pipe_config->has_pch_encoder) {
-               int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
-                                                           &pipe_config->fdi_m_n);
-               int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
-
-               /*
-                * FDI already provided one idea for the dotclock.
-                * Yell if the encoder disagrees.
-                */
-               WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
-                    "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
-                    fdi_dotclock, dotclock);
-       }
-}
-
-static void verify_wm_state(struct drm_crtc *crtc,
-                           struct drm_crtc_state *new_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct skl_hw_state {
-               struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
-               struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
-               struct skl_ddb_allocation ddb;
-               struct skl_pipe_wm wm;
-       } *hw;
-       struct skl_ddb_allocation *sw_ddb;
-       struct skl_pipe_wm *sw_wm;
-       struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       const enum pipe pipe = intel_crtc->pipe;
-       int plane, level, max_level = ilk_wm_max_level(dev_priv);
-
-       if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
-               return;
-
-       hw = kzalloc(sizeof(*hw), GFP_KERNEL);
-       if (!hw)
-               return;
-
-       skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
-       sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
-
-       skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
-
-       skl_ddb_get_hw_state(dev_priv, &hw->ddb);
-       sw_ddb = &dev_priv->wm.skl_hw.ddb;
-
-       if (INTEL_GEN(dev_priv) >= 11 &&
-           hw->ddb.enabled_slices != sw_ddb->enabled_slices)
-               DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
-                         sw_ddb->enabled_slices,
-                         hw->ddb.enabled_slices);
-
-       /* planes */
-       for_each_universal_plane(dev_priv, pipe, plane) {
-               struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
-
-               hw_plane_wm = &hw->wm.planes[plane];
-               sw_plane_wm = &sw_wm->planes[plane];
-
-               /* Watermarks */
-               for (level = 0; level <= max_level; level++) {
-                       if (skl_wm_level_equals(&hw_plane_wm->wm[level],
-                                               &sw_plane_wm->wm[level]))
-                               continue;
-
-                       DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-                                 pipe_name(pipe), plane + 1, level,
-                                 sw_plane_wm->wm[level].plane_en,
-                                 sw_plane_wm->wm[level].plane_res_b,
-                                 sw_plane_wm->wm[level].plane_res_l,
-                                 hw_plane_wm->wm[level].plane_en,
-                                 hw_plane_wm->wm[level].plane_res_b,
-                                 hw_plane_wm->wm[level].plane_res_l);
-               }
-
-               if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
-                                        &sw_plane_wm->trans_wm)) {
-                       DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-                                 pipe_name(pipe), plane + 1,
-                                 sw_plane_wm->trans_wm.plane_en,
-                                 sw_plane_wm->trans_wm.plane_res_b,
-                                 sw_plane_wm->trans_wm.plane_res_l,
-                                 hw_plane_wm->trans_wm.plane_en,
-                                 hw_plane_wm->trans_wm.plane_res_b,
-                                 hw_plane_wm->trans_wm.plane_res_l);
-               }
-
-               /* DDB */
-               hw_ddb_entry = &hw->ddb_y[plane];
-               sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
-
-               if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
-                       DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
-                                 pipe_name(pipe), plane + 1,
-                                 sw_ddb_entry->start, sw_ddb_entry->end,
-                                 hw_ddb_entry->start, hw_ddb_entry->end);
-               }
-       }
-
-       /*
-        * cursor
-        * If the cursor plane isn't active, we may not have updated its ddb
-        * allocation. In that case, since the ddb allocation will be updated
-        * once the plane becomes visible, we can skip this check.
-        */
-       if (1) {
-               struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
-
-               hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
-               sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
-
-               /* Watermarks */
-               for (level = 0; level <= max_level; level++) {
-                       if (skl_wm_level_equals(&hw_plane_wm->wm[level],
-                                               &sw_plane_wm->wm[level]))
-                               continue;
-
-                       DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-                                 pipe_name(pipe), level,
-                                 sw_plane_wm->wm[level].plane_en,
-                                 sw_plane_wm->wm[level].plane_res_b,
-                                 sw_plane_wm->wm[level].plane_res_l,
-                                 hw_plane_wm->wm[level].plane_en,
-                                 hw_plane_wm->wm[level].plane_res_b,
-                                 hw_plane_wm->wm[level].plane_res_l);
-               }
-
-               if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
-                                        &sw_plane_wm->trans_wm)) {
-                       DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-                                 pipe_name(pipe),
-                                 sw_plane_wm->trans_wm.plane_en,
-                                 sw_plane_wm->trans_wm.plane_res_b,
-                                 sw_plane_wm->trans_wm.plane_res_l,
-                                 hw_plane_wm->trans_wm.plane_en,
-                                 hw_plane_wm->trans_wm.plane_res_b,
-                                 hw_plane_wm->trans_wm.plane_res_l);
-               }
-
-               /* DDB */
-               hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
-               sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
-
-               if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
-                       DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
-                                 pipe_name(pipe),
-                                 sw_ddb_entry->start, sw_ddb_entry->end,
-                                 hw_ddb_entry->start, hw_ddb_entry->end);
-               }
-       }
-
-       kfree(hw);
-}
-
-static void
-verify_connector_state(struct drm_device *dev,
-                      struct drm_atomic_state *state,
-                      struct drm_crtc *crtc)
-{
-       struct drm_connector *connector;
-       struct drm_connector_state *new_conn_state;
-       int i;
-
-       for_each_new_connector_in_state(state, connector, new_conn_state, i) {
-               struct drm_encoder *encoder = connector->encoder;
-               struct drm_crtc_state *crtc_state = NULL;
-
-               if (new_conn_state->crtc != crtc)
-                       continue;
-
-               if (crtc)
-                       crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
-
-               intel_connector_verify_state(crtc_state, new_conn_state);
-
-               I915_STATE_WARN(new_conn_state->best_encoder != encoder,
-                    "connector's atomic encoder doesn't match legacy encoder\n");
-       }
-}
-
-static void
-verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
-{
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *old_conn_state, *new_conn_state;
-       int i;
-
-       for_each_intel_encoder(dev, encoder) {
-               bool enabled = false, found = false;
-               enum pipe pipe;
-
-               DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
-                             encoder->base.base.id,
-                             encoder->base.name);
-
-               for_each_oldnew_connector_in_state(state, connector, old_conn_state,
-                                                  new_conn_state, i) {
-                       if (old_conn_state->best_encoder == &encoder->base)
-                               found = true;
-
-                       if (new_conn_state->best_encoder != &encoder->base)
-                               continue;
-                       found = enabled = true;
-
-                       I915_STATE_WARN(new_conn_state->crtc !=
-                                       encoder->base.crtc,
-                            "connector's crtc doesn't match encoder crtc\n");
-               }
-
-               if (!found)
-                       continue;
-
-               I915_STATE_WARN(!!encoder->base.crtc != enabled,
-                    "encoder's enabled state mismatch "
-                    "(expected %i, found %i)\n",
-                    !!encoder->base.crtc, enabled);
-
-               if (!encoder->base.crtc) {
-                       bool active;
-
-                       active = encoder->get_hw_state(encoder, &pipe);
-                       I915_STATE_WARN(active,
-                            "encoder detached but still enabled on pipe %c.\n",
-                            pipe_name(pipe));
-               }
-       }
-}
-
-static void
-verify_crtc_state(struct drm_crtc *crtc,
-                 struct drm_crtc_state *old_crtc_state,
-                 struct drm_crtc_state *new_crtc_state)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_encoder *encoder;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *pipe_config, *sw_config;
-       struct drm_atomic_state *old_state;
-       bool active;
-
-       old_state = old_crtc_state->state;
-       __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
-       pipe_config = to_intel_crtc_state(old_crtc_state);
-       memset(pipe_config, 0, sizeof(*pipe_config));
-       pipe_config->base.crtc = crtc;
-       pipe_config->base.state = old_state;
-
-       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
-
-       active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
-
-       /* we keep both pipes enabled on 830 */
-       if (IS_I830(dev_priv))
-               active = new_crtc_state->active;
-
-       I915_STATE_WARN(new_crtc_state->active != active,
-            "crtc active state doesn't match with hw state "
-            "(expected %i, found %i)\n", new_crtc_state->active, active);
-
-       I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
-            "transitional active state does not match atomic hw state "
-            "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
-
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               enum pipe pipe;
-
-               active = encoder->get_hw_state(encoder, &pipe);
-               I915_STATE_WARN(active != new_crtc_state->active,
-                       "[ENCODER:%i] active %i with crtc active %i\n",
-                       encoder->base.base.id, active, new_crtc_state->active);
-
-               I915_STATE_WARN(active && intel_crtc->pipe != pipe,
-                               "Encoder connected to wrong pipe %c\n",
-                               pipe_name(pipe));
-
-               if (active)
-                       encoder->get_config(encoder, pipe_config);
-       }
-
-       intel_crtc_compute_pixel_rate(pipe_config);
-
-       if (!new_crtc_state->active)
-               return;
-
-       intel_pipe_config_sanity_check(dev_priv, pipe_config);
-
-       sw_config = to_intel_crtc_state(new_crtc_state);
-       if (!intel_pipe_config_compare(dev_priv, sw_config,
-                                      pipe_config, false)) {
-               I915_STATE_WARN(1, "pipe state doesn't match!\n");
-               intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
-               intel_dump_pipe_config(sw_config, NULL, "[sw state]");
-       }
-}
-
-static void
-intel_verify_planes(struct intel_atomic_state *state)
-{
-       struct intel_plane *plane;
-       const struct intel_plane_state *plane_state;
-       int i;
-
-       for_each_new_intel_plane_in_state(state, plane,
-                                         plane_state, i)
-               assert_plane(plane, plane_state->slave ||
-                            plane_state->base.visible);
-}
-
-static void
-verify_single_dpll_state(struct drm_i915_private *dev_priv,
-                        struct intel_shared_dpll *pll,
-                        struct drm_crtc *crtc,
-                        struct drm_crtc_state *new_state)
-{
-       struct intel_dpll_hw_state dpll_hw_state;
-       unsigned int crtc_mask;
-       bool active;
-
-       memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
-
-       DRM_DEBUG_KMS("%s\n", pll->info->name);
-
-       active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
-
-       if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
-               I915_STATE_WARN(!pll->on && pll->active_mask,
-                    "pll in active use but not on in sw tracking\n");
-               I915_STATE_WARN(pll->on && !pll->active_mask,
-                    "pll is on but not used by any active crtc\n");
-               I915_STATE_WARN(pll->on != active,
-                    "pll on state mismatch (expected %i, found %i)\n",
-                    pll->on, active);
-       }
-
-       if (!crtc) {
-               I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
-                               "more active pll users than references: %x vs %x\n",
-                               pll->active_mask, pll->state.crtc_mask);
-
-               return;
-       }
-
-       crtc_mask = drm_crtc_mask(crtc);
-
-       if (new_state->active)
-               I915_STATE_WARN(!(pll->active_mask & crtc_mask),
-                               "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
-                               pipe_name(drm_crtc_index(crtc)), pll->active_mask);
-       else
-               I915_STATE_WARN(pll->active_mask & crtc_mask,
-                               "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
-                               pipe_name(drm_crtc_index(crtc)), pll->active_mask);
-
-       I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
-                       "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
-                       crtc_mask, pll->state.crtc_mask);
-
-       I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
-                                         &dpll_hw_state,
-                                         sizeof(dpll_hw_state)),
-                       "pll hw state mismatch\n");
-}
-
-static void
-verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
-                        struct drm_crtc_state *old_crtc_state,
-                        struct drm_crtc_state *new_crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
-       struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
-
-       if (new_state->shared_dpll)
-               verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
-
-       if (old_state->shared_dpll &&
-           old_state->shared_dpll != new_state->shared_dpll) {
-               unsigned int crtc_mask = drm_crtc_mask(crtc);
-               struct intel_shared_dpll *pll = old_state->shared_dpll;
-
-               I915_STATE_WARN(pll->active_mask & crtc_mask,
-                               "pll active mismatch (didn't expect pipe %c in active mask)\n",
-                               pipe_name(drm_crtc_index(crtc)));
-               I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
-                               "pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
-                               pipe_name(drm_crtc_index(crtc)));
-       }
-}
-
-static void
-intel_modeset_verify_crtc(struct drm_crtc *crtc,
-                         struct drm_atomic_state *state,
-                         struct drm_crtc_state *old_state,
-                         struct drm_crtc_state *new_state)
-{
-       if (!needs_modeset(new_state) &&
-           !to_intel_crtc_state(new_state)->update_pipe)
-               return;
-
-       verify_wm_state(crtc, new_state);
-       verify_connector_state(crtc->dev, state, crtc);
-       verify_crtc_state(crtc, old_state, new_state);
-       verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
-}
-
-static void
-verify_disabled_dpll_state(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int i;
-
-       for (i = 0; i < dev_priv->num_shared_dpll; i++)
-               verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
-}
-
-static void
-intel_modeset_verify_disabled(struct drm_device *dev,
-                             struct drm_atomic_state *state)
-{
-       verify_encoder_state(dev, state);
-       verify_connector_state(dev, state, NULL);
-       verify_disabled_dpll_state(dev);
-}
-
-static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       /*
-        * The scanline counter increments at the leading edge of hsync.
-        *
-        * On most platforms it starts counting from vtotal-1 on the
-        * first active line. That means the scanline counter value is
-        * always one less than what we would expect. Ie. just after
-        * start of vblank, which also occurs at start of hsync (on the
-        * last active line), the scanline counter will read vblank_start-1.
-        *
-        * On gen2 the scanline counter starts counting from 1 instead
-        * of vtotal-1, so we have to subtract one (or rather add vtotal-1
-        * to keep the value positive), instead of adding one.
-        *
-        * On HSW+ the behaviour of the scanline counter depends on the output
-        * type. For DP ports it behaves like most other platforms, but on HDMI
-        * there's an extra 1 line difference. So we need to add two instead of
-        * one to the value.
-        *
-        * On VLV/CHV DSI the scanline counter would appear to increment
-        * approx. 1/3 of a scanline before start of vblank. Unfortunately
-        * that means we can't tell whether we're in vblank or not while
-        * we're on that particular line. We must still set scanline_offset
-        * to 1 so that the vblank timestamps come out correct when we query
-        * the scanline counter from within the vblank interrupt handler.
-        * However if queried just before the start of vblank we'll get an
-        * answer that's slightly in the future.
-        */
-       if (IS_GEN(dev_priv, 2)) {
-               const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
-               int vtotal;
-
-               vtotal = adjusted_mode->crtc_vtotal;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-                       vtotal /= 2;
-
-               crtc->scanline_offset = vtotal - 1;
-       } else if (HAS_DDI(dev_priv) &&
-                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               crtc->scanline_offset = 2;
-       } else
-               crtc->scanline_offset = 1;
-}
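/*
 * Worked example, illustrative only: for a gen2 interlaced mode with
 * crtc_vtotal = 525 the code above uses vtotal / 2 = 262 and programs
 * scanline_offset = 261; an HSW+ HDMI output gets the fixed offset 2,
 * and every other case gets 1.
 */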
-
-static void intel_modeset_clear_plls(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-       struct intel_crtc *crtc;
-       int i;
-
-       if (!dev_priv->display.crtc_compute_clock)
-               return;
-
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
-               struct intel_shared_dpll *old_dpll =
-                       old_crtc_state->shared_dpll;
-
-               if (!needs_modeset(&new_crtc_state->base))
-                       continue;
-
-               new_crtc_state->shared_dpll = NULL;
-
-               if (!old_dpll)
-                       continue;
-
-               intel_release_shared_dpll(old_dpll, crtc, &state->base);
-       }
-}
-
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
-{
-       struct intel_crtc_state *crtc_state;
-       struct intel_crtc *crtc;
-       struct intel_crtc_state *first_crtc_state = NULL;
-       struct intel_crtc_state *other_crtc_state = NULL;
-       enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
-       int i;
-
-       /* look at all crtcs that are going to be enabled during the modeset */
-       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!crtc_state->base.active ||
-                   !needs_modeset(&crtc_state->base))
-                       continue;
-
-               if (first_crtc_state) {
-                       other_crtc_state = crtc_state;
-                       break;
-               } else {
-                       first_crtc_state = crtc_state;
-                       first_pipe = crtc->pipe;
-               }
-       }
-
-       /* No workaround needed? */
-       if (!first_crtc_state)
-               return 0;
-
-       /* w/a possibly needed, check how many crtc's are already enabled. */
-       for_each_intel_crtc(state->base.dev, crtc) {
-               crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
-
-               crtc_state->hsw_workaround_pipe = INVALID_PIPE;
-
-               if (!crtc_state->base.active ||
-                   needs_modeset(&crtc_state->base))
-                       continue;
-
-               /* 2 or more enabled crtcs means no need for w/a */
-               if (enabled_pipe != INVALID_PIPE)
-                       return 0;
-
-               enabled_pipe = crtc->pipe;
-       }
-
-       if (enabled_pipe != INVALID_PIPE)
-               first_crtc_state->hsw_workaround_pipe = enabled_pipe;
-       else if (other_crtc_state)
-               other_crtc_state->hsw_workaround_pipe = first_pipe;
-
-       return 0;
-}
-
-static int intel_lock_all_pipes(struct drm_atomic_state *state)
-{
-       struct drm_crtc *crtc;
-
-       /* Add all pipes to the state */
-       for_each_crtc(state->dev, crtc) {
-               struct drm_crtc_state *crtc_state;
-
-               crtc_state = drm_atomic_get_crtc_state(state, crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
-       }
-
-       return 0;
-}
-
-static int intel_modeset_all_pipes(struct drm_atomic_state *state)
-{
-       struct drm_crtc *crtc;
-
-       /*
-        * Add all pipes to the state, and force
-        * a modeset on all the active ones.
-        */
-       for_each_crtc(state->dev, crtc) {
-               struct drm_crtc_state *crtc_state;
-               int ret;
-
-               crtc_state = drm_atomic_get_crtc_state(state, crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
-
-               if (!crtc_state->active || needs_modeset(crtc_state))
-                       continue;
-
-               crtc_state->mode_changed = true;
-
-               ret = drm_atomic_add_affected_connectors(state, crtc);
-               if (ret)
-                       return ret;
-
-               ret = drm_atomic_add_affected_planes(state, crtc);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int intel_modeset_checks(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-       struct intel_crtc *crtc;
-       int ret = 0, i;
-
-       if (!check_digital_port_conflicts(state)) {
-               DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
-               return -EINVAL;
-       }
-
-       /* keep the current setting */
-       if (!state->cdclk.force_min_cdclk_changed)
-               state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
-
-       state->modeset = true;
-       state->active_crtcs = dev_priv->active_crtcs;
-       state->cdclk.logical = dev_priv->cdclk.logical;
-       state->cdclk.actual = dev_priv->cdclk.actual;
-       state->cdclk.pipe = INVALID_PIPE;
-
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
-               if (new_crtc_state->base.active)
-                       state->active_crtcs |= 1 << i;
-               else
-                       state->active_crtcs &= ~(1 << i);
-
-               if (old_crtc_state->base.active != new_crtc_state->base.active)
-                       state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
-       }
-
-       /*
-        * See if the config requires any additional preparation, e.g.
-        * to adjust global state with pipes off.  We need to do this
-        * here so we can get the modeset_pipe updated config for the new
-        * mode set on this crtc.  For other crtcs we need to use the
-        * adjusted_mode bits in the crtc directly.
-        */
-       if (dev_priv->display.modeset_calc_cdclk) {
-               enum pipe pipe;
-
-               ret = dev_priv->display.modeset_calc_cdclk(state);
-               if (ret < 0)
-                       return ret;
-
-               /*
-                * Writes to dev_priv->cdclk.logical must be protected by
-                * holding all the crtc locks, even if we don't end up
-                * touching the hardware.
-                */
-               if (intel_cdclk_changed(&dev_priv->cdclk.logical,
-                                       &state->cdclk.logical)) {
-                       ret = intel_lock_all_pipes(&state->base);
-                       if (ret < 0)
-                               return ret;
-               }
-
-               if (is_power_of_2(state->active_crtcs)) {
-                       struct drm_crtc *crtc;
-                       struct drm_crtc_state *crtc_state;
-
-                       pipe = ilog2(state->active_crtcs);
-                       crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
-                       crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
-                       if (crtc_state && needs_modeset(crtc_state))
-                               pipe = INVALID_PIPE;
-               } else {
-                       pipe = INVALID_PIPE;
-               }
-
-               /* All pipes must be switched off while we change the cdclk. */
-               if (pipe != INVALID_PIPE &&
-                   intel_cdclk_needs_cd2x_update(dev_priv,
-                                                 &dev_priv->cdclk.actual,
-                                                 &state->cdclk.actual)) {
-                       ret = intel_lock_all_pipes(&state->base);
-                       if (ret < 0)
-                               return ret;
-
-                       state->cdclk.pipe = pipe;
-               } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
-                                                    &state->cdclk.actual)) {
-                       ret = intel_modeset_all_pipes(&state->base);
-                       if (ret < 0)
-                               return ret;
-
-                       state->cdclk.pipe = INVALID_PIPE;
-               }
-
-               DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
-                             state->cdclk.logical.cdclk,
-                             state->cdclk.actual.cdclk);
-               DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
-                             state->cdclk.logical.voltage_level,
-                             state->cdclk.actual.voltage_level);
-       }
-
-       intel_modeset_clear_plls(state);
-
-       if (IS_HASWELL(dev_priv))
-               return haswell_mode_set_planes_workaround(state);
-
-       return 0;
-}
-
-/*
- * Handle calculation of various watermark data at the end of the atomic check
- * phase.  The code here should be run after the per-crtc and per-plane 'check'
- * handlers to ensure that all derived state has been updated.
- */
-static int calc_watermark_data(struct intel_atomic_state *state)
-{
-       struct drm_device *dev = state->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       /* Is there platform-specific watermark information to calculate? */
-       if (dev_priv->display.compute_global_watermarks)
-               return dev_priv->display.compute_global_watermarks(state);
-
-       return 0;
-}
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @_state: state to validate
- */
-static int intel_atomic_check(struct drm_device *dev,
-                             struct drm_atomic_state *_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_atomic_state *state = to_intel_atomic_state(_state);
-       struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-       struct intel_crtc *crtc;
-       int ret, i;
-       bool any_ms = state->cdclk.force_min_cdclk_changed;
-
-       /* Catch I915_MODE_FLAG_INHERITED */
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
-               if (new_crtc_state->base.mode.private_flags !=
-                   old_crtc_state->base.mode.private_flags)
-                       new_crtc_state->base.mode_changed = true;
-       }
-
-       ret = drm_atomic_helper_check_modeset(dev, &state->base);
-       if (ret)
-               goto fail;
-
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
-               if (!needs_modeset(&new_crtc_state->base))
-                       continue;
-
-               if (!new_crtc_state->base.enable) {
-                       any_ms = true;
-                       continue;
-               }
-
-               ret = intel_modeset_pipe_config(new_crtc_state);
-               if (ret)
-                       goto fail;
-
-               if (intel_pipe_config_compare(dev_priv, old_crtc_state,
-                                             new_crtc_state, true)) {
-                       new_crtc_state->base.mode_changed = false;
-                       new_crtc_state->update_pipe = true;
-               }
-
-               if (needs_modeset(&new_crtc_state->base))
-                       any_ms = true;
-       }
-
-       ret = drm_dp_mst_atomic_check(&state->base);
-       if (ret)
-               goto fail;
-
-       if (any_ms) {
-               ret = intel_modeset_checks(state);
-               if (ret)
-                       goto fail;
-       } else {
-               state->cdclk.logical = dev_priv->cdclk.logical;
-       }
-
-       ret = icl_add_linked_planes(state);
-       if (ret)
-               goto fail;
-
-       ret = drm_atomic_helper_check_planes(dev, &state->base);
-       if (ret)
-               goto fail;
-
-       intel_fbc_choose_crtc(dev_priv, state);
-       ret = calc_watermark_data(state);
-       if (ret)
-               goto fail;
-
-       ret = intel_bw_atomic_check(state);
-       if (ret)
-               goto fail;
-
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i) {
-               if (!needs_modeset(&new_crtc_state->base) &&
-                   !new_crtc_state->update_pipe)
-                       continue;
-
-               intel_dump_pipe_config(new_crtc_state, state,
-                                      needs_modeset(&new_crtc_state->base) ?
-                                      "[modeset]" : "[fastset]");
-       }
-
-       return 0;
-
- fail:
-       if (ret == -EDEADLK)
-               return ret;
-
-       /*
-        * FIXME would probably be nice to know which crtc specifically
-        * caused the failure, in cases where we can pinpoint it.
-        */
-       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-                                           new_crtc_state, i)
-               intel_dump_pipe_config(new_crtc_state, state, "[failed]");
-
-       return ret;
-}
-
-static int intel_atomic_prepare_commit(struct drm_device *dev,
-                                      struct drm_atomic_state *state)
-{
-       return drm_atomic_helper_prepare_planes(dev, state);
-}
-
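-/*
- * Read the hardware vblank counter for @crtc, falling back to the
- * software-tracked count on platforms without a usable hardware counter.
- */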
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
-
-       if (!vblank->max_vblank_count)
-               return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
-
-       return dev->driver->get_vblank_counter(dev, crtc->pipe);
-}
-
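-/*
- * Enable a crtc (on a full modeset) or apply a fastset update to it,
- * including FBC and plane updates, with vblank evasion around the
- * register writes.
- */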
-static void intel_update_crtc(struct drm_crtc *crtc,
-                             struct drm_atomic_state *state,
-                             struct drm_crtc_state *old_crtc_state,
-                             struct drm_crtc_state *new_crtc_state)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
-       bool modeset = needs_modeset(new_crtc_state);
-       struct intel_plane_state *new_plane_state =
-               intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
-                                                to_intel_plane(crtc->primary));
-
-       if (modeset) {
-               update_scanline_offset(pipe_config);
-               dev_priv->display.crtc_enable(pipe_config, state);
-
-               /* vblanks work again, re-enable pipe CRC. */
-               intel_crtc_enable_pipe_crc(intel_crtc);
-       } else {
-               intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
-                                      pipe_config);
-
-               if (pipe_config->update_pipe)
-                       intel_encoders_update_pipe(crtc, pipe_config, state);
-       }
-
-       if (pipe_config->update_pipe && !pipe_config->enable_fbc)
-               intel_fbc_disable(intel_crtc);
-       else if (new_plane_state)
-               intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
-
-       intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
-       else
-               i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
-
-       intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
-}
-
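-/* Commit updates to all crtcs that are active in the new state. */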
-static void intel_update_crtcs(struct drm_atomic_state *state)
-{
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-       int i;
-
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (!new_crtc_state->active)
-                       continue;
-
-               intel_update_crtc(crtc, state, old_crtc_state,
-                                 new_crtc_state);
-       }
-}
-
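-/*
- * SKL+ version of intel_update_crtcs(): update the crtcs in an order
- * that keeps their DDB allocations from ever overlapping, waiting for
- * vblanks where required, and enable/disable the second DBuf slice on
- * gen11+ as needed.
- */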
-static void skl_update_crtcs(struct drm_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
-       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-       struct intel_crtc_state *cstate;
-       unsigned int updated = 0;
-       bool progress;
-       enum pipe pipe;
-       int i;
-       u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-       u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-       struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
-
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
-               /* ignore allocations for crtcs that have been turned off. */
-               if (new_crtc_state->active)
-                       entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
-
-       /* If 2nd DBuf slice required, enable it here */
-       if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
-               icl_dbuf_slices_update(dev_priv, required_slices);
-
-       /*
-        * Whenever the number of active pipes changes, we need to make sure we
-        * update the pipes in the right order so that their ddb allocations
-                * never overlap with each other in between CRTC updates. Otherwise we'll
-        * cause pipe underruns and other bad stuff.
-        */
-       do {
-               progress = false;
-
-               for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-                       bool vbl_wait = false;
-                       unsigned int cmask = drm_crtc_mask(crtc);
-
-                       intel_crtc = to_intel_crtc(crtc);
-                       cstate = to_intel_crtc_state(new_crtc_state);
-                       pipe = intel_crtc->pipe;
-
-                       if (updated & cmask || !cstate->base.active)
-                               continue;
-
-                       if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
-                                                       entries,
-                                                       INTEL_INFO(dev_priv)->num_pipes, i))
-                               continue;
-
-                       updated |= cmask;
-                       entries[i] = cstate->wm.skl.ddb;
-
-                       /*
-                        * If this is an already active pipe, its DDB changed,
-                        * and this isn't the last pipe that needs updating,
-                        * then we need to wait for a vblank to pass for the
-                        * new ddb allocation to take effect.
-                        */
-                       if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
-                                                &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
-                           !new_crtc_state->active_changed &&
-                           intel_state->wm_results.dirty_pipes != updated)
-                               vbl_wait = true;
-
-                       intel_update_crtc(crtc, state, old_crtc_state,
-                                         new_crtc_state);
-
-                       if (vbl_wait)
-                               intel_wait_for_vblank(dev_priv, pipe);
-
-                       progress = true;
-               }
-       } while (progress);
-
-       /* If the 2nd DBuf slice is no longer required, disable it */
-       if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
-               icl_dbuf_slices_update(dev_priv, required_slices);
-}
-
-static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
-{
-       struct intel_atomic_state *state, *next;
-       struct llist_node *freed;
-
-       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-       llist_for_each_entry_safe(state, next, freed, freed)
-               drm_atomic_state_put(&state->base);
-}
-
-static void intel_atomic_helper_free_state_worker(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-
-       intel_atomic_helper_free_state(dev_priv);
-}
-
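-/*
- * Wait for the commit_ready fence to signal, waking up early if a GPU
- * reset needs to force a modeset.
- */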
-static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
-{
-       struct wait_queue_entry wait_fence, wait_reset;
-       struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
-
-       init_wait_entry(&wait_fence, 0);
-       init_wait_entry(&wait_reset, 0);
-       for (;;) {
-               prepare_to_wait(&intel_state->commit_ready.wait,
-                               &wait_fence, TASK_UNINTERRUPTIBLE);
-               prepare_to_wait(&dev_priv->gpu_error.wait_queue,
-                               &wait_reset, TASK_UNINTERRUPTIBLE);
-
-               if (i915_sw_fence_done(&intel_state->commit_ready)
-                   || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
-                       break;
-
-               schedule();
-       }
-       finish_wait(&intel_state->commit_ready.wait, &wait_fence);
-       finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
-}
-
-static void intel_atomic_cleanup_work(struct work_struct *work)
-{
-       struct drm_atomic_state *state =
-               container_of(work, struct drm_atomic_state, commit_work);
-       struct drm_i915_private *i915 = to_i915(state->dev);
-
-       drm_atomic_helper_cleanup_planes(&i915->drm, state);
-       drm_atomic_helper_commit_cleanup_done(state);
-       drm_atomic_state_put(state);
-
-       intel_atomic_helper_free_state(i915);
-}
-
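-/*
- * The tail of the commit: disable crtcs that need a modeset, reprogram
- * the cdclk, enable/update the remaining crtcs, wait for flips and
- * vblanks, and defer the final cleanup to a worker.
- */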
-static void intel_atomic_commit_tail(struct drm_atomic_state *state)
-{
-       struct drm_device *dev = state->dev;
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-       struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
-       struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
-       u64 put_domains[I915_MAX_PIPES] = {};
-       intel_wakeref_t wakeref = 0;
-       int i;
-
-       intel_atomic_commit_fence_wait(intel_state);
-
-       drm_atomic_helper_wait_for_dependencies(state);
-
-       if (intel_state->modeset)
-               wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
-
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
-               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-               intel_crtc = to_intel_crtc(crtc);
-
-               if (needs_modeset(new_crtc_state) ||
-                   to_intel_crtc_state(new_crtc_state)->update_pipe) {
-
-                       put_domains[intel_crtc->pipe] =
-                               modeset_get_crtc_power_domains(crtc,
-                                       new_intel_crtc_state);
-               }
-
-               if (!needs_modeset(new_crtc_state))
-                       continue;
-
-               intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
-
-               if (old_crtc_state->active) {
-                       intel_crtc_disable_planes(intel_state, intel_crtc);
-
-                       /*
-                        * We need to disable pipe CRC before disabling the pipe,
-                        * or we race against vblank off.
-                        */
-                       intel_crtc_disable_pipe_crc(intel_crtc);
-
-                       dev_priv->display.crtc_disable(old_intel_crtc_state, state);
-                       intel_crtc->active = false;
-                       intel_fbc_disable(intel_crtc);
-                       intel_disable_shared_dpll(old_intel_crtc_state);
-
-                       /*
-                        * Underruns don't always raise
-                        * interrupts, so check manually.
-                        */
-                       intel_check_cpu_fifo_underruns(dev_priv);
-                       intel_check_pch_fifo_underruns(dev_priv);
-
-                       /* FIXME unify this for all platforms */
-                       if (!new_crtc_state->active &&
-                           !HAS_GMCH(dev_priv) &&
-                           dev_priv->display.initial_watermarks)
-                               dev_priv->display.initial_watermarks(intel_state,
-                                                                    new_intel_crtc_state);
-               }
-       }
-
-       /* FIXME: Eventually get rid of our intel_crtc->config pointer */
-       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
-               to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
-
-       if (intel_state->modeset) {
-               drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
-
-               intel_set_cdclk_pre_plane_update(dev_priv,
-                                                &intel_state->cdclk.actual,
-                                                &dev_priv->cdclk.actual,
-                                                intel_state->cdclk.pipe);
-
-               /*
-                * SKL workaround: bspec recommends we disable the SAGV when we
-                * have more than one pipe enabled
-                */
-               if (!intel_can_enable_sagv(state))
-                       intel_disable_sagv(dev_priv);
-
-               intel_modeset_verify_disabled(dev, state);
-       }
-
-       /* Complete the events for pipes that have now been disabled */
-       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-               bool modeset = needs_modeset(new_crtc_state);
-
-               /* Complete events for now-disabled pipes here. */
-               if (modeset && !new_crtc_state->active && new_crtc_state->event) {
-                       spin_lock_irq(&dev->event_lock);
-                       drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
-                       spin_unlock_irq(&dev->event_lock);
-
-                       new_crtc_state->event = NULL;
-               }
-       }
-
-       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
-       dev_priv->display.update_crtcs(state);
-
-       if (intel_state->modeset)
-               intel_set_cdclk_post_plane_update(dev_priv,
-                                                 &intel_state->cdclk.actual,
-                                                 &dev_priv->cdclk.actual,
-                                                 intel_state->cdclk.pipe);
-
-       /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
-        * already, but still need the state for the delayed optimization. To
-        * fix this:
-        * - wrap the optimization/post_plane_update stuff into a per-crtc work.
-        * - schedule that vblank worker _before_ calling hw_done
-        * - at the start of commit_tail, cancel it _synchronously
-        * - switch over to the vblank wait helper in the core after that since
-        *   we don't need our special handling any more.
-        */
-       drm_atomic_helper_wait_for_flip_done(dev, state);
-
-       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
-               if (new_crtc_state->active &&
-                   !needs_modeset(new_crtc_state) &&
-                   (new_intel_crtc_state->base.color_mgmt_changed ||
-                    new_intel_crtc_state->update_pipe))
-                       intel_color_load_luts(new_intel_crtc_state);
-       }
-
-       /*
-        * Now that the vblank has passed, we can go ahead and program the
-        * optimal watermarks on platforms that need two-step watermark
-        * programming.
-        *
-        * TODO: Move this (and other cleanup) to an async worker eventually.
-        */
-       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-               new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
-               if (dev_priv->display.optimize_watermarks)
-                       dev_priv->display.optimize_watermarks(intel_state,
-                                                             new_intel_crtc_state);
-       }
-
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
-
-               if (put_domains[i])
-                       modeset_put_power_domains(dev_priv, put_domains[i]);
-
-               intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
-       }
-
-       if (intel_state->modeset)
-               intel_verify_planes(intel_state);
-
-       if (intel_state->modeset && intel_can_enable_sagv(state))
-               intel_enable_sagv(dev_priv);
-
-       drm_atomic_helper_commit_hw_done(state);
-
-       if (intel_state->modeset) {
-               /* As one of the primary mmio accessors, KMS has a high
-                * likelihood of triggering bugs in unclaimed access. After we
-                * finish modesetting, see if an error has been flagged, and if
-                * so enable debugging for the next modeset - and hope we catch
-                * the culprit.
-                */
-               intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
-               intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
-       }
-       intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
-
-       /*
-        * Defer the cleanup of the old state to a separate worker so as not
-        * to impede the current task (userspace for blocking modesets) that
-        * is executed inline. For out-of-line asynchronous modesets/flips,
-        * deferring to a new worker seems overkill, but we would place a
-        * schedule point (cond_resched()) here anyway to keep latencies
-        * down.
-        */
-       INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
-       queue_work(system_highpri_wq, &state->commit_work);
-}
-
-static void intel_atomic_commit_work(struct work_struct *work)
-{
-       struct drm_atomic_state *state =
-               container_of(work, struct drm_atomic_state, commit_work);
-
-       intel_atomic_commit_tail(state);
-}
-
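-/*
- * i915_sw_fence callback: on FENCE_FREE, queue the state on the atomic
- * helper's free list so it is released from a worker.
- */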
-static int __i915_sw_fence_call
-intel_atomic_commit_ready(struct i915_sw_fence *fence,
-                         enum i915_sw_fence_notify notify)
-{
-       struct intel_atomic_state *state =
-               container_of(fence, struct intel_atomic_state, commit_ready);
-
-       switch (notify) {
-       case FENCE_COMPLETE:
-               /* we do blocking waits in the worker, nothing to do here */
-               break;
-       case FENCE_FREE:
-               {
-                       struct intel_atomic_helper *helper =
-                               &to_i915(state->base.dev)->atomic_helper;
-
-                       if (llist_add(&state->freed, &helper->free_list))
-                               schedule_work(&helper->free_work);
-                       break;
-               }
-       }
-
-       return NOTIFY_DONE;
-}
-
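-/* Move the frontbuffer tracking bits from the old to the new framebuffers. */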
-static void intel_atomic_track_fbs(struct drm_atomic_state *state)
-{
-       struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane *plane;
-       int i;
-
-       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
-               i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
-                                 intel_fb_obj(new_plane_state->fb),
-                                 to_intel_plane(plane)->frontbuffer_bit);
-}
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int intel_atomic_commit(struct drm_device *dev,
-                              struct drm_atomic_state *state,
-                              bool nonblock)
-{
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret = 0;
-
-       intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-       drm_atomic_state_get(state);
-       i915_sw_fence_init(&intel_state->commit_ready,
-                          intel_atomic_commit_ready);
-
-       /*
-        * The intel_legacy_cursor_update() fast path takes care
-        * of avoiding the vblank waits for simple cursor
-        * movement and flips. For cursor on/off and size changes,
-        * we want to perform the vblank waits so that watermark
-        * updates happen during the correct frames. Gen9+ have
-        * double buffered watermarks and so shouldn't need this.
-        *
-        * Unset state->legacy_cursor_update before the call to
-        * drm_atomic_helper_setup_commit() because otherwise
-        * drm_atomic_helper_wait_for_flip_done() is a noop and
-        * we get FIFO underruns because we didn't wait
-        * for vblank.
-        *
-        * FIXME doing watermarks and fb cleanup from a vblank worker
-        * (assuming we had any) would solve these problems.
-        */
-       if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
-               struct intel_crtc_state *new_crtc_state;
-               struct intel_crtc *crtc;
-               int i;
-
-               for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
-                       if (new_crtc_state->wm.need_postvbl_update ||
-                           new_crtc_state->update_wm_post)
-                               state->legacy_cursor_update = false;
-       }
-
-       ret = intel_atomic_prepare_commit(dev, state);
-       if (ret) {
-               DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
-               i915_sw_fence_commit(&intel_state->commit_ready);
-               intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
-               return ret;
-       }
-
-       ret = drm_atomic_helper_setup_commit(state, nonblock);
-       if (!ret)
-               ret = drm_atomic_helper_swap_state(state, true);
-
-       if (ret) {
-               i915_sw_fence_commit(&intel_state->commit_ready);
-
-               drm_atomic_helper_cleanup_planes(dev, state);
-               intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
-               return ret;
-       }
-       dev_priv->wm.distrust_bios_wm = false;
-       intel_shared_dpll_swap_state(state);
-       intel_atomic_track_fbs(state);
-
-       if (intel_state->modeset) {
-               memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
-                      sizeof(intel_state->min_cdclk));
-               memcpy(dev_priv->min_voltage_level,
-                      intel_state->min_voltage_level,
-                      sizeof(intel_state->min_voltage_level));
-               dev_priv->active_crtcs = intel_state->active_crtcs;
-               dev_priv->cdclk.force_min_cdclk =
-                       intel_state->cdclk.force_min_cdclk;
-
-               intel_cdclk_swap_state(intel_state);
-       }
-
-       drm_atomic_state_get(state);
-       INIT_WORK(&state->commit_work, intel_atomic_commit_work);
-
-       i915_sw_fence_commit(&intel_state->commit_ready);
-       if (nonblock && intel_state->modeset) {
-               queue_work(dev_priv->modeset_wq, &state->commit_work);
-       } else if (nonblock) {
-               queue_work(system_unbound_wq, &state->commit_work);
-       } else {
-               if (intel_state->modeset)
-                       flush_workqueue(dev_priv->modeset_wq);
-               intel_atomic_commit_tail(state);
-       }
-
-       return 0;
-}
-
-static const struct drm_crtc_funcs intel_crtc_funcs = {
-       .gamma_set = drm_atomic_helper_legacy_gamma_set,
-       .set_config = drm_atomic_helper_set_config,
-       .destroy = intel_crtc_destroy,
-       .page_flip = drm_atomic_helper_page_flip,
-       .atomic_duplicate_state = intel_crtc_duplicate_state,
-       .atomic_destroy_state = intel_crtc_destroy_state,
-       .set_crc_source = intel_crtc_set_crc_source,
-       .verify_crc_source = intel_crtc_verify_crc_source,
-       .get_crc_sources = intel_crtc_get_crc_sources,
-};
-
-struct wait_rps_boost {
-       struct wait_queue_entry wait;
-
-       struct drm_crtc *crtc;
-       struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
-                       unsigned mode, int sync, void *key)
-{
-       struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
-       struct i915_request *rq = wait->request;
-
-       /*
-        * If we missed the vblank, but the request is already running, it
-        * is reasonable to assume that it will complete before the next
-        * vblank without our intervention, so leave RPS alone.
-        */
-       if (!i915_request_started(rq))
-               gen6_rps_boost(rq);
-       i915_request_put(rq);
-
-       drm_crtc_vblank_put(wait->crtc);
-
-       list_del(&wait->wait.entry);
-       kfree(wait);
-       return 1;
-}
-
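-/*
- * Register a waiter on the crtc's vblank queue that boosts the GPU
- * frequency if the flip's request has not started running by the time
- * the vblank arrives.
- */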
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
-                                      struct dma_fence *fence)
-{
-       struct wait_rps_boost *wait;
-
-       if (!dma_fence_is_i915(fence))
-               return;
-
-       if (INTEL_GEN(to_i915(crtc->dev)) < 6)
-               return;
-
-       if (drm_crtc_vblank_get(crtc))
-               return;
-
-       wait = kmalloc(sizeof(*wait), GFP_KERNEL);
-       if (!wait) {
-               drm_crtc_vblank_put(crtc);
-               return;
-       }
-
-       wait->request = to_request(dma_fence_get(fence));
-       wait->crtc = crtc;
-
-       wait->wait.func = do_rps_boost;
-       wait->wait.flags = 0;
-
-       add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
-
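-/*
- * Pin (and fence, if needed) the framebuffer backing @plane_state,
- * attaching a physical allocation for cursors on platforms that
- * require it.
- */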
-static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       struct i915_vma *vma;
-
-       if (plane->id == PLANE_CURSOR &&
-           INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
-               struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-               const int align = intel_cursor_alignment(dev_priv);
-               int err;
-
-               err = i915_gem_object_attach_phys(obj, align);
-               if (err)
-                       return err;
-       }
-
-       vma = intel_pin_and_fence_fb_obj(fb,
-                                        &plane_state->view,
-                                        intel_plane_uses_fence(plane_state),
-                                        &plane_state->flags);
-       if (IS_ERR(vma))
-               return PTR_ERR(vma);
-
-       plane_state->vma = vma;
-
-       return 0;
-}
-
-static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
-{
-       struct i915_vma *vma;
-
-       vma = fetch_and_zero(&old_plane_state->vma);
-       if (vma)
-               intel_unpin_fb_vma(vma, old_plane_state->flags);
-}
-
-static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
-{
-       struct i915_sched_attr attr = {
-               .priority = I915_PRIORITY_DISPLAY,
-       };
-
-       i915_gem_object_wait_priority(obj, 0, &attr);
-}
-
-/**
- * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @plane: drm plane to prepare for
- * @new_state: the plane state being prepared
- *
- * Prepares a framebuffer for usage on a display plane.  Generally this
- * involves pinning the underlying object and updating the frontbuffer tracking
- * bits.  Some older platforms need special physical address handling for
- * cursor planes.
- *
- * Must be called with struct_mutex held.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int
-intel_prepare_plane_fb(struct drm_plane *plane,
-                      struct drm_plane_state *new_state)
-{
-       struct intel_atomic_state *intel_state =
-               to_intel_atomic_state(new_state->state);
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct drm_framebuffer *fb = new_state->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
-       int ret;
-
-       if (old_obj) {
-               struct drm_crtc_state *crtc_state =
-                       drm_atomic_get_new_crtc_state(new_state->state,
-                                                     plane->state->crtc);
-
-               /* Big Hammer, we also need to ensure that any pending
-                * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-                * current scanout is retired before unpinning the old
-                * framebuffer. Note that we rely on userspace rendering
-                * into the buffer attached to the pipe they are waiting
-                * on. If not, userspace generates a GPU hang with IPEHR
-                * pointing to the MI_WAIT_FOR_EVENT.
-                *
-                * This should only fail upon a hung GPU, in which case we
-                * can safely continue.
-                */
-               if (needs_modeset(crtc_state)) {
-                       ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
-                                                             old_obj->resv, NULL,
-                                                             false, 0,
-                                                             GFP_KERNEL);
-                       if (ret < 0)
-                               return ret;
-               }
-       }
-
-       if (new_state->fence) { /* explicit fencing */
-               ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
-                                                   new_state->fence,
-                                                   I915_FENCE_TIMEOUT,
-                                                   GFP_KERNEL);
-               if (ret < 0)
-                       return ret;
-       }
-
-       if (!obj)
-               return 0;
-
-       ret = i915_gem_object_pin_pages(obj);
-       if (ret)
-               return ret;
-
-       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
-       if (ret) {
-               i915_gem_object_unpin_pages(obj);
-               return ret;
-       }
-
-       ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
-
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       i915_gem_object_unpin_pages(obj);
-       if (ret)
-               return ret;
-
-       fb_obj_bump_render_priority(obj);
-       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
-
-       if (!new_state->fence) { /* implicit fencing */
-               struct dma_fence *fence;
-
-               ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
-                                                     obj->resv, NULL,
-                                                     false, I915_FENCE_TIMEOUT,
-                                                     GFP_KERNEL);
-               if (ret < 0)
-                       return ret;
-
-               fence = reservation_object_get_excl_rcu(obj->resv);
-               if (fence) {
-                       add_rps_boost_after_vblank(new_state->crtc, fence);
-                       dma_fence_put(fence);
-               }
-       } else {
-               add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
-       }
-
-       /*
-        * We declare pageflips to be interactive and so merit a small bias
-        * towards upclocking to deliver the frame on time. By only changing
-        * the RPS thresholds to sample more regularly and aim for higher
-        * clocks we can hopefully deliver low power workloads (like kodi)
-        * that are not quite steady state without resorting to forcing
-        * maximum clocks following a vblank miss (see do_rps_boost()).
-        */
-       if (!intel_state->rps_interactive) {
-               intel_rps_mark_interactive(dev_priv, true);
-               intel_state->rps_interactive = true;
-       }
-
-       return 0;
-}
-
-/**
- * intel_cleanup_plane_fb - Cleans up an fb after plane use
- * @plane: drm plane to clean up for
- * @old_state: the state from the previous modeset
- *
- * Cleans up a framebuffer that has just been removed from a plane.
- *
- * Must be called with struct_mutex held.
- */
-void
-intel_cleanup_plane_fb(struct drm_plane *plane,
-                      struct drm_plane_state *old_state)
-{
-       struct intel_atomic_state *intel_state =
-               to_intel_atomic_state(old_state->state);
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
-       if (intel_state->rps_interactive) {
-               intel_rps_mark_interactive(dev_priv, false);
-               intel_state->rps_interactive = false;
-       }
-
-       /* Should only be called after a successful intel_prepare_plane_fb()! */
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_plane_unpin_fb(to_intel_plane_state(old_state));
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int
-skl_max_scale(const struct intel_crtc_state *crtc_state,
-             u32 pixel_format)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int max_scale, mult;
-       int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
-
-       if (!crtc_state->base.enable)
-               return DRM_PLANE_HELPER_NO_SCALING;
-
-       crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
-       max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
-
-       if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
-               max_dotclk *= 2;
-
-       if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
-               return DRM_PLANE_HELPER_NO_SCALING;
-
-       /*
-        * The skl max scale is the lower of:
-        *    just under 3 (the -1 keeps the value below 3), or
-        *    cdclk / crtc_clock
-        */
-       mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
-       tmpclk1 = (1 << 16) * mult - 1;
-       tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
-       max_scale = min(tmpclk1, tmpclk2);
-
-       return max_scale;
-}
-
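-/*
- * Start vblank evasion and program the pipe state (color management,
- * pipe config, scalers, watermarks) that must land in the same frame
- * as the plane updates.
- */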
-static void intel_begin_crtc_commit(struct intel_atomic_state *state,
-                                   struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_crtc_state *old_crtc_state =
-               intel_atomic_get_old_crtc_state(state, crtc);
-       struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       bool modeset = needs_modeset(&new_crtc_state->base);
-
-       /* Perform vblank evasion around commit operation */
-       intel_pipe_update_start(new_crtc_state);
-
-       if (modeset)
-               goto out;
-
-       if (new_crtc_state->base.color_mgmt_changed ||
-           new_crtc_state->update_pipe)
-               intel_color_commit(new_crtc_state);
-
-       if (new_crtc_state->update_pipe)
-               intel_update_pipe_config(old_crtc_state, new_crtc_state);
-       else if (INTEL_GEN(dev_priv) >= 9)
-               skl_detach_scalers(new_crtc_state);
-
-       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-               bdw_set_pipemisc(new_crtc_state);
-
-out:
-       if (dev_priv->display.atomic_update_watermarks)
-               dev_priv->display.atomic_update_watermarks(state,
-                                                          new_crtc_state);
-}
-
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-
-       if (crtc_state->has_pch_encoder) {
-               enum pipe pch_transcoder =
-                       intel_crtc_pch_transcoder(crtc);
-
-               intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
-       }
-}
-
-static void intel_finish_crtc_commit(struct intel_atomic_state *state,
-                                    struct intel_crtc *crtc)
-{
-       struct intel_crtc_state *old_crtc_state =
-               intel_atomic_get_old_crtc_state(state, crtc);
-       struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-
-       intel_pipe_update_end(new_crtc_state);
-
-       if (new_crtc_state->update_pipe &&
-           !needs_modeset(&new_crtc_state->base) &&
-           old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
-               intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
-}
-
-/**
- * intel_plane_destroy - destroy a plane
- * @plane: plane to destroy
- *
- * Common destruction function for all types of planes (primary, cursor,
- * sprite).
- */
-void intel_plane_destroy(struct drm_plane *plane)
-{
-       drm_plane_cleanup(plane);
-       kfree(to_intel_plane(plane));
-}
-
-static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
-                                           u32 format, u64 modifier)
-{
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_C8:
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_XRGB1555:
-       case DRM_FORMAT_XRGB8888:
-               return modifier == DRM_FORMAT_MOD_LINEAR ||
-                       modifier == I915_FORMAT_MOD_X_TILED;
-       default:
-               return false;
-       }
-}
-
-static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
-                                           u32 format, u64 modifier)
-{
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_C8:
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-               return modifier == DRM_FORMAT_MOD_LINEAR ||
-                       modifier == I915_FORMAT_MOD_X_TILED;
-       default:
-               return false;
-       }
-}
-
-static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
-                                             u32 format, u64 modifier)
-{
-       return modifier == DRM_FORMAT_MOD_LINEAR &&
-               format == DRM_FORMAT_ARGB8888;
-}
-
-static const struct drm_plane_funcs i965_plane_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = i965_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs i8xx_plane_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = i8xx_plane_format_mod_supported,
-};
-
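-/*
- * Fast path for legacy cursor ioctls: update the cursor plane directly,
- * bypassing the full atomic commit, and fall back to the slowpath
- * whenever more than the fb or position changes.
- */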
-static int
-intel_legacy_cursor_update(struct drm_plane *plane,
-                          struct drm_crtc *crtc,
-                          struct drm_framebuffer *fb,
-                          int crtc_x, int crtc_y,
-                          unsigned int crtc_w, unsigned int crtc_h,
-                          u32 src_x, u32 src_y,
-                          u32 src_w, u32 src_h,
-                          struct drm_modeset_acquire_ctx *ctx)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       int ret;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct drm_framebuffer *old_fb;
-       struct intel_crtc_state *crtc_state =
-               to_intel_crtc_state(crtc->state);
-       struct intel_crtc_state *new_crtc_state;
-
-       /*
-        * When crtc is inactive or there is a modeset pending,
-        * wait for it to complete in the slowpath
-        */
-       if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
-           crtc_state->update_pipe)
-               goto slow;
-
-       old_plane_state = plane->state;
-       /*
-        * Don't do an async update if there is an outstanding commit modifying
-        * the plane.  This prevents our async update's changes from getting
-        * overridden by a previous synchronous update's state.
-        */
-       if (old_plane_state->commit &&
-           !try_wait_for_completion(&old_plane_state->commit->hw_done))
-               goto slow;
-
-       /*
-        * If any parameters change that may affect watermarks,
-        * take the slowpath. Only changing fb or position should be
-        * in the fastpath.
-        */
-       if (old_plane_state->crtc != crtc ||
-           old_plane_state->src_w != src_w ||
-           old_plane_state->src_h != src_h ||
-           old_plane_state->crtc_w != crtc_w ||
-           old_plane_state->crtc_h != crtc_h ||
-           !old_plane_state->fb != !fb)
-               goto slow;
-
-       new_plane_state = intel_plane_duplicate_state(plane);
-       if (!new_plane_state)
-               return -ENOMEM;
-
-       new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
-       if (!new_crtc_state) {
-               ret = -ENOMEM;
-               goto out_free;
-       }
-
-       drm_atomic_set_fb_for_plane(new_plane_state, fb);
-
-       new_plane_state->src_x = src_x;
-       new_plane_state->src_y = src_y;
-       new_plane_state->src_w = src_w;
-       new_plane_state->src_h = src_h;
-       new_plane_state->crtc_x = crtc_x;
-       new_plane_state->crtc_y = crtc_y;
-       new_plane_state->crtc_w = crtc_w;
-       new_plane_state->crtc_h = crtc_h;
-
-       ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
-                                                 to_intel_plane_state(old_plane_state),
-                                                 to_intel_plane_state(new_plane_state));
-       if (ret)
-               goto out_free;
-
-       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
-       if (ret)
-               goto out_free;
-
-       ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
-       if (ret)
-               goto out_unlock;
-
-       intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
-
-       old_fb = old_plane_state->fb;
-       i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
-                         intel_plane->frontbuffer_bit);
-
-       /* Swap plane state */
-       plane->state = new_plane_state;
-
-       /*
-        * We cannot swap crtc_state as it may be in use by an atomic commit or
-        * page flip that's running simultaneously. If we swap crtc_state and
-        * destroy the old state, we will cause a use-after-free there.
-        *
-        * Only update active_planes, which is needed for our internal
-        * bookkeeping. Either value will do the right thing when updating
-        * planes atomically. If the cursor was part of the atomic update then
-        * we would have taken the slowpath.
-        */
-       crtc_state->active_planes = new_crtc_state->active_planes;
-
-       if (plane->state->visible)
-               intel_update_plane(intel_plane, crtc_state,
-                                  to_intel_plane_state(plane->state));
-       else
-               intel_disable_plane(intel_plane, crtc_state);
-
-       intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
-
-out_unlock:
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-out_free:
-       if (new_crtc_state)
-               intel_crtc_destroy_state(crtc, &new_crtc_state->base);
-       if (ret)
-               intel_plane_destroy_state(plane, new_plane_state);
-       else
-               intel_plane_destroy_state(plane, old_plane_state);
-       return ret;
-
-slow:
-       return drm_atomic_helper_update_plane(plane, crtc, fb,
-                                             crtc_x, crtc_y, crtc_w, crtc_h,
-                                             src_x, src_y, src_w, src_h, ctx);
-}
-
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
-       .update_plane = intel_legacy_cursor_update,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = intel_cursor_format_mod_supported,
-};
-
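-/* Determine whether the given primary plane can serve as the FBC plane. */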
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
-                              enum i9xx_plane_id i9xx_plane)
-{
-       if (!HAS_FBC(dev_priv))
-               return false;
-
-       if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-               return i9xx_plane == PLANE_A; /* tied to pipe A */
-       else if (IS_IVYBRIDGE(dev_priv))
-               return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
-                       i9xx_plane == PLANE_C;
-       else if (INTEL_GEN(dev_priv) >= 4)
-               return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
-       else
-               return i9xx_plane == PLANE_A;
-}
-
-static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       struct intel_plane *plane;
-       const struct drm_plane_funcs *plane_funcs;
-       unsigned int supported_rotations;
-       unsigned int possible_crtcs;
-       const u64 *modifiers;
-       const u32 *formats;
-       int num_formats;
-       int ret;
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_universal_plane_create(dev_priv, pipe,
-                                                 PLANE_PRIMARY);
-
-       plane = intel_plane_alloc();
-       if (IS_ERR(plane))
-               return plane;
-
-       plane->pipe = pipe;
-       /*
-        * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
-        * port are hooked to pipe B. Hence we want plane A feeding pipe B.
-        */
-       if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
-               plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
-       else
-               plane->i9xx_plane = (enum i9xx_plane_id) pipe;
-       plane->id = PLANE_PRIMARY;
-       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
-       plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
-       if (plane->has_fbc) {
-               struct intel_fbc *fbc = &dev_priv->fbc;
-
-               fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 4) {
-               formats = i965_primary_formats;
-               num_formats = ARRAY_SIZE(i965_primary_formats);
-               modifiers = i9xx_format_modifiers;
-
-               plane->max_stride = i9xx_plane_max_stride;
-               plane->update_plane = i9xx_update_plane;
-               plane->disable_plane = i9xx_disable_plane;
-               plane->get_hw_state = i9xx_plane_get_hw_state;
-               plane->check_plane = i9xx_plane_check;
-
-               plane_funcs = &i965_plane_funcs;
-       } else {
-               formats = i8xx_primary_formats;
-               num_formats = ARRAY_SIZE(i8xx_primary_formats);
-               modifiers = i9xx_format_modifiers;
-
-               plane->max_stride = i9xx_plane_max_stride;
-               plane->update_plane = i9xx_update_plane;
-               plane->disable_plane = i9xx_disable_plane;
-               plane->get_hw_state = i9xx_plane_get_hw_state;
-               plane->check_plane = i9xx_plane_check;
-
-               plane_funcs = &i8xx_plane_funcs;
-       }
-
-       possible_crtcs = BIT(pipe);
-
-       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-               ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-                                              possible_crtcs, plane_funcs,
-                                              formats, num_formats, modifiers,
-                                              DRM_PLANE_TYPE_PRIMARY,
-                                              "primary %c", pipe_name(pipe));
-       else
-               ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-                                              possible_crtcs, plane_funcs,
-                                              formats, num_formats, modifiers,
-                                              DRM_PLANE_TYPE_PRIMARY,
-                                              "plane %c",
-                                              plane_name(plane->i9xx_plane));
-       if (ret)
-               goto fail;
-
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
-                       DRM_MODE_REFLECT_X;
-       } else if (INTEL_GEN(dev_priv) >= 4) {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
-       } else {
-               supported_rotations = DRM_MODE_ROTATE_0;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 4)
-               drm_plane_create_rotation_property(&plane->base,
-                                                  DRM_MODE_ROTATE_0,
-                                                  supported_rotations);
-
-       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
-       return plane;
-
-fail:
-       intel_plane_free(plane);
-
-       return ERR_PTR(ret);
-}
-
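The (enum i9xx_plane_id) !pipe cast above encodes the gen2/3 quirk that plane A must feed pipe B when FBC is wanted. A minimal standalone sketch of that mapping rule, with hypothetical two-pipe enums standing in for the driver's types:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical two-pipe / two-plane enums, for illustration only. */
enum pipe { PIPE_A, PIPE_B };
enum i9xx_plane_id { PLANE_ID_A, PLANE_ID_B };

/*
 * gen2/3: only plane A can do FBC, while the LVDS panel sits on pipe B,
 * so the primary plane for a pipe is swapped when FBC is available.
 */
static enum i9xx_plane_id primary_plane_for_pipe(enum pipe pipe, bool swap_for_fbc)
{
	return (enum i9xx_plane_id)(swap_for_fbc ? !pipe : pipe);
}

int main(void)
{
	assert(primary_plane_for_pipe(PIPE_B, true) == PLANE_ID_A);
	assert(primary_plane_for_pipe(PIPE_A, true) == PLANE_ID_B);
	assert(primary_plane_for_pipe(PIPE_B, false) == PLANE_ID_B);
	return 0;
}
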
-static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
-                         enum pipe pipe)
-{
-       unsigned int possible_crtcs;
-       struct intel_plane *cursor;
-       int ret;
-
-       cursor = intel_plane_alloc();
-       if (IS_ERR(cursor))
-               return cursor;
-
-       cursor->pipe = pipe;
-       cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
-       cursor->id = PLANE_CURSOR;
-       cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
-
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
-               cursor->max_stride = i845_cursor_max_stride;
-               cursor->update_plane = i845_update_cursor;
-               cursor->disable_plane = i845_disable_cursor;
-               cursor->get_hw_state = i845_cursor_get_hw_state;
-               cursor->check_plane = i845_check_cursor;
-       } else {
-               cursor->max_stride = i9xx_cursor_max_stride;
-               cursor->update_plane = i9xx_update_cursor;
-               cursor->disable_plane = i9xx_disable_cursor;
-               cursor->get_hw_state = i9xx_cursor_get_hw_state;
-               cursor->check_plane = i9xx_check_cursor;
-       }
-
-       cursor->cursor.base = ~0;
-       cursor->cursor.cntl = ~0;
-
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
-               cursor->cursor.size = ~0;
-
-       possible_crtcs = BIT(pipe);
-
-       ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
-                                      possible_crtcs, &intel_cursor_plane_funcs,
-                                      intel_cursor_formats,
-                                      ARRAY_SIZE(intel_cursor_formats),
-                                      cursor_format_modifiers,
-                                      DRM_PLANE_TYPE_CURSOR,
-                                      "cursor %c", pipe_name(pipe));
-       if (ret)
-               goto fail;
-
-       if (INTEL_GEN(dev_priv) >= 4)
-               drm_plane_create_rotation_property(&cursor->base,
-                                                  DRM_MODE_ROTATE_0,
-                                                  DRM_MODE_ROTATE_0 |
-                                                  DRM_MODE_ROTATE_180);
-
-       drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
-
-       return cursor;
-
-fail:
-       intel_plane_free(cursor);
-
-       return ERR_PTR(ret);
-}
-
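The ~0 initializers for cursor.base and cursor.cntl appear to act as "never written" sentinels: the first real update can never match the cached value, so it always reaches the hardware. A toy sketch of that write-if-changed caching pattern (all names here are made up for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* "Write only if changed" register cache; the ~0 sentinel guarantees the
 * first real value can never match the cached one. */
struct cursor_cache {
	uint32_t base;
	uint32_t cntl;
};

static void write_if_changed(uint32_t *cached, uint32_t val, const char *name)
{
	if (*cached == val)
		return;
	printf("write %s = 0x%08" PRIx32 "\n", name, val); /* stands in for an MMIO write */
	*cached = val;
}

int main(void)
{
	struct cursor_cache c = { .base = ~0u, .cntl = ~0u };

	write_if_changed(&c.base, 0x1000, "CURBASE"); /* first update always lands */
	write_if_changed(&c.base, 0x1000, "CURBASE"); /* unchanged: skipped */
	write_if_changed(&c.cntl, 0,      "CURCNTR"); /* even 0 differs from ~0 */
	return 0;
}
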
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
-                                   struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc_scaler_state *scaler_state =
-               &crtc_state->scaler_state;
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       int i;
-
-       crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
-       if (!crtc->num_scalers)
-               return;
-
-       for (i = 0; i < crtc->num_scalers; i++) {
-               struct intel_scaler *scaler = &scaler_state->scalers[i];
-
-               scaler->in_use = 0;
-               scaler->mode = 0;
-       }
-
-       scaler_state->scaler_id = -1;
-}
-
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       struct intel_crtc *intel_crtc;
-       struct intel_crtc_state *crtc_state = NULL;
-       struct intel_plane *primary = NULL;
-       struct intel_plane *cursor = NULL;
-       int sprite, ret;
-
-       intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
-       if (!intel_crtc)
-               return -ENOMEM;
-
-       crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-       if (!crtc_state) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-       __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
-       intel_crtc->config = crtc_state;
-
-       primary = intel_primary_plane_create(dev_priv, pipe);
-       if (IS_ERR(primary)) {
-               ret = PTR_ERR(primary);
-               goto fail;
-       }
-       intel_crtc->plane_ids_mask |= BIT(primary->id);
-
-       for_each_sprite(dev_priv, pipe, sprite) {
-               struct intel_plane *plane;
-
-               plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
-               if (IS_ERR(plane)) {
-                       ret = PTR_ERR(plane);
-                       goto fail;
-               }
-               intel_crtc->plane_ids_mask |= BIT(plane->id);
-       }
-
-       cursor = intel_cursor_plane_create(dev_priv, pipe);
-       if (IS_ERR(cursor)) {
-               ret = PTR_ERR(cursor);
-               goto fail;
-       }
-       intel_crtc->plane_ids_mask |= BIT(cursor->id);
-
-       ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
-                                       &primary->base, &cursor->base,
-                                       &intel_crtc_funcs,
-                                       "pipe %c", pipe_name(pipe));
-       if (ret)
-               goto fail;
-
-       intel_crtc->pipe = pipe;
-
-       /* initialize shared scalers */
-       intel_crtc_init_scalers(intel_crtc, crtc_state);
-
-       BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
-              dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
-       dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
-
-       if (INTEL_GEN(dev_priv) < 9) {
-               enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
-               BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
-                      dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
-               dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
-       }
-
-       drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
-       intel_color_init(intel_crtc);
-
-       WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
-
-       return 0;
-
-fail:
-       /*
-        * drm_mode_config_cleanup() will free up any
-        * crtcs/planes already initialized.
-        */
-       kfree(crtc_state);
-       kfree(intel_crtc);
-
-       return ret;
-}
-
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
-                                     struct drm_file *file)
-{
-       struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
-       struct drm_crtc *drmmode_crtc;
-       struct intel_crtc *crtc;
-
-       drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
-       if (!drmmode_crtc)
-               return -ENOENT;
-
-       crtc = to_intel_crtc(drmmode_crtc);
-       pipe_from_crtc_id->pipe = crtc->pipe;
-
-       return 0;
-}
-
-static int intel_encoder_clones(struct intel_encoder *encoder)
-{
-       struct drm_device *dev = encoder->base.dev;
-       struct intel_encoder *source_encoder;
-       int index_mask = 0;
-       int entry = 0;
-
-       for_each_intel_encoder(dev, source_encoder) {
-               if (encoders_cloneable(encoder, source_encoder))
-                       index_mask |= (1 << entry);
-
-               entry++;
-       }
-
-       return index_mask;
-}
-
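intel_encoder_clones() builds a per-encoder bitmask in which bit i is set when encoder i on the device can be cloned with the encoder being considered. A self-contained sketch of the same mask-building idiom, using a placeholder cloneability rule rather than the driver's encoders_cloneable():

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for struct intel_encoder. */
struct enc { int type; };

static bool cloneable(const struct enc *a, const struct enc *b)
{
	return a->type == b->type;	/* placeholder rule */
}

/* Build a bitmask with bit i set when list[i] can be cloned with *self. */
static unsigned int clone_mask(const struct enc *self,
			       const struct enc *list, size_t n)
{
	unsigned int mask = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if (cloneable(self, &list[i]))
			mask |= 1u << i;

	return mask;
}

int main(void)
{
	struct enc e[] = { { 0 }, { 1 }, { 0 } };

	assert(clone_mask(&e[0], e, 3) == 0x5);	/* entries 0 and 2 */
	return 0;
}
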
-static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
-{
-       if (!IS_MOBILE(dev_priv))
-               return false;
-
-       if ((I915_READ(DP_A) & DP_DETECTED) == 0)
-               return false;
-
-       if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
-               return false;
-
-       return true;
-}
-
-static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) >= 9)
-               return false;
-
-       if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
-               return false;
-
-       if (HAS_PCH_LPT_H(dev_priv) &&
-           I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
-               return false;
-
-       /* DDI E can't be used if DDI A requires 4 lanes */
-       if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
-               return false;
-
-       if (!dev_priv->vbt.int_crt_support)
-               return false;
-
-       return true;
-}
-
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
-{
-       int pps_num;
-       int pps_idx;
-
-       if (HAS_DDI(dev_priv))
-               return;
-       /*
-        * This w/a is needed at least on CPT/PPT, but to be sure apply it
-        * everywhere where registers can be write protected.
-        */
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               pps_num = 2;
-       else
-               pps_num = 1;
-
-       for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
-               u32 val = I915_READ(PP_CONTROL(pps_idx));
-
-               val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
-               I915_WRITE(PP_CONTROL(pps_idx), val);
-       }
-}
-
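The PP_CONTROL update above is a classic read-modify-write of a single register field: clear the field's mask, then OR in the unlock value, leaving the rest of the register alone. A small sketch with a hypothetical 16-bit unlock field in the upper half of a register:

#include <assert.h>
#include <stdint.h>

/* Generic read-modify-write of a register field: clear the field's mask,
 * then OR in the new value, leaving the other bits untouched. */
static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	/* Hypothetical 16-bit unlock field in the top half of the register. */
	const uint32_t UNLOCK_MASK = 0xffff0000;
	const uint32_t UNLOCK_KEY  = 0xabcd0000;

	uint32_t pp_control = 0x12345678;

	pp_control = rmw_field(pp_control, UNLOCK_MASK, UNLOCK_KEY);
	assert(pp_control == 0xabcd5678);	/* low bits preserved */
	return 0;
}
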
-static void intel_pps_init(struct drm_i915_private *dev_priv)
-{
-       if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
-               dev_priv->pps_mmio_base = PCH_PPS_BASE;
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               dev_priv->pps_mmio_base = VLV_PPS_BASE;
-       else
-               dev_priv->pps_mmio_base = PPS_BASE;
-
-       intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void intel_setup_outputs(struct drm_i915_private *dev_priv)
-{
-       struct intel_encoder *encoder;
-       bool dpd_is_edp = false;
-
-       intel_pps_init(dev_priv);
-
-       if (!HAS_DISPLAY(dev_priv))
-               return;
-
-       if (IS_ELKHARTLAKE(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               icl_dsi_init(dev_priv);
-       } else if (INTEL_GEN(dev_priv) >= 11) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               intel_ddi_init(dev_priv, PORT_D);
-               intel_ddi_init(dev_priv, PORT_E);
-               /*
-                * On some ICL SKUs port F is not present. No strap bits for
-                * this, so rely on VBT.
-                * Work around broken VBTs on SKUs known to have no port F.
-                */
-               if (IS_ICL_WITH_PORT_F(dev_priv) &&
-                   intel_bios_is_port_present(dev_priv, PORT_F))
-                       intel_ddi_init(dev_priv, PORT_F);
-
-               icl_dsi_init(dev_priv);
-       } else if (IS_GEN9_LP(dev_priv)) {
-               /*
-                * FIXME: Broxton doesn't support port detection via the
-                * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
-                * detect the ports.
-                */
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-
-               vlv_dsi_init(dev_priv);
-       } else if (HAS_DDI(dev_priv)) {
-               int found;
-
-               if (intel_ddi_crt_present(dev_priv))
-                       intel_crt_init(dev_priv);
-
-               /*
-                * Haswell uses DDI functions to detect digital outputs.
-                * On SKL pre-D0 the strap isn't connected, so we assume
-                * it's there.
-                */
-               found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
-               /* WaIgnoreDDIAStrap: skl */
-               if (found || IS_GEN9_BC(dev_priv))
-                       intel_ddi_init(dev_priv, PORT_A);
-
-               /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
-                * register */
-               found = I915_READ(SFUSE_STRAP);
-
-               if (found & SFUSE_STRAP_DDIB_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_B);
-               if (found & SFUSE_STRAP_DDIC_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_C);
-               if (found & SFUSE_STRAP_DDID_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_D);
-               if (found & SFUSE_STRAP_DDIF_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_F);
-               /*
-                * On SKL we don't have a way to detect DDI-E so we rely on VBT.
-                */
-               if (IS_GEN9_BC(dev_priv) &&
-                   intel_bios_is_port_present(dev_priv, PORT_E))
-                       intel_ddi_init(dev_priv, PORT_E);
-
-       } else if (HAS_PCH_SPLIT(dev_priv)) {
-               int found;
-
-               /*
-                * intel_edp_init_connector() depends on this completing first,
-                * to prevent the registration of both eDP and LVDS and the
-                * incorrect sharing of the PPS.
-                */
-               intel_lvds_init(dev_priv);
-               intel_crt_init(dev_priv);
-
-               dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
-
-               if (ilk_has_edp_a(dev_priv))
-                       intel_dp_init(dev_priv, DP_A, PORT_A);
-
-               if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
-                       /* PCH SDVOB multiplex with HDMIB */
-                       found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
-                       if (!found)
-                               intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
-                       if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
-                               intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
-               }
-
-               if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
-                       intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
-
-               if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
-                       intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
-
-               if (I915_READ(PCH_DP_C) & DP_DETECTED)
-                       intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
-
-               if (I915_READ(PCH_DP_D) & DP_DETECTED)
-                       intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               bool has_edp, has_port;
-
-               if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
-                       intel_crt_init(dev_priv);
-
-               /*
-                * The DP_DETECTED bit is the latched state of the DDC
-                * SDA pin at boot. However since eDP doesn't require DDC
-                * (no way to plug in a DP->HDMI dongle) the DDC pins for
-                * eDP ports may have been muxed to an alternate function.
-                * Thus we can't rely on the DP_DETECTED bit alone to detect
-                * eDP ports. Consult the VBT as well as DP_DETECTED to
-                * detect eDP ports.
-                *
-                * Sadly the straps seem to be missing sometimes even for HDMI
-                * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
-                * and VBT for the presence of the port. Additionally we can't
-                * trust the port type the VBT declares as we've seen at least
-                * HDMI ports that the VBT claims are DP or eDP.
-                */
-               has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
-               has_port = intel_bios_is_port_present(dev_priv, PORT_B);
-               if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
-                       has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
-               if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
-                       intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
-
-               has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
-               has_port = intel_bios_is_port_present(dev_priv, PORT_C);
-               if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
-                       has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
-               if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
-                       intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
-
-               if (IS_CHERRYVIEW(dev_priv)) {
-                       /*
-                        * eDP not supported on port D,
-                        * so no need to worry about it
-                        */
-                       has_port = intel_bios_is_port_present(dev_priv, PORT_D);
-                       if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
-                               intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
-                       if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
-                               intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
-               }
-
-               vlv_dsi_init(dev_priv);
-       } else if (IS_PINEVIEW(dev_priv)) {
-               intel_lvds_init(dev_priv);
-               intel_crt_init(dev_priv);
-       } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
-               bool found = false;
-
-               if (IS_MOBILE(dev_priv))
-                       intel_lvds_init(dev_priv);
-
-               intel_crt_init(dev_priv);
-
-               if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
-                       DRM_DEBUG_KMS("probing SDVOB\n");
-                       found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
-                       if (!found && IS_G4X(dev_priv)) {
-                               DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-                               intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
-                       }
-
-                       if (!found && IS_G4X(dev_priv))
-                               intel_dp_init(dev_priv, DP_B, PORT_B);
-               }
-
-               /* Before G4X, SDVOC doesn't have its own detect register */
-
-               if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
-                       DRM_DEBUG_KMS("probing SDVOC\n");
-                       found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
-               }
-
-               if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
-
-                       if (IS_G4X(dev_priv)) {
-                               DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-                               intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
-                       }
-                       if (IS_G4X(dev_priv))
-                               intel_dp_init(dev_priv, DP_C, PORT_C);
-               }
-
-               if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
-                       intel_dp_init(dev_priv, DP_D, PORT_D);
-
-               if (SUPPORTS_TV(dev_priv))
-                       intel_tv_init(dev_priv);
-       } else if (IS_GEN(dev_priv, 2)) {
-               if (IS_I85X(dev_priv))
-                       intel_lvds_init(dev_priv);
-
-               intel_crt_init(dev_priv);
-               intel_dvo_init(dev_priv);
-       }
-
-       intel_psr_init(dev_priv);
-
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               encoder->base.possible_crtcs = encoder->crtc_mask;
-               encoder->base.possible_clones =
-                       intel_encoder_clones(encoder);
-       }
-
-       intel_init_pch_refclk(dev_priv);
-
-       drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
-}
-
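For VLV/CHV the code above trusts neither the DP_DETECTED straps nor the VBT alone: a port is brought up when either source says it exists, and HDMI is only registered if no eDP panel already claimed the port. Reduced to its decision logic, with purely illustrative helpers:

#include <assert.h>
#include <stdbool.h>

/* Sketch of the VLV/CHV probing rule: init DP when either the strap or
 * the VBT reports the port, and fall back to HDMI only when no eDP
 * panel took the port. */
static bool should_init_dp(bool strap_detected, bool vbt_has_port)
{
	return strap_detected || vbt_has_port;
}

static bool should_init_hdmi(bool strap_detected, bool vbt_has_port, bool has_edp)
{
	return (strap_detected || vbt_has_port) && !has_edp;
}

int main(void)
{
	assert(should_init_dp(false, true));		/* broken strap, VBT wins */
	assert(!should_init_hdmi(true, true, true));	/* eDP already took the port */
	return 0;
}
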
-static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       i915_gem_object_lock(obj);
-       WARN_ON(!obj->framebuffer_references--);
-       i915_gem_object_unlock(obj);
-
-       i915_gem_object_put(obj);
-
-       kfree(intel_fb);
-}
-
-static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                               struct drm_file *file,
-                                               unsigned int *handle)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
-       if (obj->userptr.mm) {
-               DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
-               return -EINVAL;
-       }
-
-       return drm_gem_handle_create(file, &obj->base, handle);
-}
-
-static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
-                                       struct drm_file *file,
-                                       unsigned flags, unsigned color,
-                                       struct drm_clip_rect *clips,
-                                       unsigned num_clips)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
-       i915_gem_object_flush_if_display(obj);
-       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
-
-       return 0;
-}
-
-static const struct drm_framebuffer_funcs intel_fb_funcs = {
-       .destroy = intel_user_framebuffer_destroy,
-       .create_handle = intel_user_framebuffer_create_handle,
-       .dirty = intel_user_framebuffer_dirty,
-};
-
-static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
-                                 struct drm_i915_gem_object *obj,
-                                 struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct drm_framebuffer *fb = &intel_fb->base;
-       u32 max_stride;
-       unsigned int tiling, stride;
-       int ret = -EINVAL;
-       int i;
-
-       i915_gem_object_lock(obj);
-       obj->framebuffer_references++;
-       tiling = i915_gem_object_get_tiling(obj);
-       stride = i915_gem_object_get_stride(obj);
-       i915_gem_object_unlock(obj);
-
-       if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
-               /*
-                * If there's a fence, enforce that
-                * the fb modifier and tiling mode match.
-                */
-               if (tiling != I915_TILING_NONE &&
-                   tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-                       DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
-                       goto err;
-               }
-       } else {
-               if (tiling == I915_TILING_X) {
-                       mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
-               } else if (tiling == I915_TILING_Y) {
-                       DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
-                       goto err;
-               }
-       }
-
-       if (!drm_any_plane_has_format(&dev_priv->drm,
-                                     mode_cmd->pixel_format,
-                                     mode_cmd->modifier[0])) {
-               struct drm_format_name_buf format_name;
-
-               DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
-                             drm_get_format_name(mode_cmd->pixel_format,
-                                                 &format_name),
-                             mode_cmd->modifier[0]);
-               goto err;
-       }
-
-       /*
-        * gen2/3 display engine uses the fence if present,
-        * so the tiling mode must match the fb modifier exactly.
-        */
-       if (INTEL_GEN(dev_priv) < 4 &&
-           tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-               DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
-               goto err;
-       }
-
-       max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
-                                        mode_cmd->modifier[0]);
-       if (mode_cmd->pitches[0] > max_stride) {
-               DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
-                             mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
-                             "tiled" : "linear",
-                             mode_cmd->pitches[0], max_stride);
-               goto err;
-       }
-
-       /*
-        * If there's a fence, enforce that
-        * the fb pitch and fence stride match.
-        */
-       if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
-               DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
-                             mode_cmd->pitches[0], stride);
-               goto err;
-       }
-
-       /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
-       if (mode_cmd->offsets[0] != 0)
-               goto err;
-
-       drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
-
-       for (i = 0; i < fb->format->num_planes; i++) {
-               u32 stride_alignment;
-
-               if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
-                       DRM_DEBUG_KMS("bad plane %d handle\n", i);
-                       goto err;
-               }
-
-               stride_alignment = intel_fb_stride_alignment(fb, i);
-
-               /*
-                * Display WA #0531: skl,bxt,kbl,glk
-                *
-                * Render decompression and plane width > 3840
-                * combined with horizontal panning requires the
-                * plane stride to be a multiple of 4. We'll just
-                * require the entire fb to accommodate that to avoid
-                * potential runtime errors at plane configuration time.
-                */
-               if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
-                   is_ccs_modifier(fb->modifier))
-                       stride_alignment *= 4;
-
-               if (fb->pitches[i] & (stride_alignment - 1)) {
-                       DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
-                                     i, fb->pitches[i], stride_alignment);
-                       goto err;
-               }
-
-               fb->obj[i] = &obj->base;
-       }
-
-       ret = intel_fill_fb_info(dev_priv, fb);
-       if (ret)
-               goto err;
-
-       ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
-       if (ret) {
-               DRM_ERROR("framebuffer init failed %d\n", ret);
-               goto err;
-       }
-
-       return 0;
-
-err:
-       i915_gem_object_lock(obj);
-       obj->framebuffer_references--;
-       i915_gem_object_unlock(obj);
-       return ret;
-}
-
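The pitch validation relies on the usual power-of-two alignment idiom: x is a multiple of a power-of-two a exactly when (x & (a - 1)) is zero, which is also why the Display WA #0531 adjustment can simply scale the alignment by 4. A tiny self-check of that idiom:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* x is a multiple of align_pow2 (a power of two) iff the low bits are 0. */
static bool is_aligned(uint32_t x, uint32_t align_pow2)
{
	return (x & (align_pow2 - 1)) == 0;
}

int main(void)
{
	assert(is_aligned(4096, 64));
	assert(!is_aligned(4100, 64));

	/* Display WA #0531 style adjustment: alignment scaled by 4. */
	assert(is_aligned(1024, 64 * 4));
	return 0;
}
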
-static struct drm_framebuffer *
-intel_user_framebuffer_create(struct drm_device *dev,
-                             struct drm_file *filp,
-                             const struct drm_mode_fb_cmd2 *user_mode_cmd)
-{
-       struct drm_framebuffer *fb;
-       struct drm_i915_gem_object *obj;
-       struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
-
-       obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
-       if (!obj)
-               return ERR_PTR(-ENOENT);
-
-       fb = intel_framebuffer_create(obj, &mode_cmd);
-       if (IS_ERR(fb))
-               i915_gem_object_put(obj);
-
-       return fb;
-}
-
-static void intel_atomic_state_free(struct drm_atomic_state *state)
-{
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
-       drm_atomic_state_default_release(state);
-
-       i915_sw_fence_fini(&intel_state->commit_ready);
-
-       kfree(state);
-}
-
-static enum drm_mode_status
-intel_mode_valid(struct drm_device *dev,
-                const struct drm_display_mode *mode)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int hdisplay_max, htotal_max;
-       int vdisplay_max, vtotal_max;
-
-       /*
-        * Can't reject DBLSCAN here because Xorg ddxen can add piles
-        * of DBLSCAN modes to the output's mode list when they detect
-        * the scaling mode property on the connector. And they don't
-        * ask the kernel to validate those modes in any way until
-        * modeset time at which point the client gets a protocol error.
-        * So in order to not upset those clients we silently ignore the
-        * DBLSCAN flag on such connectors. For other connectors we will
-        * reject modes with the DBLSCAN flag in encoder->compute_config().
-        * And we always reject DBLSCAN modes in connector->mode_valid()
-        * as we never want such modes on the connector's mode list.
-        */
-
-       if (mode->vscan > 1)
-               return MODE_NO_VSCAN;
-
-       if (mode->flags & DRM_MODE_FLAG_HSKEW)
-               return MODE_H_ILLEGAL;
-
-       if (mode->flags & (DRM_MODE_FLAG_CSYNC |
-                          DRM_MODE_FLAG_NCSYNC |
-                          DRM_MODE_FLAG_PCSYNC))
-               return MODE_HSYNC;
-
-       if (mode->flags & (DRM_MODE_FLAG_BCAST |
-                          DRM_MODE_FLAG_PIXMUX |
-                          DRM_MODE_FLAG_CLKDIV2))
-               return MODE_BAD;
-
-       if (INTEL_GEN(dev_priv) >= 9 ||
-           IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
-               hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
-               vdisplay_max = 4096;
-               htotal_max = 8192;
-               vtotal_max = 8192;
-       } else if (INTEL_GEN(dev_priv) >= 3) {
-               hdisplay_max = 4096;
-               vdisplay_max = 4096;
-               htotal_max = 8192;
-               vtotal_max = 8192;
-       } else {
-               hdisplay_max = 2048;
-               vdisplay_max = 2048;
-               htotal_max = 4096;
-               vtotal_max = 4096;
-       }
-
-       if (mode->hdisplay > hdisplay_max ||
-           mode->hsync_start > htotal_max ||
-           mode->hsync_end > htotal_max ||
-           mode->htotal > htotal_max)
-               return MODE_H_ILLEGAL;
-
-       if (mode->vdisplay > vdisplay_max ||
-           mode->vsync_start > vtotal_max ||
-           mode->vsync_end > vtotal_max ||
-           mode->vtotal > vtotal_max)
-               return MODE_V_ILLEGAL;
-
-       return MODE_OK;
-}
-
-static const struct drm_mode_config_funcs intel_mode_funcs = {
-       .fb_create = intel_user_framebuffer_create,
-       .get_format_info = intel_get_format_info,
-       .output_poll_changed = intel_fbdev_output_poll_changed,
-       .mode_valid = intel_mode_valid,
-       .atomic_check = intel_atomic_check,
-       .atomic_commit = intel_atomic_commit,
-       .atomic_state_alloc = intel_atomic_state_alloc,
-       .atomic_state_clear = intel_atomic_state_clear,
-       .atomic_state_free = intel_atomic_state_free,
-};
-
-/**
- * intel_init_display_hooks - initialize the display modesetting hooks
- * @dev_priv: device private
- */
-void intel_init_display_hooks(struct drm_i915_private *dev_priv)
-{
-       intel_init_cdclk_hooks(dev_priv);
-
-       if (INTEL_GEN(dev_priv) >= 9) {
-               dev_priv->display.get_pipe_config = haswell_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       skylake_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock =
-                       haswell_crtc_compute_clock;
-               dev_priv->display.crtc_enable = haswell_crtc_enable;
-               dev_priv->display.crtc_disable = haswell_crtc_disable;
-       } else if (HAS_DDI(dev_priv)) {
-               dev_priv->display.get_pipe_config = haswell_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock =
-                       haswell_crtc_compute_clock;
-               dev_priv->display.crtc_enable = haswell_crtc_enable;
-               dev_priv->display.crtc_disable = haswell_crtc_disable;
-       } else if (HAS_PCH_SPLIT(dev_priv)) {
-               dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock =
-                       ironlake_crtc_compute_clock;
-               dev_priv->display.crtc_enable = ironlake_crtc_enable;
-               dev_priv->display.crtc_disable = ironlake_crtc_disable;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
-               dev_priv->display.crtc_enable = valleyview_crtc_enable;
-               dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
-               dev_priv->display.crtc_enable = valleyview_crtc_enable;
-               dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       } else if (IS_G4X(dev_priv)) {
-               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
-               dev_priv->display.crtc_enable = i9xx_crtc_enable;
-               dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       } else if (IS_PINEVIEW(dev_priv)) {
-               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
-               dev_priv->display.crtc_enable = i9xx_crtc_enable;
-               dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       } else if (!IS_GEN(dev_priv, 2)) {
-               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
-               dev_priv->display.crtc_enable = i9xx_crtc_enable;
-               dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       } else {
-               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_initial_plane_config =
-                       i9xx_get_initial_plane_config;
-               dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
-               dev_priv->display.crtc_enable = i9xx_crtc_enable;
-               dev_priv->display.crtc_disable = i9xx_crtc_disable;
-       }
-
-       if (IS_GEN(dev_priv, 5)) {
-               dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-       } else if (IS_GEN(dev_priv, 6)) {
-               dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-       } else if (IS_IVYBRIDGE(dev_priv)) {
-               /* FIXME: detect B0+ stepping and use auto training */
-               dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               dev_priv->display.update_crtcs = skl_update_crtcs;
-       else
-               dev_priv->display.update_crtcs = intel_update_crtcs;
-}
-
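intel_init_display_hooks() is the driver's per-platform vtable setup: every generation gets the same set of function pointers, chosen once so the rest of the code never repeats the platform checks. A compact sketch of the pattern, with invented ops and a simplified selection rule:

#include <stdio.h>

/* One ops table per platform class, chosen once at init time; callers
 * then go through the pointers without further platform checks. */
struct display_ops {
	const char *(*crtc_enable)(void);
};

static const char *skl_enable(void)  { return "skl+ crtc enable"; }
static const char *ilk_enable(void)  { return "ironlake crtc enable"; }
static const char *i9xx_enable(void) { return "i9xx crtc enable"; }

static const struct display_ops skl_ops  = { .crtc_enable = skl_enable };
static const struct display_ops ilk_ops  = { .crtc_enable = ilk_enable };
static const struct display_ops i9xx_ops = { .crtc_enable = i9xx_enable };

static const struct display_ops *pick_display_ops(int gen, int has_pch_split)
{
	if (gen >= 9)
		return &skl_ops;
	if (has_pch_split)
		return &ilk_ops;
	return &i9xx_ops;
}

int main(void)
{
	printf("%s\n", pick_display_ops(9, 0)->crtc_enable());
	printf("%s\n", pick_display_ops(5, 1)->crtc_enable());
	printf("%s\n", pick_display_ops(3, 0)->crtc_enable());
	return 0;
}
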
-static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return VLV_VGACNTRL;
-       else if (INTEL_GEN(dev_priv) >= 5)
-               return CPU_VGACNTRL;
-       else
-               return VGACNTRL;
-}
-
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_i915_private *dev_priv)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u8 sr1;
-       i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
-       /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
-       vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
-       outb(SR01, VGA_SR_INDEX);
-       sr1 = inb(VGA_SR_DATA);
-       outb(sr1 | 1<<5, VGA_SR_DATA);
-       vga_put(pdev, VGA_RSRC_LEGACY_IO);
-       udelay(300);
-
-       I915_WRITE(vga_reg, VGA_DISP_DISABLE);
-       POSTING_READ(vga_reg);
-}
-
-void intel_modeset_init_hw(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       intel_update_cdclk(dev_priv);
-       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-       dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
-}
-
-/*
- * Calculate what we think the watermarks should be for the state we've read
- * out of the hardware and then immediately program those watermarks so that
- * we ensure the hardware settings match our internal state.
- *
- * We can calculate what we think WM's should be by creating a duplicate of the
- * current state (which was constructed during hardware readout) and running it
- * through the atomic check code to calculate new watermark values in the
- * state object.
- */
-static void sanitize_watermarks(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *state;
-       struct intel_atomic_state *intel_state;
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *cstate;
-       struct drm_modeset_acquire_ctx ctx;
-       int ret;
-       int i;
-
-       /* Only supported on platforms that use atomic watermark design */
-       if (!dev_priv->display.optimize_watermarks)
-               return;
-
-       /*
-        * We need to hold connection_mutex before calling duplicate_state so
-        * that the connector loop is protected.
-        */
-       drm_modeset_acquire_init(&ctx, 0);
-retry:
-       ret = drm_modeset_lock_all_ctx(dev, &ctx);
-       if (ret == -EDEADLK) {
-               drm_modeset_backoff(&ctx);
-               goto retry;
-       } else if (WARN_ON(ret)) {
-               goto fail;
-       }
-
-       state = drm_atomic_helper_duplicate_state(dev, &ctx);
-       if (WARN_ON(IS_ERR(state)))
-               goto fail;
-
-       intel_state = to_intel_atomic_state(state);
-
-       /*
-        * Hardware readout is the only time we don't want to calculate
-        * intermediate watermarks (since we don't trust the current
-        * watermarks).
-        */
-       if (!HAS_GMCH(dev_priv))
-               intel_state->skip_intermediate_wm = true;
-
-       ret = intel_atomic_check(dev, state);
-       if (ret) {
-               /*
-                * If we fail here, it means that the hardware appears to be
-                * programmed in a way that shouldn't be possible, given our
-                * understanding of watermark requirements.  This might mean a
-                * mistake in the hardware readout code or a mistake in the
-                * watermark calculations for a given platform.  Raise a WARN
-                * so that this is noticeable.
-                *
-                * If this actually happens, we'll have to just leave the
-                * BIOS-programmed watermarks untouched and hope for the best.
-                */
-               WARN(true, "Could not determine valid watermarks for inherited state\n");
-               goto put_state;
-       }
-
-       /* Write calculated watermark values back */
-       for_each_new_crtc_in_state(state, crtc, cstate, i) {
-               struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
-
-               cs->wm.need_postvbl_update = true;
-               dev_priv->display.optimize_watermarks(intel_state, cs);
-
-               to_intel_crtc_state(crtc->state)->wm = cs->wm;
-       }
-
-put_state:
-       drm_atomic_state_put(state);
-fail:
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-}
-
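sanitize_watermarks() uses the standard drm_modeset_acquire pattern: any -EDEADLK from a lock attempt means another context won the race, so we back off and restart the whole acquisition. A toy rendering of that control flow with no real locks, only a simulated first-attempt deadlock:

#include <stdio.h>

enum { OK = 0, EDEADLK_SIM = 35 };	/* stand-in for -EDEADLK */

static int attempts;

static int lock_all(void)
{
	/* Pretend the first attempt races with another thread. */
	return ++attempts == 1 ? -EDEADLK_SIM : OK;
}

static void backoff(void) { /* drop contended locks, wait our turn */ }

int main(void)
{
	int ret;

retry:
	ret = lock_all();
	if (ret == -EDEADLK_SIM) {
		backoff();
		goto retry;
	}
	printf("locked after %d attempt(s)\n", attempts);
	return ret;
}
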
-static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
-{
-       if (IS_GEN(dev_priv, 5)) {
-               u32 fdi_pll_clk =
-                       I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
-
-               dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
-       } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
-               dev_priv->fdi_pll_freq = 270000;
-       } else {
-               return;
-       }
-
-       DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
-}
-
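The gen5 branch derives the FDI PLL frequency in kHz from the feedback-clock field read out of FDI_PLL_BIOS_0. A worked check of that arithmetic; the field value 25 is an assumed example, chosen because it lands on the 270000 constant the code uses for gen6/IVB:

#include <assert.h>
#include <stdint.h>

/* On gen5 the FDI PLL frequency (kHz) comes from the feedback divider
 * field, exactly as in the code above. */
static uint32_t fdi_pll_freq_khz(uint32_t fdi_pll_fb_clk)
{
	return (fdi_pll_fb_clk + 2) * 10000;
}

int main(void)
{
	/* An assumed field value of 25 yields 270000, matching the fixed
	 * constant used for gen6/IVB. */
	assert(fdi_pll_freq_khz(25) == 270000);
	return 0;
}
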
-static int intel_initial_commit(struct drm_device *dev)
-{
-       struct drm_atomic_state *state = NULL;
-       struct drm_modeset_acquire_ctx ctx;
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       int ret = 0;
-
-       state = drm_atomic_state_alloc(dev);
-       if (!state)
-               return -ENOMEM;
-
-       drm_modeset_acquire_init(&ctx, 0);
-
-retry:
-       state->acquire_ctx = &ctx;
-
-       drm_for_each_crtc(crtc, dev) {
-               crtc_state = drm_atomic_get_crtc_state(state, crtc);
-               if (IS_ERR(crtc_state)) {
-                       ret = PTR_ERR(crtc_state);
-                       goto out;
-               }
-
-               if (crtc_state->active) {
-                       ret = drm_atomic_add_affected_planes(state, crtc);
-                       if (ret)
-                               goto out;
-
-                       /*
-                        * FIXME hack to force a LUT update to avoid the
-                        * plane update forcing the pipe gamma on without
-                        * having a proper LUT loaded. Remove once we
-                        * have readout for pipe gamma enable.
-                        */
-                       crtc_state->color_mgmt_changed = true;
-               }
-       }
-
-       ret = drm_atomic_commit(state);
-
-out:
-       if (ret == -EDEADLK) {
-               drm_atomic_state_clear(state);
-               drm_modeset_backoff(&ctx);
-               goto retry;
-       }
-
-       drm_atomic_state_put(state);
-
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-
-       return ret;
-}
-
-int intel_modeset_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       enum pipe pipe;
-       struct intel_crtc *crtc;
-       int ret;
-
-       dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
-
-       drm_mode_config_init(dev);
-
-       ret = intel_bw_init(dev_priv);
-       if (ret)
-               return ret;
-
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
-
-       dev->mode_config.preferred_depth = 24;
-       dev->mode_config.prefer_shadow = 1;
-
-       dev->mode_config.allow_fb_modifiers = true;
-
-       dev->mode_config.funcs = &intel_mode_funcs;
-
-       init_llist_head(&dev_priv->atomic_helper.free_list);
-       INIT_WORK(&dev_priv->atomic_helper.free_work,
-                 intel_atomic_helper_free_state_worker);
-
-       intel_init_quirks(dev_priv);
-
-       intel_fbc_init(dev_priv);
-
-       intel_init_pm(dev_priv);
-
-       /*
-        * There may be no VBT; and if the BIOS enabled SSC we can
-        * just keep using it to avoid unnecessary flicker.  Whereas if the
-        * BIOS isn't using it, don't assume it will work even if the VBT
-        * indicates as much.
-        */
-       if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
-               bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
-                                           DREF_SSC1_ENABLE);
-
-               if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
-                       DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
-                                    bios_lvds_use_ssc ? "en" : "dis",
-                                    dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
-                       dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
-               }
-       }
-
-       /*
-        * Maximum framebuffer dimensions, chosen to match
-        * the maximum render engine surface size on gen4+.
-        */
-       if (INTEL_GEN(dev_priv) >= 7) {
-               dev->mode_config.max_width = 16384;
-               dev->mode_config.max_height = 16384;
-       } else if (INTEL_GEN(dev_priv) >= 4) {
-               dev->mode_config.max_width = 8192;
-               dev->mode_config.max_height = 8192;
-       } else if (IS_GEN(dev_priv, 3)) {
-               dev->mode_config.max_width = 4096;
-               dev->mode_config.max_height = 4096;
-       } else {
-               dev->mode_config.max_width = 2048;
-               dev->mode_config.max_height = 2048;
-       }
-
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
-               dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
-               dev->mode_config.cursor_height = 1023;
-       } else if (IS_GEN(dev_priv, 2)) {
-               dev->mode_config.cursor_width = 64;
-               dev->mode_config.cursor_height = 64;
-       } else {
-               dev->mode_config.cursor_width = 256;
-               dev->mode_config.cursor_height = 256;
-       }
-
-       dev->mode_config.fb_base = ggtt->gmadr.start;
-
-       DRM_DEBUG_KMS("%d display pipe%s available.\n",
-                     INTEL_INFO(dev_priv)->num_pipes,
-                     INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
-
-       for_each_pipe(dev_priv, pipe) {
-               ret = intel_crtc_init(dev_priv, pipe);
-               if (ret) {
-                       drm_mode_config_cleanup(dev);
-                       return ret;
-               }
-       }
-
-       intel_shared_dpll_init(dev);
-       intel_update_fdi_pll_freq(dev_priv);
-
-       intel_update_czclk(dev_priv);
-       intel_modeset_init_hw(dev);
-
-       intel_hdcp_component_init(dev_priv);
-
-       if (dev_priv->max_cdclk_freq == 0)
-               intel_update_max_cdclk(dev_priv);
-
-       /* Just disable it once at startup */
-       i915_disable_vga(dev_priv);
-       intel_setup_outputs(dev_priv);
-
-       drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
-       drm_modeset_unlock_all(dev);
-
-       for_each_intel_crtc(dev, crtc) {
-               struct intel_initial_plane_config plane_config = {};
-
-               if (!crtc->active)
-                       continue;
-
-               /*
-                * Note that reserving the BIOS fb up front prevents us
-                * from stuffing other stolen allocations like the ring
-                * on top.  This prevents some ugliness at boot time, and
-                * can even allow for smooth boot transitions if the BIOS
-                * fb is large enough for the active pipe configuration.
-                */
-               dev_priv->display.get_initial_plane_config(crtc,
-                                                          &plane_config);
-
-               /*
-                * If the fb is shared between multiple heads, we'll
-                * just get the first one.
-                */
-               intel_find_initial_plane_obj(crtc, &plane_config);
-       }
-
-       /*
-        * Make sure hardware watermarks really match the state we read out.
-        * Note that we need to do this after reconstructing the BIOS fb's
-        * since the watermark calculation done here will use pstate->fb.
-        */
-       if (!HAS_GMCH(dev_priv))
-               sanitize_watermarks(dev);
-
-       /*
-        * Force all active planes to recompute their states. So that on
-        * mode_setcrtc after probe, all the intel_plane_state variables
-        * are already calculated and there is no assert_plane warnings
-        * during bootup.
-        */
-       ret = intel_initial_commit(dev);
-       if (ret)
-               DRM_DEBUG_KMS("Initial commit in probe failed.\n");
-
-       return 0;
-}
-
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-       /* 640x480@60Hz, ~25175 kHz */
-       struct dpll clock = {
-               .m1 = 18,
-               .m2 = 7,
-               .p1 = 13,
-               .p2 = 4,
-               .n = 2,
-       };
-       u32 dpll, fp;
-       int i;
-
-       WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
-
-       DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
-                     pipe_name(pipe), clock.vco, clock.dot);
-
-       fp = i9xx_dpll_compute_fp(&clock);
-       dpll = DPLL_DVO_2X_MODE |
-               DPLL_VGA_MODE_DIS |
-               ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
-               PLL_P2_DIVIDE_BY_4 |
-               PLL_REF_INPUT_DREFCLK |
-               DPLL_VCO_ENABLE;
-
-       I915_WRITE(FP0(pipe), fp);
-       I915_WRITE(FP1(pipe), fp);
-
-       I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
-       I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
-       I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
-       I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
-       I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
-       I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
-       I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
-
-       /*
-        * Apparently we need to have VGA mode enabled prior to changing
-        * the P1/P2 dividers. Otherwise the DPLL will keep using the old
-        * dividers, even though the register value does change.
-        */
-       I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
-       I915_WRITE(DPLL(pipe), dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       /* The pixel multiplier can only be updated once the
-        * DPLL is enabled and the clocks are stable.
-        *
-        * So write it again.
-        */
-       I915_WRITE(DPLL(pipe), dpll);
-
-       /* We do this three times for luck */
-       for (i = 0; i < 3 ; i++) {
-               I915_WRITE(DPLL(pipe), dpll);
-               POSTING_READ(DPLL(pipe));
-               udelay(150); /* wait for warmup */
-       }
-
-       I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
-       POSTING_READ(PIPECONF(pipe));
-
-       intel_wait_for_pipe_scanline_moving(crtc);
-}
-
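The WARN_ON in i830_enable_pipe() can be reproduced by hand, assuming the usual i9xx divider formula m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2) and dot = vco / (p1 * p2) with round-to-nearest division; the chosen dividers give 25154 kHz against the nominal ~25175 kHz VGA dot clock:

#include <assert.h>

/* Round-to-nearest integer division, positive operands only. */
static int div_round_closest(long long a, long long b)
{
	return (int)((a + b / 2) / b);
}

int main(void)
{
	const int refclk = 48000;			/* kHz */
	const int m1 = 18, m2 = 7, p1 = 13, p2 = 4, n = 2;

	int m = 5 * (m1 + 2) + (m2 + 2);		/* 109 */
	int vco = div_round_closest((long long)refclk * m, n + 2);
	int dot = div_round_closest(vco, p1 * p2);

	assert(vco == 1308000);		/* kHz */
	assert(dot == 25154);		/* matches the WARN_ON check above */
	return 0;
}
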
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-       DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
-                     pipe_name(pipe));
-
-       WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
-       WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
-       WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
-       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
-       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
-
-       I915_WRITE(PIPECONF(pipe), 0);
-       POSTING_READ(PIPECONF(pipe));
-
-       intel_wait_for_pipe_scanline_stopped(crtc);
-
-       I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
-       POSTING_READ(DPLL(pipe));
-}
-
-static void
-intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
-{
-       struct intel_crtc *crtc;
-
-       if (INTEL_GEN(dev_priv) >= 4)
-               return;
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               struct intel_plane *plane =
-                       to_intel_plane(crtc->base.primary);
-               struct intel_crtc *plane_crtc;
-               enum pipe pipe;
-
-               if (!plane->get_hw_state(plane, &pipe))
-                       continue;
-
-               if (pipe == crtc->pipe)
-                       continue;
-
-               DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
-                             plane->base.base.id, plane->base.name);
-
-               plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-               intel_plane_disable_noatomic(plane_crtc, plane);
-       }
-}
-
-static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_encoder *encoder;
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               return true;
-
-       return false;
-}
-
-static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
-{
-       struct drm_device *dev = encoder->base.dev;
-       struct intel_connector *connector;
-
-       for_each_connector_on_encoder(dev, &encoder->base, connector)
-               return connector;
-
-       return NULL;
-}
-
-static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
-                             enum pipe pch_transcoder)
-{
-       return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
-               (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
-}
-
-static void intel_sanitize_crtc(struct intel_crtc *crtc,
-                               struct drm_modeset_acquire_ctx *ctx)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
-       /* Clear any frame start delays used for debugging left by the BIOS */
-       if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
-               i915_reg_t reg = PIPECONF(cpu_transcoder);
-
-               I915_WRITE(reg,
-                          I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
-       }
-
-       if (crtc_state->base.active) {
-               struct intel_plane *plane;
-
-               /* Disable everything but the primary plane */
-               for_each_intel_plane_on_crtc(dev, crtc, plane) {
-                       const struct intel_plane_state *plane_state =
-                               to_intel_plane_state(plane->base.state);
-
-                       if (plane_state->base.visible &&
-                           plane->base.type != DRM_PLANE_TYPE_PRIMARY)
-                               intel_plane_disable_noatomic(crtc, plane);
-               }
-
-               /*
-                * Disable any background color set by the BIOS, but enable the
-                * gamma and CSC to match how we program our planes.
-                */
-               if (INTEL_GEN(dev_priv) >= 9)
-                       I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
-                                  SKL_BOTTOM_COLOR_GAMMA_ENABLE |
-                                  SKL_BOTTOM_COLOR_CSC_ENABLE);
-       }
-
-       /* Adjust the state of the output pipe according to whether we
-        * have active connectors/encoders. */
-       if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
-               intel_crtc_disable_noatomic(&crtc->base, ctx);
-
-       if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
-               /*
-                * We start out with underrun reporting disabled to avoid races.
-                * For correct bookkeeping mark this on active crtcs.
-                *
-                * Also on gmch platforms we don't have any hardware bits to
-                * disable the underrun reporting. Which means we need to start
-                * out with underrun reporting disabled also on inactive pipes,
-                * since otherwise we'll complain about the garbage we read when
-                * e.g. coming up after runtime pm.
-                *
-                * No protection against concurrent access is required - at
-                * worst a fifo underrun happens which also sets this to false.
-                */
-               crtc->cpu_fifo_underrun_disabled = true;
-               /*
-                * We track the PCH transcoder underrun reporting state
-                * within the crtc. With crtc for pipe A housing the underrun
-                * reporting state for PCH transcoder A, crtc for pipe B housing
-                * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
-                * and marking underrun reporting as disabled for the non-existing
-                * PCH transcoders B and C would prevent enabling the south
-                * error interrupt (see cpt_can_enable_serr_int()).
-                */
-               if (has_pch_trancoder(dev_priv, crtc->pipe))
-                       crtc->pch_fifo_underrun_disabled = true;
-       }
-}
-
-static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       /*
-        * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
-        * the hardware when a high res display is plugged in. DPLL P
-        * divider is zero, and the pipe timings are bonkers. We'll
-        * try to disable everything in that case.
-        *
-        * FIXME would be nice to be able to sanitize this state
-        * without several WARNs, but for now let's take the easy
-        * road.
-        */
-       return IS_GEN(dev_priv, 6) &&
-               crtc_state->base.active &&
-               crtc_state->shared_dpll &&
-               crtc_state->port_clock == 0;
-}
-
-static void intel_sanitize_encoder(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_connector *connector;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_crtc_state *crtc_state = crtc ?
-               to_intel_crtc_state(crtc->base.state) : NULL;
-
-       /* We need to check both for a crtc link (meaning that the
-        * encoder is active and trying to read from a pipe) and the
-        * pipe itself being active. */
-       bool has_active_crtc = crtc_state &&
-               crtc_state->base.active;
-
-       if (crtc_state && has_bogus_dpll_config(crtc_state)) {
-               DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
-                             pipe_name(crtc->pipe));
-               has_active_crtc = false;
-       }
-
-       connector = intel_encoder_find_connector(encoder);
-       if (connector && !has_active_crtc) {
-               DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
-                             encoder->base.base.id,
-                             encoder->base.name);
-
-               /* Connector is active, but has no active pipe. This is
-                * fallout from our resume register restoring. Disable
-                * the encoder manually again. */
-               if (crtc_state) {
-                       struct drm_encoder *best_encoder;
-
-                       DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
-                                     encoder->base.base.id,
-                                     encoder->base.name);
-
-                       /* avoid oopsing in case the hooks consult best_encoder */
-                       best_encoder = connector->base.state->best_encoder;
-                       connector->base.state->best_encoder = &encoder->base;
-
-                       if (encoder->disable)
-                               encoder->disable(encoder, crtc_state,
-                                                connector->base.state);
-                       if (encoder->post_disable)
-                               encoder->post_disable(encoder, crtc_state,
-                                                     connector->base.state);
-
-                       connector->base.state->best_encoder = best_encoder;
-               }
-               encoder->base.crtc = NULL;
-
-               /* Inconsistent output/port/pipe state happens presumably due to
-                * a bug in one of the get_hw_state functions, or someplace else
-                * in our code, like the register restore mess on resume. Clamp
-                * things to off as a safer default. */
-
-               connector->base.dpms = DRM_MODE_DPMS_OFF;
-               connector->base.encoder = NULL;
-       }
-
-       /* notify opregion of the sanitized encoder state */
-       intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               icl_sanitize_encoder_pll_mapping(encoder);
-}
-
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
-{
-       i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
-       if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
-               DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
-               i915_disable_vga(dev_priv);
-       }
-}
-
-void i915_redisable_vga(struct drm_i915_private *dev_priv)
-{
-       intel_wakeref_t wakeref;
-
-       /*
-        * This function can be called either from intel_modeset_setup_hw_state or
-        * at a very early point in our resume sequence, where the power well
-        * structures are not yet restored. Since this function is at a very
-        * paranoid "someone might have enabled VGA while we were not looking"
-        * level, just check if the power well is enabled instead of trying to
-        * follow the "don't touch the power well if we don't need it" policy
-        * the rest of the driver uses.
-        */
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_VGA);
-       if (!wakeref)
-               return;
-
-       i915_redisable_vga_power_on(dev_priv);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
-}
-
-/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *dev_priv)
-{
-       struct intel_plane *plane;
-       struct intel_crtc *crtc;
-
-       for_each_intel_plane(&dev_priv->drm, plane) {
-               struct intel_plane_state *plane_state =
-                       to_intel_plane_state(plane->base.state);
-               struct intel_crtc_state *crtc_state;
-               enum pipe pipe = PIPE_A;
-               bool visible;
-
-               visible = plane->get_hw_state(plane, &pipe);
-
-               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-               crtc_state = to_intel_crtc_state(crtc->base.state);
-
-               intel_set_plane_visible(crtc_state, plane_state, visible);
-
-               DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
-                             plane->base.base.id, plane->base.name,
-                             enableddisabled(visible), pipe_name(pipe));
-       }
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               struct intel_crtc_state *crtc_state =
-                       to_intel_crtc_state(crtc->base.state);
-
-               fixup_active_planes(crtc_state);
-       }
-}
-
-static void intel_modeset_readout_hw_state(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum pipe pipe;
-       struct intel_crtc *crtc;
-       struct intel_encoder *encoder;
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       int i;
-
-       dev_priv->active_crtcs = 0;
-
-       for_each_intel_crtc(dev, crtc) {
-               struct intel_crtc_state *crtc_state =
-                       to_intel_crtc_state(crtc->base.state);
-
-               __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
-               memset(crtc_state, 0, sizeof(*crtc_state));
-               __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
-
-               crtc_state->base.active = crtc_state->base.enable =
-                       dev_priv->display.get_pipe_config(crtc, crtc_state);
-
-               crtc->base.enabled = crtc_state->base.enable;
-               crtc->active = crtc_state->base.active;
-
-               if (crtc_state->base.active)
-                       dev_priv->active_crtcs |= 1 << crtc->pipe;
-
-               DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
-                             crtc->base.base.id, crtc->base.name,
-                             enableddisabled(crtc_state->base.active));
-       }
-
-       readout_plane_state(dev_priv);
-
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-               pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
-                                                       &pll->state.hw_state);
-               pll->state.crtc_mask = 0;
-               for_each_intel_crtc(dev, crtc) {
-                       struct intel_crtc_state *crtc_state =
-                               to_intel_crtc_state(crtc->base.state);
-
-                       if (crtc_state->base.active &&
-                           crtc_state->shared_dpll == pll)
-                               pll->state.crtc_mask |= 1 << crtc->pipe;
-               }
-               pll->active_mask = pll->state.crtc_mask;
-
-               DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
-                             pll->info->name, pll->state.crtc_mask, pll->on);
-       }
-
-       for_each_intel_encoder(dev, encoder) {
-               pipe = 0;
-
-               if (encoder->get_hw_state(encoder, &pipe)) {
-                       struct intel_crtc_state *crtc_state;
-
-                       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-                       crtc_state = to_intel_crtc_state(crtc->base.state);
-
-                       encoder->base.crtc = &crtc->base;
-                       encoder->get_config(encoder, crtc_state);
-               } else {
-                       encoder->base.crtc = NULL;
-               }
-
-               DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
-                             encoder->base.base.id, encoder->base.name,
-                             enableddisabled(encoder->base.crtc),
-                             pipe_name(pipe));
-       }
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter) {
-               if (connector->get_hw_state(connector)) {
-                       connector->base.dpms = DRM_MODE_DPMS_ON;
-
-                       encoder = connector->encoder;
-                       connector->base.encoder = &encoder->base;
-
-                       if (encoder->base.crtc &&
-                           encoder->base.crtc->state->active) {
-                               /*
-                                * This has to be done during hardware readout
-                                * because anything calling .crtc_disable may
-                                * rely on the connector_mask being accurate.
-                                */
-                               encoder->base.crtc->state->connector_mask |=
-                                       drm_connector_mask(&connector->base);
-                               encoder->base.crtc->state->encoder_mask |=
-                                       drm_encoder_mask(&encoder->base);
-                       }
-
-               } else {
-                       connector->base.dpms = DRM_MODE_DPMS_OFF;
-                       connector->base.encoder = NULL;
-               }
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
-                             connector->base.base.id, connector->base.name,
-                             enableddisabled(connector->base.encoder));
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       for_each_intel_crtc(dev, crtc) {
-               struct intel_bw_state *bw_state =
-                       to_intel_bw_state(dev_priv->bw_obj.state);
-               struct intel_crtc_state *crtc_state =
-                       to_intel_crtc_state(crtc->base.state);
-               struct intel_plane *plane;
-               int min_cdclk = 0;
-
-               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
-               if (crtc_state->base.active) {
-                       intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
-                       crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
-                       crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
-                       intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
-                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
-
-                       /*
-                        * The initial mode needs to be set in order to keep
-                        * the atomic core happy. It wants a valid mode if the
-                        * crtc's enabled, so we do the above call.
-                        *
-                        * But we don't set all the derived state fully, hence
-                        * set a flag to indicate that a full recalculation is
-                        * needed on the next commit.
-                        */
-                       crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
-
-                       intel_crtc_compute_pixel_rate(crtc_state);
-
-                       if (dev_priv->display.modeset_calc_cdclk) {
-                               min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
-                               if (WARN_ON(min_cdclk < 0))
-                                       min_cdclk = 0;
-                       }
-
-                       drm_calc_timestamping_constants(&crtc->base,
-                                                       &crtc_state->base.adjusted_mode);
-                       update_scanline_offset(crtc_state);
-               }
-
-               dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
-               dev_priv->min_voltage_level[crtc->pipe] =
-                       crtc_state->min_voltage_level;
-
-               for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
-                       const struct intel_plane_state *plane_state =
-                               to_intel_plane_state(plane->base.state);
-
-                       /*
-                        * FIXME don't have the fb yet, so can't
-                        * use intel_plane_data_rate() :(
-                        */
-                       if (plane_state->base.visible)
-                               crtc_state->data_rate[plane->id] =
-                                       4 * crtc_state->pixel_rate;
-               }
-
-               intel_bw_crtc_update(bw_state, crtc_state);
-
-               intel_pipe_config_sanity_check(dev_priv, crtc_state);
-       }
-}
-
-static void
-get_encoder_power_domains(struct drm_i915_private *dev_priv)
-{
-       struct intel_encoder *encoder;
-
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_crtc_state *crtc_state;
-
-               if (!encoder->get_power_domains)
-                       continue;
-
-               /*
-                * MST-primary and inactive encoders don't have a crtc state
-                * and neither of these requires any power domain references.
-                */
-               if (!encoder->base.crtc)
-                       continue;
-
-               crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-               encoder->get_power_domains(encoder, crtc_state);
-       }
-}
-
-static void intel_early_display_was(struct drm_i915_private *dev_priv)
-{
-       /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
-       if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
-               I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
-                          DARBF_GATING_DIS);
-
-       if (IS_HASWELL(dev_priv)) {
-               /*
-                * WaRsPkgCStateDisplayPMReq:hsw
-                * System hang if this isn't done before disabling all planes!
-                */
-               I915_WRITE(CHICKEN_PAR1_1,
-                          I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
-       }
-}
-
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
-                                      enum port port, i915_reg_t hdmi_reg)
-{
-       u32 val = I915_READ(hdmi_reg);
-
-       if (val & SDVO_ENABLE ||
-           (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
-               return;
-
-       DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
-                     port_name(port));
-
-       val &= ~SDVO_PIPE_SEL_MASK;
-       val |= SDVO_PIPE_SEL(PIPE_A);
-
-       I915_WRITE(hdmi_reg, val);
-}
-
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
-                                    enum port port, i915_reg_t dp_reg)
-{
-       u32 val = I915_READ(dp_reg);
-
-       if (val & DP_PORT_EN ||
-           (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
-               return;
-
-       DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
-                     port_name(port));
-
-       val &= ~DP_PIPE_SEL_MASK;
-       val |= DP_PIPE_SEL(PIPE_A);
-
-       I915_WRITE(dp_reg, val);
-}
-
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
-{
-       /*
-        * The BIOS may select transcoder B on some of the PCH
-        * ports even if it doesn't enable the port. This would trip
-        * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
-        * Sanitize the transcoder select bits to prevent that. We
-        * assume that the BIOS never actually enabled the port,
-        * because if it did we'd actually have to toggle the port
-        * on and back off to make the transcoder A select stick
-        * (see intel_dp_link_down(), intel_disable_hdmi(),
-        * intel_disable_sdvo()).
-        */
-       ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
-       ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
-       ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
-
-       /* PCH SDVOB multiplex with HDMIB */
-       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
-       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
-       ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
-}
-
-/* Scans out the current HW modeset state
- * and sanitizes it to the current state.
- */
-static void
-intel_modeset_setup_hw_state(struct drm_device *dev,
-                            struct drm_modeset_acquire_ctx *ctx)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc_state *crtc_state;
-       struct intel_encoder *encoder;
-       struct intel_crtc *crtc;
-       intel_wakeref_t wakeref;
-       int i;
-
-       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
-       intel_early_display_was(dev_priv);
-       intel_modeset_readout_hw_state(dev);
-
-       /* HW state is read out, now we need to sanitize this mess. */
-       get_encoder_power_domains(dev_priv);
-
-       if (HAS_PCH_IBX(dev_priv))
-               ibx_sanitize_pch_ports(dev_priv);
-
-       /*
-        * intel_sanitize_plane_mapping() may need to do vblank
-        * waits, so we need vblank interrupts restored beforehand.
-        */
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               crtc_state = to_intel_crtc_state(crtc->base.state);
-
-               drm_crtc_vblank_reset(&crtc->base);
-
-               if (crtc_state->base.active)
-                       intel_crtc_vblank_on(crtc_state);
-       }
-
-       intel_sanitize_plane_mapping(dev_priv);
-
-       for_each_intel_encoder(dev, encoder)
-               intel_sanitize_encoder(encoder);
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               crtc_state = to_intel_crtc_state(crtc->base.state);
-               intel_sanitize_crtc(crtc, ctx);
-               intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
-       }
-
-       intel_modeset_update_connector_atomic_state(dev);
-
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-               if (!pll->on || pll->active_mask)
-                       continue;
-
-               DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
-                             pll->info->name);
-
-               pll->info->funcs->disable(dev_priv, pll);
-               pll->on = false;
-       }
-
-       if (IS_G4X(dev_priv)) {
-               g4x_wm_get_hw_state(dev_priv);
-               g4x_wm_sanitize(dev_priv);
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               vlv_wm_get_hw_state(dev_priv);
-               vlv_wm_sanitize(dev_priv);
-       } else if (INTEL_GEN(dev_priv) >= 9) {
-               skl_wm_get_hw_state(dev_priv);
-       } else if (HAS_PCH_SPLIT(dev_priv)) {
-               ilk_wm_get_hw_state(dev_priv);
-       }
-
-       for_each_intel_crtc(dev, crtc) {
-               u64 put_domains;
-
-               crtc_state = to_intel_crtc_state(crtc->base.state);
-               put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
-               if (WARN_ON(put_domains))
-                       modeset_put_power_domains(dev_priv, put_domains);
-       }
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
-       intel_fbc_init_pipe_state(dev_priv);
-}
-
-void intel_display_resume(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *state = dev_priv->modeset_restore_state;
-       struct drm_modeset_acquire_ctx ctx;
-       int ret;
-
-       dev_priv->modeset_restore_state = NULL;
-       if (state)
-               state->acquire_ctx = &ctx;
-
-       drm_modeset_acquire_init(&ctx, 0);
-
-       while (1) {
-               ret = drm_modeset_lock_all_ctx(dev, &ctx);
-               if (ret != -EDEADLK)
-                       break;
-
-               drm_modeset_backoff(&ctx);
-       }
-
-       if (!ret)
-               ret = __intel_display_resume(dev, state, &ctx);
-
-       intel_enable_ipc(dev_priv);
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-
-       if (ret)
-               DRM_ERROR("Restoring old state failed with %i\n", ret);
-       if (state)
-               drm_atomic_state_put(state);
-}
-
-static void intel_hpd_poll_fini(struct drm_device *dev)
-{
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-
-       /* Kill all the work that may have been queued by hpd. */
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter) {
-               if (connector->modeset_retry_work.func)
-                       cancel_work_sync(&connector->modeset_retry_work);
-               if (connector->hdcp.shim) {
-                       cancel_delayed_work_sync(&connector->hdcp.check_work);
-                       cancel_work_sync(&connector->hdcp.prop_work);
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-}
-
-void intel_modeset_cleanup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       flush_workqueue(dev_priv->modeset_wq);
-
-       flush_work(&dev_priv->atomic_helper.free_work);
-       WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
-
-       /*
-        * Uninstall interrupts and disable polling first to avoid creating
-        * havoc. Too much stuff here (turning off connectors, ...) would
-        * experience fancy races otherwise.
-        */
-       intel_irq_uninstall(dev_priv);
-
-       /*
-        * Due to the hpd irq storm handling the hotplug work can re-arm the
-        * poll handlers. Hence disable polling after hpd handling is shut down.
-        */
-       intel_hpd_poll_fini(dev);
-
-       /* poll work can call into fbdev, hence clean that up afterwards */
-       intel_fbdev_fini(dev_priv);
-
-       intel_unregister_dsm_handler();
-
-       intel_fbc_global_disable(dev_priv);
-
-       /* flush any delayed tasks or pending work */
-       flush_scheduled_work();
-
-       intel_hdcp_component_fini(dev_priv);
-
-       drm_mode_config_cleanup(dev);
-
-       intel_overlay_cleanup(dev_priv);
-
-       intel_gmbus_teardown(dev_priv);
-
-       destroy_workqueue(dev_priv->modeset_wq);
-
-       intel_fbc_cleanup_cfb(dev_priv);
-}
-
-/*
- * Set VGA decode state - true == enable VGA decode.
- */
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
-{
-       unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
-       u16 gmch_ctrl;
-
-       if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
-               DRM_ERROR("failed to read control word\n");
-               return -EIO;
-       }
-
-       if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
-               return 0;
-
-       if (state)
-               gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
-       else
-               gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
-       if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
-               DRM_ERROR("failed to write control word\n");
-               return -EIO;
-       }
-
-       return 0;
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-struct intel_display_error_state {
-
-       u32 power_well_driver;
-
-       struct intel_cursor_error_state {
-               u32 control;
-               u32 position;
-               u32 base;
-               u32 size;
-       } cursor[I915_MAX_PIPES];
-
-       struct intel_pipe_error_state {
-               bool power_domain_on;
-               u32 source;
-               u32 stat;
-       } pipe[I915_MAX_PIPES];
-
-       struct intel_plane_error_state {
-               u32 control;
-               u32 stride;
-               u32 size;
-               u32 pos;
-               u32 addr;
-               u32 surface;
-               u32 tile_offset;
-       } plane[I915_MAX_PIPES];
-
-       struct intel_transcoder_error_state {
-               bool available;
-               bool power_domain_on;
-               enum transcoder cpu_transcoder;
-
-               u32 conf;
-
-               u32 htotal;
-               u32 hblank;
-               u32 hsync;
-               u32 vtotal;
-               u32 vblank;
-               u32 vsync;
-       } transcoder[4];
-};
-
-struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv)
-{
-       struct intel_display_error_state *error;
-       int transcoders[] = {
-               TRANSCODER_A,
-               TRANSCODER_B,
-               TRANSCODER_C,
-               TRANSCODER_EDP,
-       };
-       int i;
-
-       BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
-
-       if (!HAS_DISPLAY(dev_priv))
-               return NULL;
-
-       error = kzalloc(sizeof(*error), GFP_ATOMIC);
-       if (error == NULL)
-               return NULL;
-
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
-
-       for_each_pipe(dev_priv, i) {
-               error->pipe[i].power_domain_on =
-                       __intel_display_power_is_enabled(dev_priv,
-                                                        POWER_DOMAIN_PIPE(i));
-               if (!error->pipe[i].power_domain_on)
-                       continue;
-
-               error->cursor[i].control = I915_READ(CURCNTR(i));
-               error->cursor[i].position = I915_READ(CURPOS(i));
-               error->cursor[i].base = I915_READ(CURBASE(i));
-
-               error->plane[i].control = I915_READ(DSPCNTR(i));
-               error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-               if (INTEL_GEN(dev_priv) <= 3) {
-                       error->plane[i].size = I915_READ(DSPSIZE(i));
-                       error->plane[i].pos = I915_READ(DSPPOS(i));
-               }
-               if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
-                       error->plane[i].addr = I915_READ(DSPADDR(i));
-               if (INTEL_GEN(dev_priv) >= 4) {
-                       error->plane[i].surface = I915_READ(DSPSURF(i));
-                       error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
-               }
-
-               error->pipe[i].source = I915_READ(PIPESRC(i));
-
-               if (HAS_GMCH(dev_priv))
-                       error->pipe[i].stat = I915_READ(PIPESTAT(i));
-       }
-
-       for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
-               enum transcoder cpu_transcoder = transcoders[i];
-
-               if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
-                       continue;
-
-               error->transcoder[i].available = true;
-               error->transcoder[i].power_domain_on =
-                       __intel_display_power_is_enabled(dev_priv,
-                               POWER_DOMAIN_TRANSCODER(cpu_transcoder));
-               if (!error->transcoder[i].power_domain_on)
-                       continue;
-
-               error->transcoder[i].cpu_transcoder = cpu_transcoder;
-
-               error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
-               error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-               error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-               error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-               error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-               error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-               error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
-       }
-
-       return error;
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-
-void
-intel_display_print_error_state(struct drm_i915_error_state_buf *m,
-                               struct intel_display_error_state *error)
-{
-       struct drm_i915_private *dev_priv = m->i915;
-       int i;
-
-       if (!error)
-               return;
-
-       err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               err_printf(m, "PWR_WELL_CTL2: %08x\n",
-                          error->power_well_driver);
-       for_each_pipe(dev_priv, i) {
-               err_printf(m, "Pipe [%d]:\n", i);
-               err_printf(m, "  Power: %s\n",
-                          onoff(error->pipe[i].power_domain_on));
-               err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-               err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
-
-               err_printf(m, "Plane [%d]:\n", i);
-               err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
-               err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
-               if (INTEL_GEN(dev_priv) <= 3) {
-                       err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
-                       err_printf(m, "  POS: %08x\n", error->plane[i].pos);
-               }
-               if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
-                       err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
-               if (INTEL_GEN(dev_priv) >= 4) {
-                       err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
-                       err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
-               }
-
-               err_printf(m, "Cursor [%d]:\n", i);
-               err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
-               err_printf(m, "  POS: %08x\n", error->cursor[i].position);
-               err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
-       }
-
-       for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
-               if (!error->transcoder[i].available)
-                       continue;
-
-               err_printf(m, "CPU transcoder: %s\n",
-                          transcoder_name(error->transcoder[i].cpu_transcoder));
-               err_printf(m, "  Power: %s\n",
-                          onoff(error->transcoder[i].power_domain_on));
-               err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
-               err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
-               err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
-               err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
-               err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
-               err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
-               err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
-       }
-}
-
-#endif
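
A minimal illustrative sketch (not part of this patch) of how the capture/print
pair above could be combined by error handling code; the wrapper name and the
trailing kfree() are assumptions for the example, only the two function
signatures are taken from the file above.

	/*
	 * Hypothetical helper: capture the display hardware state and dump it
	 * into an error state buffer. intel_display_capture_error_state()
	 * allocates with GFP_ATOMIC and may return NULL (no display hardware,
	 * or allocation failure), so the result is checked before printing
	 * and freed afterwards. Assumes the usual i915 driver headers.
	 */
	static void example_dump_display_state(struct drm_i915_private *dev_priv,
					       struct drm_i915_error_state_buf *m)
	{
		struct intel_display_error_state *error;

		error = intel_display_capture_error_state(dev_priv);
		if (!error)
			return;

		intel_display_print_error_state(m, error);
		kfree(error);
	}
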
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
deleted file mode 100644 (file)
index ee6b819..0000000
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright © 2006-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_DISPLAY_H_
-#define _INTEL_DISPLAY_H_
-
-#include <drm/drm_util.h>
-#include <drm/i915_drm.h>
-
-struct drm_i915_private;
-struct intel_plane_state;
-
-enum i915_gpio {
-       GPIOA,
-       GPIOB,
-       GPIOC,
-       GPIOD,
-       GPIOE,
-       GPIOF,
-       GPIOG,
-       GPIOH,
-       __GPIOI_UNUSED,
-       GPIOJ,
-       GPIOK,
-       GPIOL,
-       GPIOM,
-};
-
-/*
- * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
- * rest have consecutive values and match the enum values of transcoders
- * with a 1:1 transcoder -> pipe mapping.
- */
-enum pipe {
-       INVALID_PIPE = -1,
-
-       PIPE_A = 0,
-       PIPE_B,
-       PIPE_C,
-       _PIPE_EDP,
-
-       I915_MAX_PIPES = _PIPE_EDP
-};
-
-#define pipe_name(p) ((p) + 'A')
-
-enum transcoder {
-       /*
-        * The following transcoders have a 1:1 transcoder -> pipe mapping,
-        * keep their values fixed: the code assumes that TRANSCODER_A=0, the
-        * rest have consecutive values and match the enum values of the pipes
-        * they map to.
-        */
-       TRANSCODER_A = PIPE_A,
-       TRANSCODER_B = PIPE_B,
-       TRANSCODER_C = PIPE_C,
-
-       /*
-        * The following transcoders can map to any pipe, their enum value
-        * doesn't need to stay fixed.
-        */
-       TRANSCODER_EDP,
-       TRANSCODER_DSI_0,
-       TRANSCODER_DSI_1,
-       TRANSCODER_DSI_A = TRANSCODER_DSI_0,    /* legacy DSI */
-       TRANSCODER_DSI_C = TRANSCODER_DSI_1,    /* legacy DSI */
-
-       I915_MAX_TRANSCODERS
-};
-
-static inline const char *transcoder_name(enum transcoder transcoder)
-{
-       switch (transcoder) {
-       case TRANSCODER_A:
-               return "A";
-       case TRANSCODER_B:
-               return "B";
-       case TRANSCODER_C:
-               return "C";
-       case TRANSCODER_EDP:
-               return "EDP";
-       case TRANSCODER_DSI_A:
-               return "DSI A";
-       case TRANSCODER_DSI_C:
-               return "DSI C";
-       default:
-               return "<invalid>";
-       }
-}
-
-static inline bool transcoder_is_dsi(enum transcoder transcoder)
-{
-       return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
-}
-
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x-bdw.
- */
-enum i9xx_plane_id {
-       PLANE_A,
-       PLANE_B,
-       PLANE_C,
-};
-
-#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC.  Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
-       PLANE_PRIMARY,
-       PLANE_SPRITE0,
-       PLANE_SPRITE1,
-       PLANE_SPRITE2,
-       PLANE_SPRITE3,
-       PLANE_SPRITE4,
-       PLANE_SPRITE5,
-       PLANE_CURSOR,
-
-       I915_MAX_PLANES,
-};
-
-#define for_each_plane_id_on_crtc(__crtc, __p) \
-       for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
-               for_each_if((__crtc)->plane_ids_mask & BIT(__p))
-
-/*
- * Port identifiers referenced from other drivers.
- * Expected to remain stable over time.
- */
-static inline const char *port_identifier(enum port port)
-{
-       switch (port) {
-       case PORT_A:
-               return "Port A";
-       case PORT_B:
-               return "Port B";
-       case PORT_C:
-               return "Port C";
-       case PORT_D:
-               return "Port D";
-       case PORT_E:
-               return "Port E";
-       case PORT_F:
-               return "Port F";
-       default:
-               return "<invalid>";
-       }
-}
-
-enum tc_port {
-       PORT_TC_NONE = -1,
-
-       PORT_TC1 = 0,
-       PORT_TC2,
-       PORT_TC3,
-       PORT_TC4,
-
-       I915_MAX_TC_PORTS
-};
-
-enum tc_port_type {
-       TC_PORT_UNKNOWN = 0,
-       TC_PORT_TYPEC,
-       TC_PORT_TBT,
-       TC_PORT_LEGACY,
-};
-
-enum dpio_channel {
-       DPIO_CH0,
-       DPIO_CH1
-};
-
-enum dpio_phy {
-       DPIO_PHY0,
-       DPIO_PHY1,
-       DPIO_PHY2,
-};
-
-#define I915_NUM_PHYS_VLV 2
-
-enum aux_ch {
-       AUX_CH_A,
-       AUX_CH_B,
-       AUX_CH_C,
-       AUX_CH_D,
-       AUX_CH_E, /* ICL+ */
-       AUX_CH_F,
-};
-
-#define aux_ch_name(a) ((a) + 'A')
-
-/* Used by DP and FDI links */
-struct intel_link_m_n {
-       u32 tu;
-       u32 gmch_m;
-       u32 gmch_n;
-       u32 link_m;
-       u32 link_n;
-};
-
-#define for_each_pipe(__dev_priv, __p) \
-       for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-
-#define for_each_pipe_masked(__dev_priv, __p, __mask) \
-       for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
-               for_each_if((__mask) & BIT(__p))
-
-#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
-       for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++)  \
-               for_each_if ((__mask) & (1 << (__t)))
-
-#define for_each_universal_plane(__dev_priv, __pipe, __p)              \
-       for ((__p) = 0;                                                 \
-            (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;       \
-            (__p)++)
-
-#define for_each_sprite(__dev_priv, __p, __s)                          \
-       for ((__s) = 0;                                                 \
-            (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)];      \
-            (__s)++)
-
-#define for_each_port_masked(__port, __ports_mask) \
-       for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)  \
-               for_each_if((__ports_mask) & BIT(__port))
-
-#define for_each_crtc(dev, crtc) \
-       list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
-#define for_each_intel_plane(dev, intel_plane) \
-       list_for_each_entry(intel_plane,                        \
-                           &(dev)->mode_config.plane_list,     \
-                           base.head)
-
-#define for_each_intel_plane_mask(dev, intel_plane, plane_mask)                \
-       list_for_each_entry(intel_plane,                                \
-                           &(dev)->mode_config.plane_list,             \
-                           base.head)                                  \
-               for_each_if((plane_mask) &                              \
-                           drm_plane_mask(&intel_plane->base))
-
-#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)     \
-       list_for_each_entry(intel_plane,                                \
-                           &(dev)->mode_config.plane_list,             \
-                           base.head)                                  \
-               for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
-
-#define for_each_intel_crtc(dev, intel_crtc)                           \
-       list_for_each_entry(intel_crtc,                                 \
-                           &(dev)->mode_config.crtc_list,              \
-                           base.head)
-
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)           \
-       list_for_each_entry(intel_crtc,                                 \
-                           &(dev)->mode_config.crtc_list,              \
-                           base.head)                                  \
-               for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
-
-#define for_each_intel_encoder(dev, intel_encoder)             \
-       list_for_each_entry(intel_encoder,                      \
-                           &(dev)->mode_config.encoder_list,   \
-                           base.head)
-
-#define for_each_intel_dp(dev, intel_encoder)                  \
-       for_each_intel_encoder(dev, intel_encoder)              \
-               for_each_if(intel_encoder_is_dp(intel_encoder))
-
-#define for_each_intel_connector_iter(intel_connector, iter) \
-       while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
-
-#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
-       list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
-               for_each_if((intel_encoder)->base.crtc == (__crtc))
-
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
-       list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
-               for_each_if((intel_connector)->base.encoder == (__encoder))
-
-#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
-       for ((__i) = 0; \
-            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
-                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
-                     (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
-            (__i)++) \
-               for_each_if(plane)
-
-#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
-       for ((__i) = 0; \
-            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
-                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
-                     (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
-            (__i)++) \
-               for_each_if(plane)
-
-#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
-       for ((__i) = 0; \
-            (__i) < (__state)->base.dev->mode_config.num_crtc && \
-                    ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
-                     (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
-            (__i)++) \
-               for_each_if(crtc)
-
-#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
-       for ((__i) = 0; \
-            (__i) < (__state)->base.dev->mode_config.num_total_plane && \
-                    ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
-                     (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
-                     (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
-            (__i)++) \
-               for_each_if(plane)
-
-#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
-       for ((__i) = 0; \
-            (__i) < (__state)->base.dev->mode_config.num_crtc && \
-                    ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
-                     (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
-                     (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
-            (__i)++) \
-               for_each_if(crtc)
-
-void intel_link_compute_m_n(u16 bpp, int nlanes,
-                           int pixel_clock, int link_clock,
-                           struct intel_link_m_n *m_n,
-                           bool constant_n);
-bool is_ccs_modifier(u64 modifier);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
-                             u32 pixel_format, u64 modifier);
-bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
-
-#endif
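
A minimal usage sketch (not part of this patch) for the intel_link_compute_m_n()
declaration above; the helper name and the numbers are made up for illustration
(24 bpp, 4 lanes, a 148500 kHz pixel clock and a 270000 kHz link clock), and
constant_n is simply left false here.

	/*
	 * Hypothetical helper: fill in the DP/FDI link M/N values for one
	 * example combination of pixel clock and link clock. Real callers
	 * derive these parameters from the mode and the negotiated link.
	 */
	static void example_fill_link_m_n(struct intel_link_m_n *m_n)
	{
		intel_link_compute_m_n(24, 4, 148500, 270000, m_n, false);
	}
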
diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c
deleted file mode 100644 (file)
index c93ad51..0000000
+++ /dev/null
@@ -1,4618 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/vgaarb.h>
-
-#include "display/intel_crt.h"
-#include "display/intel_dp.h"
-
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "intel_cdclk.h"
-#include "intel_combo_phy.h"
-#include "intel_csr.h"
-#include "intel_dpio_phy.h"
-#include "intel_drv.h"
-#include "intel_hotplug.h"
-#include "intel_sideband.h"
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
-                                        enum i915_power_well_id power_well_id);
-
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain)
-{
-       switch (domain) {
-       case POWER_DOMAIN_DISPLAY_CORE:
-               return "DISPLAY_CORE";
-       case POWER_DOMAIN_PIPE_A:
-               return "PIPE_A";
-       case POWER_DOMAIN_PIPE_B:
-               return "PIPE_B";
-       case POWER_DOMAIN_PIPE_C:
-               return "PIPE_C";
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-               return "PIPE_A_PANEL_FITTER";
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-               return "PIPE_B_PANEL_FITTER";
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-               return "PIPE_C_PANEL_FITTER";
-       case POWER_DOMAIN_TRANSCODER_A:
-               return "TRANSCODER_A";
-       case POWER_DOMAIN_TRANSCODER_B:
-               return "TRANSCODER_B";
-       case POWER_DOMAIN_TRANSCODER_C:
-               return "TRANSCODER_C";
-       case POWER_DOMAIN_TRANSCODER_EDP:
-               return "TRANSCODER_EDP";
-       case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
-               return "TRANSCODER_EDP_VDSC";
-       case POWER_DOMAIN_TRANSCODER_DSI_A:
-               return "TRANSCODER_DSI_A";
-       case POWER_DOMAIN_TRANSCODER_DSI_C:
-               return "TRANSCODER_DSI_C";
-       case POWER_DOMAIN_PORT_DDI_A_LANES:
-               return "PORT_DDI_A_LANES";
-       case POWER_DOMAIN_PORT_DDI_B_LANES:
-               return "PORT_DDI_B_LANES";
-       case POWER_DOMAIN_PORT_DDI_C_LANES:
-               return "PORT_DDI_C_LANES";
-       case POWER_DOMAIN_PORT_DDI_D_LANES:
-               return "PORT_DDI_D_LANES";
-       case POWER_DOMAIN_PORT_DDI_E_LANES:
-               return "PORT_DDI_E_LANES";
-       case POWER_DOMAIN_PORT_DDI_F_LANES:
-               return "PORT_DDI_F_LANES";
-       case POWER_DOMAIN_PORT_DDI_A_IO:
-               return "PORT_DDI_A_IO";
-       case POWER_DOMAIN_PORT_DDI_B_IO:
-               return "PORT_DDI_B_IO";
-       case POWER_DOMAIN_PORT_DDI_C_IO:
-               return "PORT_DDI_C_IO";
-       case POWER_DOMAIN_PORT_DDI_D_IO:
-               return "PORT_DDI_D_IO";
-       case POWER_DOMAIN_PORT_DDI_E_IO:
-               return "PORT_DDI_E_IO";
-       case POWER_DOMAIN_PORT_DDI_F_IO:
-               return "PORT_DDI_F_IO";
-       case POWER_DOMAIN_PORT_DSI:
-               return "PORT_DSI";
-       case POWER_DOMAIN_PORT_CRT:
-               return "PORT_CRT";
-       case POWER_DOMAIN_PORT_OTHER:
-               return "PORT_OTHER";
-       case POWER_DOMAIN_VGA:
-               return "VGA";
-       case POWER_DOMAIN_AUDIO:
-               return "AUDIO";
-       case POWER_DOMAIN_AUX_A:
-               return "AUX_A";
-       case POWER_DOMAIN_AUX_B:
-               return "AUX_B";
-       case POWER_DOMAIN_AUX_C:
-               return "AUX_C";
-       case POWER_DOMAIN_AUX_D:
-               return "AUX_D";
-       case POWER_DOMAIN_AUX_E:
-               return "AUX_E";
-       case POWER_DOMAIN_AUX_F:
-               return "AUX_F";
-       case POWER_DOMAIN_AUX_IO_A:
-               return "AUX_IO_A";
-       case POWER_DOMAIN_AUX_TBT1:
-               return "AUX_TBT1";
-       case POWER_DOMAIN_AUX_TBT2:
-               return "AUX_TBT2";
-       case POWER_DOMAIN_AUX_TBT3:
-               return "AUX_TBT3";
-       case POWER_DOMAIN_AUX_TBT4:
-               return "AUX_TBT4";
-       case POWER_DOMAIN_GMBUS:
-               return "GMBUS";
-       case POWER_DOMAIN_INIT:
-               return "INIT";
-       case POWER_DOMAIN_MODESET:
-               return "MODESET";
-       case POWER_DOMAIN_GT_IRQ:
-               return "GT_IRQ";
-       default:
-               MISSING_CASE(domain);
-               return "?";
-       }
-}
-
-static void intel_power_well_enable(struct drm_i915_private *dev_priv,
-                                   struct i915_power_well *power_well)
-{
-       DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
-       power_well->desc->ops->enable(dev_priv, power_well);
-       power_well->hw_enabled = true;
-}
-
-static void intel_power_well_disable(struct drm_i915_private *dev_priv,
-                                    struct i915_power_well *power_well)
-{
-       DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
-       power_well->hw_enabled = false;
-       power_well->desc->ops->disable(dev_priv, power_well);
-}
-
-static void intel_power_well_get(struct drm_i915_private *dev_priv,
-                                struct i915_power_well *power_well)
-{
-       if (!power_well->count++)
-               intel_power_well_enable(dev_priv, power_well);
-}
-
-static void intel_power_well_put(struct drm_i915_private *dev_priv,
-                                struct i915_power_well *power_well)
-{
-       WARN(!power_well->count, "Use count on power well %s is already zero",
-            power_well->desc->name);
-
-       if (!--power_well->count)
-               intel_power_well_disable(dev_priv, power_well);
-}
-
-/**
- * __intel_display_power_is_enabled - unlocked check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This is the unlocked version of intel_display_power_is_enabled() and should
- * only be used from error capture and recovery code where deadlocks are
- * possible.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
-                                     enum intel_display_power_domain domain)
-{
-       struct i915_power_well *power_well;
-       bool is_enabled;
-
-       if (dev_priv->runtime_pm.suspended)
-               return false;
-
-       is_enabled = true;
-
-       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
-               if (power_well->desc->always_on)
-                       continue;
-
-               if (!power_well->hw_enabled) {
-                       is_enabled = false;
-                       break;
-               }
-       }
-
-       return is_enabled;
-}
-
-/**
- * intel_display_power_is_enabled - check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This function can be used to check the hw power domain state. It is mostly
- * used in hardware state readout functions. Everywhere else code should rely
- * upon explicit power domain reference counting to ensure that the hardware
- * block is powered up before accessing it.
- *
- * Callers must hold the relevant modesetting locks to ensure that concurrent
- * threads can't disable the power well while the caller tries to read a few
- * registers.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       bool ret;
-
-       power_domains = &dev_priv->power_domains;
-
-       mutex_lock(&power_domains->lock);
-       ret = __intel_display_power_is_enabled(dev_priv, domain);
-       mutex_unlock(&power_domains->lock);
-
-       return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
-                                      u8 irq_pipe_mask, bool has_vga)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-
-       /*
-        * After we re-enable the power well, if we touch VGA register 0x3d5
-        * we'll get unclaimed register interrupts. This stops after we write
-        * anything to the VGA MSR register. The vgacon module uses this
-        * register all the time, so if we unbind our driver and, as a
-        * consequence, bind vgacon, we'll get stuck in an infinite loop at
-        * console_unlock(). So we touch the VGA MSR register here, making
-        * sure vgacon can keep working normally without triggering interrupts
-        * and error messages.
-        */
-       if (has_vga) {
-               vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
-               outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-               vga_put(pdev, VGA_RSRC_LEGACY_IO);
-       }
-
-       if (irq_pipe_mask)
-               gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
-                                      u8 irq_pipe_mask)
-{
-       if (irq_pipe_mask)
-               gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-
-       /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
-       WARN_ON(intel_wait_for_register(&dev_priv->uncore,
-                                       regs->driver,
-                                       HSW_PWR_WELL_CTL_STATE(pw_idx),
-                                       HSW_PWR_WELL_CTL_STATE(pw_idx),
-                                       1));
-}
-
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
-                                    const struct i915_power_well_regs *regs,
-                                    int pw_idx)
-{
-       u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
-       u32 ret;
-
-       ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
-       ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
-       if (regs->kvmr.reg)
-               ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
-       ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
-
-       return ret;
-}
-
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-       bool disabled;
-       u32 reqs;
-
-       /*
-        * Bspec doesn't require waiting for PWs to get disabled, but still do
-        * this for paranoia. The known cases where a PW will be forced on:
-        * - a KVMR request on any power well via the KVMR request register
-        * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
-        *   DEBUG request registers
-        * Skip the wait in case any of the request bits are set and print a
-        * diagnostic message.
-        */
-       wait_for((disabled = !(I915_READ(regs->driver) &
-                              HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
-                (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
-       if (disabled)
-               return;
-
-       DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
-                     power_well->desc->name,
-                     !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
-}
-
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
-                                          enum skl_power_gate pg)
-{
-       /* Timeout 5us for PG#0, for other PGs 1us */
-       WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
-                                       SKL_FUSE_PG_DIST_STATUS(pg),
-                                       SKL_FUSE_PG_DIST_STATUS(pg), 1));
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
-                                 struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-       bool wait_fuses = power_well->desc->hsw.has_fuses;
-       enum skl_power_gate uninitialized_var(pg);
-       u32 val;
-
-       if (wait_fuses) {
-               pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
-                                                SKL_PW_CTL_IDX_TO_PG(pw_idx);
-               /*
-                * For PW1 we have to wait both for the PW0/PG0 fuse state
-                * before enabling the power well and PW1/PG1's own fuse
-                * state after the enabling. For all other power wells with
-                * fuses we only have to wait for that PW/PG's fuse state
-                * after the enabling.
-                */
-               if (pg == SKL_PG1)
-                       gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
-       }
-
-       val = I915_READ(regs->driver);
-       I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-       hsw_wait_for_power_well_enable(dev_priv, power_well);
-
-       /* Display WA #1178: cnl */
-       if (IS_CANNONLAKE(dev_priv) &&
-           pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
-           pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
-               val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
-               val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
-               I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
-       }
-
-       if (wait_fuses)
-               gen9_wait_for_power_well_fuses(dev_priv, pg);
-
-       hsw_power_well_post_enable(dev_priv,
-                                  power_well->desc->hsw.irq_pipe_mask,
-                                  power_well->desc->hsw.has_vga);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-       u32 val;
-
-       hsw_power_well_pre_disable(dev_priv,
-                                  power_well->desc->hsw.irq_pipe_mask);
-
-       val = I915_READ(regs->driver);
-       I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-       hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-#define ICL_AUX_PW_TO_PORT(pw_idx)     ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-
-static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
-                                   struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-       enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
-       u32 val;
-
-       val = I915_READ(regs->driver);
-       I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
-       val = I915_READ(ICL_PORT_CL_DW12(port));
-       I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
-
-       hsw_wait_for_power_well_enable(dev_priv, power_well);
-
-       /* Display WA #1178: icl */
-       if (IS_ICELAKE(dev_priv) &&
-           pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
-           !intel_bios_is_port_edp(dev_priv, port)) {
-               val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
-               val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
-               I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
-       }
-}
-
-static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
-                                    struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-       enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
-       u32 val;
-
-       val = I915_READ(ICL_PORT_CL_DW12(port));
-       I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
-
-       val = I915_READ(regs->driver);
-       I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-
-       hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-#define ICL_AUX_PW_TO_CH(pw_idx)       \
-       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
-                                struct i915_power_well *power_well)
-{
-       enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
-       u32 val;
-
-       val = I915_READ(DP_AUX_CH_CTL(aux_ch));
-       val &= ~DP_AUX_CH_CTL_TBT_IO;
-       if (power_well->desc->hsw.is_tc_tbt)
-               val |= DP_AUX_CH_CTL_TBT_IO;
-       I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
-
-       hsw_power_well_enable(dev_priv, power_well);
-}
-
-/*
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       enum i915_power_well_id id = power_well->desc->id;
-       int pw_idx = power_well->desc->hsw.idx;
-       u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
-                  HSW_PWR_WELL_CTL_STATE(pw_idx);
-       u32 val;
-
-       val = I915_READ(regs->driver);
-
-       /*
-        * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
-        * and the MISC_IO PW will not be restored, so check instead for the
-        * BIOS's own request bits, which are forced-on for these power wells
-        * when exiting DC5/6.
-        */
-       if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
-           (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
-               val |= I915_READ(regs->bios);
-
-       return (val & mask) == mask;
-}
-
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
-{
-       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
-                 "DC9 already programmed to be enabled.\n");
-       WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-                 "DC5 still not disabled to enable DC9.\n");
-       WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
-                 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
-                 "Power well 2 on.\n");
-       WARN_ONCE(intel_irqs_enabled(dev_priv),
-                 "Interrupts not disabled yet.\n");
-
-       /*
-        * TODO: check for the following to verify the conditions to enter DC9
-        * state are satisfied:
-        * 1] Check relevant display engine registers to verify if the mode set
-        *    disable sequence was followed.
-        * 2] Check if the display uninitialize sequence was initiated.
-        */
-}
-
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
-{
-       WARN_ONCE(intel_irqs_enabled(dev_priv),
-                 "Interrupts not disabled yet.\n");
-       WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-                 "DC5 still not disabled.\n");
-
-       /*
-        * TODO: check for the following to verify DC9 state was indeed
-        * entered before programming to disable it:
-        * 1] Check relevant display engine registers to verify if the mode
-        *    set disable sequence was followed.
-        * 2] Check if the display uninitialize sequence was initiated.
-        */
-}
-
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
-                               u32 state)
-{
-       int rewrites = 0;
-       int rereads = 0;
-       u32 v;
-
-       I915_WRITE(DC_STATE_EN, state);
-
-       /* It has been observed that disabling the DC6 state sometimes
-        * doesn't stick and the DMC keeps returning the old value. Re-read
-        * and rewrite until the value sticks often enough that we are
-        * confident the state is exactly what we want.
-        */
-       do  {
-               v = I915_READ(DC_STATE_EN);
-
-               if (v != state) {
-                       I915_WRITE(DC_STATE_EN, state);
-                       rewrites++;
-                       rereads = 0;
-               } else if (rereads++ > 5) {
-                       break;
-               }
-
-       } while (rewrites < 100);
-
-       if (v != state)
-               DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
-                         state, v);
-
-       /* Most of the time we need one retry, avoid spamming the log */
-       if (rewrites > 1)
-               DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
-                             state, rewrites);
-}
-
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
-{
-       u32 mask;
-
-       mask = DC_STATE_EN_UPTO_DC5;
-       if (INTEL_GEN(dev_priv) >= 11)
-               mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
-       else if (IS_GEN9_LP(dev_priv))
-               mask |= DC_STATE_EN_DC9;
-       else
-               mask |= DC_STATE_EN_UPTO_DC6;
-
-       return mask;
-}
-
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
-
-       DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
-                     dev_priv->csr.dc_state, val);
-       dev_priv->csr.dc_state = val;
-}
-
-/**
- * gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
- * @state: target DC power state
- * - DC_STATE_DISABLE
- * - DC_STATE_EN_UPTO_DC5
- * - DC_STATE_EN_UPTO_DC6
- * - DC_STATE_EN_DC9
- *
- * Signal to DMC firmware/HW the target DC power state passed in @state.
- * DMC/HW can turn off individual display clocks and power rails when entering
- * a deeper DC power state (higher in number) and turns these back on when exiting
- * that state to a shallower power state (lower in number). The HW will decide
- * when to actually enter a given state on an on-demand basis, for instance
- * depending on the active state of display pipes. The state of display
- * registers backed by affected power rails is saved/restored as needed.
- *
- * Based on the above, enabling a deeper DC power state is asynchronous wrt. the
- * HW actually entering it. Disabling a deeper power state is synchronous: for instance
- * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
- * back on and register state is restored. This is guaranteed by the MMIO write
- * to DC_STATE_EN blocking until the state is restored.
- */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
-{
-       u32 val;
-       u32 mask;
-
-       if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
-               state &= dev_priv->csr.allowed_dc_mask;
-
-       val = I915_READ(DC_STATE_EN);
-       mask = gen9_dc_mask(dev_priv);
-       DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
-                     val & mask, state);
-
-       /* Check if DMC is ignoring our DC state requests */
-       if ((val & mask) != dev_priv->csr.dc_state)
-               DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
-                         dev_priv->csr.dc_state, val & mask);
-
-       val &= ~mask;
-       val |= state;
-
-       gen9_write_dc_state(dev_priv, val);
-
-       dev_priv->csr.dc_state = val & mask;
-}
-
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-{
-       assert_can_enable_dc9(dev_priv);
-
-       DRM_DEBUG_KMS("Enabling DC9\n");
-       /*
-        * Power sequencer reset is not needed on
-        * platforms with South Display Engine on PCH,
-        * because PPS registers are always on.
-        */
-       if (!HAS_PCH_SPLIT(dev_priv))
-               intel_power_sequencer_reset(dev_priv);
-       gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
-}
-
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-{
-       assert_can_disable_dc9(dev_priv);
-
-       DRM_DEBUG_KMS("Disabling DC9\n");
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void assert_csr_loaded(struct drm_i915_private *dev_priv)
-{
-       WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
-                 "CSR program storage start is NULL\n");
-       WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
-       WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
-}
-
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
-                 enum i915_power_well_id power_well_id)
-{
-       struct i915_power_well *power_well;
-
-       for_each_power_well(dev_priv, power_well)
-               if (power_well->desc->id == power_well_id)
-                       return power_well;
-
-       /*
-        * It's not feasible to add error checking code to the callers since
-        * this condition really shouldn't happen and it doesn't even make sense
-        * to abort things like display initialization sequences. Just return
-        * the first power well and hope the WARN gets reported so we can fix
-        * our driver.
-        */
-       WARN(1, "Power well %d not defined for this platform\n", power_well_id);
-       return &dev_priv->power_domains.power_wells[0];
-}
-
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
-{
-       bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
-                                       SKL_DISP_PW_2);
-
-       WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
-
-       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
-                 "DC5 already programmed to be enabled.\n");
-       assert_rpm_wakelock_held(&dev_priv->runtime_pm);
-
-       assert_csr_loaded(dev_priv);
-}
-
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
-{
-       assert_can_enable_dc5(dev_priv);
-
-       DRM_DEBUG_KMS("Enabling DC5\n");
-
-       /* Wa Display #1183: skl,kbl,cfl */
-       if (IS_GEN9_BC(dev_priv))
-               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-                          SKL_SELECT_ALTERNATE_DC_EXIT);
-
-       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-}
-
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
-{
-       WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
-                 "Backlight is not disabled.\n");
-       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
-                 "DC6 already programmed to be enabled.\n");
-
-       assert_csr_loaded(dev_priv);
-}
-
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
-{
-       assert_can_enable_dc6(dev_priv);
-
-       DRM_DEBUG_KMS("Enabling DC6\n");
-
-       /* Wa Display #1183: skl,kbl,cfl */
-       if (IS_GEN9_BC(dev_priv))
-               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-                          SKL_SELECT_ALTERNATE_DC_EXIT);
-
-       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
-       int pw_idx = power_well->desc->hsw.idx;
-       u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
-       u32 bios_req = I915_READ(regs->bios);
-
-       /* Take over the request bit if set by BIOS. */
-       if (bios_req & mask) {
-               u32 drv_req = I915_READ(regs->driver);
-
-               if (!(drv_req & mask))
-                       I915_WRITE(regs->driver, drv_req | mask);
-               I915_WRITE(regs->bios, bios_req & ~mask);
-       }
-}
-
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
-}
-
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *power_well;
-
-       power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
-       if (power_well->count > 0)
-               bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
-       power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
-       if (power_well->count > 0)
-               bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
-       if (IS_GEMINILAKE(dev_priv)) {
-               power_well = lookup_power_well(dev_priv,
-                                              GLK_DISP_PW_DPIO_CMN_C);
-               if (power_well->count > 0)
-                       bxt_ddi_phy_verify_state(dev_priv,
-                                                power_well->desc->bxt.phy);
-       }
-}
-
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
-}
-
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
-{
-       u32 tmp = I915_READ(DBUF_CTL);
-
-       WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
-            (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
-            "Unexpected DBuf power state (0x%08x)\n", tmp);
-}
-
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
-{
-       struct intel_cdclk_state cdclk_state = {};
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
-       /* Can't read out voltage_level so can't use intel_cdclk_changed() */
-       WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
-
-       gen9_assert_dbuf_enabled(dev_priv);
-
-       if (IS_GEN9_LP(dev_priv))
-               bxt_verify_ddi_phy_power_wells(dev_priv);
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               /*
-                * DMC retains HW context only for port A, the other combo
-                * PHY's HW context for port B is lost after DC transitions,
-                * so we need to restore it manually.
-                */
-               intel_combo_phy_init(dev_priv);
-}
-
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       if (!dev_priv->csr.dmc_payload)
-               return;
-
-       if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
-               skl_enable_dc6(dev_priv);
-       else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
-               gen9_enable_dc5(dev_priv);
-}
-
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
-                                        struct i915_power_well *power_well)
-{
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
-                                            struct i915_power_well *power_well)
-{
-       return true;
-}
-
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
-                                        struct i915_power_well *power_well)
-{
-       if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
-               i830_enable_pipe(dev_priv, PIPE_A);
-       if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
-               i830_enable_pipe(dev_priv, PIPE_B);
-}
-
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
-{
-       i830_disable_pipe(dev_priv, PIPE_B);
-       i830_disable_pipe(dev_priv, PIPE_A);
-}
-
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
-{
-       return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
-               I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
-}
-
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
-{
-       if (power_well->count > 0)
-               i830_pipes_power_well_enable(dev_priv, power_well);
-       else
-               i830_pipes_power_well_disable(dev_priv, power_well);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
-{
-       int pw_idx = power_well->desc->vlv.idx;
-       u32 mask;
-       u32 state;
-       u32 ctrl;
-
-       mask = PUNIT_PWRGT_MASK(pw_idx);
-       state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
-                        PUNIT_PWRGT_PWR_GATE(pw_idx);
-
-       vlv_punit_get(dev_priv);
-
-#define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
-       if (COND)
-               goto out;
-
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
-       ctrl &= ~mask;
-       ctrl |= state;
-       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
-       if (wait_for(COND, 100))
-               DRM_ERROR("timeout setting power well state %08x (%08x)\n",
-                         state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
-       vlv_punit_put(dev_priv);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
-                                 struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       int pw_idx = power_well->desc->vlv.idx;
-       bool enabled = false;
-       u32 mask;
-       u32 state;
-       u32 ctrl;
-
-       mask = PUNIT_PWRGT_MASK(pw_idx);
-       ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
-
-       vlv_punit_get(dev_priv);
-
-       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
-       /*
-        * We only ever set the power-on and power-gate states, anything
-        * else is unexpected.
-        */
-       WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
-               state != PUNIT_PWRGT_PWR_GATE(pw_idx));
-       if (state == ctrl)
-               enabled = true;
-
-       /*
-        * A transient state at this point would mean some unexpected party
-        * is poking at the power controls too.
-        */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
-       WARN_ON(ctrl != state);
-
-       vlv_punit_put(dev_priv);
-
-       return enabled;
-}
-
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       /*
-        * On driver load, a pipe may be active and driving a DSI display.
-        * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
-        * (and never recovering) in this case. intel_dsi_post_disable() will
-        * clear it when we turn off the display.
-        */
-       val = I915_READ(DSPCLK_GATE_D);
-       val &= DPOUNIT_CLOCK_GATE_DISABLE;
-       val |= VRHUNIT_CLOCK_GATE_DISABLE;
-       I915_WRITE(DSPCLK_GATE_D, val);
-
-       /*
-        * Disable trickle feed and enable pnd deadline calculation
-        */
-       I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-       I915_WRITE(CBR1_VLV, 0);
-
-       WARN_ON(dev_priv->rawclk_freq == 0);
-
-       I915_WRITE(RAWCLK_FREQ_VLV,
-                  DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
-}
-
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
-{
-       struct intel_encoder *encoder;
-       enum pipe pipe;
-
-       /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection. Supposedly DSI also
-        * needs the ref clock up and running.
-        *
-        * CHV DPLL B/C have some issues if VGA mode is enabled.
-        */
-       for_each_pipe(dev_priv, pipe) {
-               u32 val = I915_READ(DPLL(pipe));
-
-               val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-               if (pipe != PIPE_A)
-                       val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-               I915_WRITE(DPLL(pipe), val);
-       }
-
-       vlv_init_display_clock_gating(dev_priv);
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_enable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       /*
-        * During driver initialization/resume we can avoid restoring the
-        * part of the HW/SW state that will be inited anyway explicitly.
-        */
-       if (dev_priv->power_domains.initializing)
-               return;
-
-       intel_hpd_init(dev_priv);
-
-       /* Re-enable the ADPA, if we have one */
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               if (encoder->type == INTEL_OUTPUT_ANALOG)
-                       intel_crt_reset(&encoder->base);
-       }
-
-       i915_redisable_vga_power_on(dev_priv);
-
-       intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
-{
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_disable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       /* make sure we're done processing display irqs */
-       synchronize_irq(dev_priv->drm.irq);
-
-       intel_power_sequencer_reset(dev_priv);
-
-       /* Prevent us from re-enabling polling by accident in late suspend */
-       if (!dev_priv->drm.dev->power.is_suspended)
-               intel_hpd_poll_init(dev_priv);
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       vlv_display_power_well_init(dev_priv);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       vlv_display_power_well_deinit(dev_priv);
-
-       vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       /* since ref/cri clock was enabled */
-       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       /*
-        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
-        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
-        *   b. The other bits such as sfr settings / modesel may all
-        *      be set to 0.
-        *
-        * This should only be done on init and resume from S3 with
-        * both PLLs disabled, or we risk losing DPIO and PLL
-        * synchronization.
-        */
-       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       enum pipe pipe;
-
-       for_each_pipe(dev_priv, pipe)
-               assert_pll_disabled(dev_priv, pipe);
-
-       /* Assert common reset */
-       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
-       vlv_set_power_well(dev_priv, power_well, false);
-}
-
-#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-
-#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *cmn_bc =
-               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
-       struct i915_power_well *cmn_d =
-               lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
-       u32 phy_control = dev_priv->chv_phy_control;
-       u32 phy_status = 0;
-       u32 phy_status_mask = 0xffffffff;
-
-       /*
-        * The BIOS can leave the PHY in some weird state
-        * where it doesn't fully power down some parts.
-        * Disable the asserts until the PHY has been fully
-        * reset (ie. the power well has been disabled at
-        * least once).
-        */
-       if (!dev_priv->chv_phy_assert[DPIO_PHY0])
-               phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
-                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
-                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
-                                    PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
-                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
-                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
-
-       if (!dev_priv->chv_phy_assert[DPIO_PHY1])
-               phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
-                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
-                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
-
-       if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
-               phy_status |= PHY_POWERGOOD(DPIO_PHY0);
-
-               /* this assumes override is only used to enable lanes */
-               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
-                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
-
-               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
-                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
-
-               /* CL1 is on whenever anything is on in either channel */
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
-                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
-                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
-
-               /*
-                * The DPLLB check accounts for the pipe B + port A usage
-                * with CL2 powered up but all the lanes in the second channel
-                * powered down.
-                */
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
-                   (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
-                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
-
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
-                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
-                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
-
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
-                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
-                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
-       }
-
-       if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
-               phy_status |= PHY_POWERGOOD(DPIO_PHY1);
-
-               /* this assumes override is only used to enable lanes */
-               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
-                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
-
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
-                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
-
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
-                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
-               if (BITS_SET(phy_control,
-                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
-                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
-       }
-
-       phy_status &= phy_status_mask;
-
-       /*
-        * The PHY may be busy with some initial calibration and whatnot,
-        * so the power state can take a while to actually change.
-        */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   DISPLAY_PHY_STATUS,
-                                   phy_status_mask,
-                                   phy_status,
-                                   10))
-               DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
-                         I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
-                          phy_status, dev_priv->chv_phy_control);
-}
-
-#undef BITS_SET
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       enum dpio_phy phy;
-       enum pipe pipe;
-       u32 tmp;
-
-       WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
-                    power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
-       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-               pipe = PIPE_A;
-               phy = DPIO_PHY0;
-       } else {
-               pipe = PIPE_C;
-               phy = DPIO_PHY1;
-       }
-
-       /* since ref/cri clock was enabled */
-       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       /* Poll for phypwrgood signal */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   DISPLAY_PHY_STATUS,
-                                   PHY_POWERGOOD(phy),
-                                   PHY_POWERGOOD(phy),
-                                   1))
-               DRM_ERROR("Display PHY %d is not powered up\n", phy);
-
-       vlv_dpio_get(dev_priv);
-
-       /* Enable dynamic power down */
-       tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
-       tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
-               DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
-
-       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-               tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
-               tmp |= DPIO_DYNPWRDOWNEN_CH1;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
-       } else {
-               /*
-                * Force the non-existing CL2 off. BXT does this
-                * too, so maybe it saves some power even though
-                * CL2 doesn't exist?
-                */
-               tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
-               tmp |= DPIO_CL2_LDOFUSE_PWRENB;
-               vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
-       }
-
-       vlv_dpio_put(dev_priv);
-
-       dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
-       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
-       DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-                     phy, dev_priv->chv_phy_control);
-
-       assert_chv_phy_status(dev_priv);
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       enum dpio_phy phy;
-
-       WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
-                    power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
-       if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-               phy = DPIO_PHY0;
-               assert_pll_disabled(dev_priv, PIPE_A);
-               assert_pll_disabled(dev_priv, PIPE_B);
-       } else {
-               phy = DPIO_PHY1;
-               assert_pll_disabled(dev_priv, PIPE_C);
-       }
-
-       dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
-       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
-       vlv_set_power_well(dev_priv, power_well, false);
-
-       DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-                     phy, dev_priv->chv_phy_control);
-
-       /* PHY is fully reset now, so we can enable the PHY state asserts */
-       dev_priv->chv_phy_assert[phy] = true;
-
-       assert_chv_phy_status(dev_priv);
-}
-
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-                                    enum dpio_channel ch, bool override, unsigned int mask)
-{
-       enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
-       u32 reg, val, expected, actual;
-
-       /*
-        * The BIOS can leave the PHY in some weird state
-        * where it doesn't fully power down some parts.
-        * Disable the asserts until the PHY has been fully
-        * reset (ie. the power well has been disabled at
-        * least once).
-        */
-       if (!dev_priv->chv_phy_assert[phy])
-               return;
-
-       if (ch == DPIO_CH0)
-               reg = _CHV_CMN_DW0_CH0;
-       else
-               reg = _CHV_CMN_DW6_CH1;
-
-       vlv_dpio_get(dev_priv);
-       val = vlv_dpio_read(dev_priv, pipe, reg);
-       vlv_dpio_put(dev_priv);
-
-       /*
-        * This assumes !override is only used when the port is disabled.
-        * All lanes should power down even without the override when
-        * the port is disabled.
-        */
-       if (!override || mask == 0xf) {
-               expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-               /*
-                * If CH1 common lane is not active anymore
-                * (eg. for pipe B DPLL) the entire channel will
-                * shut down, which causes the common lane registers
-                * to read as 0. That means we can't actually check
-                * the lane power down status bits, but as the entire
-                * register reads as 0 it's a good indication that the
-                * channel is indeed entirely powered down.
-                */
-               if (ch == DPIO_CH1 && val == 0)
-                       expected = 0;
-       } else if (mask != 0x0) {
-               expected = DPIO_ANYDL_POWERDOWN;
-       } else {
-               expected = 0;
-       }
-
-       if (ch == DPIO_CH0)
-               actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
-       else
-               actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
-       actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-
-       WARN(actual != expected,
-            "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
-            !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
-            !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
-            reg, val);
-}
-
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-                         enum dpio_channel ch, bool override)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       bool was_override;
-
-       mutex_lock(&power_domains->lock);
-
-       was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
-       if (override == was_override)
-               goto out;
-
-       if (override)
-               dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-       else
-               dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
-       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
-       DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
-                     phy, ch, dev_priv->chv_phy_control);
-
-       assert_chv_phy_status(dev_priv);
-
-out:
-       mutex_unlock(&power_domains->lock);
-
-       return was_override;
-}
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
-                            bool override, unsigned int mask)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
-       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-
-       mutex_lock(&power_domains->lock);
-
-       dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
-       dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
-
-       if (override)
-               dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-       else
-               dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
-       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
-       DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
-                     phy, ch, mask, dev_priv->chv_phy_control);
-
-       assert_chv_phy_status(dev_priv);
-
-       assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
-
-       mutex_unlock(&power_domains->lock);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
-{
-       enum pipe pipe = PIPE_A;
-       bool enabled;
-       u32 state, ctrl;
-
-       vlv_punit_get(dev_priv);
-
-       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
-       /*
-        * We only ever set the power-on and power-gate states, anything
-        * else is unexpected.
-        */
-       WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
-       enabled = state == DP_SSS_PWR_ON(pipe);
-
-       /*
-        * A transient state at this point would mean some unexpected party
-        * is poking at the power controls too.
-        */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
-       WARN_ON(ctrl << 16 != state);
-
-       vlv_punit_put(dev_priv);
-
-       return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
-                                   struct i915_power_well *power_well,
-                                   bool enable)
-{
-       enum pipe pipe = PIPE_A;
-       u32 state;
-       u32 ctrl;
-
-       state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
-       vlv_punit_get(dev_priv);
-
-#define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
-       if (COND)
-               goto out;
-
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
-       ctrl &= ~DP_SSC_MASK(pipe);
-       ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
-
-       if (wait_for(COND, 100))
-               DRM_ERROR("timeout setting power well state %08x (%08x)\n",
-                         state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
-
-#undef COND
-
-out:
-       vlv_punit_put(dev_priv);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
-                                      struct i915_power_well *power_well)
-{
-       chv_set_pipe_power_well(dev_priv, power_well, true);
-
-       vlv_display_power_well_init(dev_priv);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
-{
-       vlv_display_power_well_deinit(dev_priv);
-
-       chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
-{
-       return power_domains->async_put_domains[0] |
-              power_domains->async_put_domains[1];
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static bool
-assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
-{
-       return !WARN_ON(power_domains->async_put_domains[0] &
-                       power_domains->async_put_domains[1]);
-}
-
-static bool
-__async_put_domains_state_ok(struct i915_power_domains *power_domains)
-{
-       enum intel_display_power_domain domain;
-       bool err = false;
-
-       err |= !assert_async_put_domain_masks_disjoint(power_domains);
-       err |= WARN_ON(!!power_domains->async_put_wakeref !=
-                      !!__async_put_domains_mask(power_domains));
-
-       for_each_power_domain(domain, __async_put_domains_mask(power_domains))
-               err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
-
-       return !err;
-}
-
-static void print_power_domains(struct i915_power_domains *power_domains,
-                               const char *prefix, u64 mask)
-{
-       enum intel_display_power_domain domain;
-
-       DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
-       for_each_power_domain(domain, mask)
-               DRM_DEBUG_DRIVER("%s use_count %d\n",
-                                intel_display_power_domain_str(domain),
-                                power_domains->domain_use_count[domain]);
-}
-
-static void
-print_async_put_domains_state(struct i915_power_domains *power_domains)
-{
-       DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
-                        power_domains->async_put_wakeref);
-
-       print_power_domains(power_domains, "async_put_domains[0]",
-                           power_domains->async_put_domains[0]);
-       print_power_domains(power_domains, "async_put_domains[1]",
-                           power_domains->async_put_domains[1]);
-}
-
-static void
-verify_async_put_domains_state(struct i915_power_domains *power_domains)
-{
-       if (!__async_put_domains_state_ok(power_domains))
-               print_async_put_domains_state(power_domains);
-}
-
-#else
-
-static void
-assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
-{
-}
-
-static void
-verify_async_put_domains_state(struct i915_power_domains *power_domains)
-{
-}
-
-#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
-
-static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
-{
-       assert_async_put_domain_masks_disjoint(power_domains);
-
-       return __async_put_domains_mask(power_domains);
-}
-
-static void
-async_put_domains_clear_domain(struct i915_power_domains *power_domains,
-                              enum intel_display_power_domain domain)
-{
-       assert_async_put_domain_masks_disjoint(power_domains);
-
-       power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
-       power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
-}
-
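-/*
- * Check whether @domain has a reference pending to be released asynchronously
- * and, if so, take that reference over for the caller instead of dropping it.
- * Returns true on takeover. If this was the last pending domain, the queued
- * async put work is cancelled and its raw wakeref released.
- */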
-static bool
-intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
-                                      enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       bool ret = false;
-
-       if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
-               goto out_verify;
-
-       async_put_domains_clear_domain(power_domains, domain);
-
-       ret = true;
-
-       if (async_put_domains_mask(power_domains))
-               goto out_verify;
-
-       cancel_delayed_work(&power_domains->async_put_work);
-       intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
-                                fetch_and_zero(&power_domains->async_put_wakeref));
-out_verify:
-       verify_async_put_domains_state(power_domains);
-
-       return ret;
-}
-
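-/*
- * Grab a reference for @domain: either take over a reference still pending an
- * async put, or enable all power wells backing the domain and bump its use
- * count. The caller must hold the power domains lock.
- */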
-static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
-                                enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-
-       if (intel_display_power_grab_async_put_ref(dev_priv, domain))
-               return;
-
-       for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
-               intel_power_well_get(dev_priv, power_well);
-
-       power_domains->domain_use_count[domain]++;
-}
-
-/**
- * intel_display_power_get - grab a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function grabs a power domain reference for @domain and ensures that the
- * power domain and all its parents are powered up. Therefore users should only
- * grab a reference to the innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_display_power_put() to release the reference again.
- */
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
-                                       enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-       mutex_lock(&power_domains->lock);
-       __intel_display_power_get_domain(dev_priv, domain);
-       mutex_unlock(&power_domains->lock);
-
-       return wakeref;
-}
-
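-/*
- * Illustrative usage sketch (the POWER_DOMAIN_PIPE_A domain and the register
- * access below are arbitrary examples, not code from this file): hold the
- * wakeref across the HW access that needs the domain powered, then release it.
- *
- *     intel_wakeref_t wakeref;
- *
- *     wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- *     ... MMIO access that requires the domain, e.g. PIPECONF(PIPE_A) ...
- *     intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
- */
-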
-/**
- * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function grabs a power domain reference for @domain only if the power
- * domain is already enabled; unlike intel_display_power_get() it does not power
- * up the domain itself. It returns a wakeref on success and 0 otherwise.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_display_power_put() to release the reference again.
- */
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
-                                  enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       intel_wakeref_t wakeref;
-       bool is_enabled;
-
-       wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
-       if (!wakeref)
-               return false;
-
-       mutex_lock(&power_domains->lock);
-
-       if (__intel_display_power_is_enabled(dev_priv, domain)) {
-               __intel_display_power_get_domain(dev_priv, domain);
-               is_enabled = true;
-       } else {
-               is_enabled = false;
-       }
-
-       mutex_unlock(&power_domains->lock);
-
-       if (!is_enabled) {
-               intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-               wakeref = 0;
-       }
-
-       return wakeref;
-}
-
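-/*
- * Illustrative usage sketch (the domain below is an arbitrary example): a
- * returned wakeref of 0 means the domain is powered down and must not be
- * touched; on success the reference is released with a regular put.
- *
- *     wakeref = intel_display_power_get_if_enabled(dev_priv,
- *                                                  POWER_DOMAIN_PIPE_A);
- *     if (!wakeref)
- *             return false;
- *     ... read out HW state for the pipe ...
- *     intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
- */
-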
-static void
-__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
-                                enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       const char *name = intel_display_power_domain_str(domain);
-
-       power_domains = &dev_priv->power_domains;
-
-       WARN(!power_domains->domain_use_count[domain],
-            "Use count on domain %s is already zero\n",
-            name);
-       WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
-            "Async disabling of domain %s is pending\n",
-            name);
-
-       power_domains->domain_use_count[domain]--;
-
-       for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
-               intel_power_well_put(dev_priv, power_well);
-}
-
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
-                                     enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
-       mutex_lock(&power_domains->lock);
-       __intel_display_power_put_domain(dev_priv, domain);
-       mutex_unlock(&power_domains->lock);
-}
-
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
-                                      enum intel_display_power_domain domain)
-{
-       __intel_display_power_put(dev_priv, domain);
-       intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
-}
-
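-/*
- * Stash @wakeref for the async put work and arm the delayed work that will
- * release the pending power domains after a grace period.
- */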
-static void
-queue_async_put_domains_work(struct i915_power_domains *power_domains,
-                            intel_wakeref_t wakeref)
-{
-       WARN_ON(power_domains->async_put_wakeref);
-       power_domains->async_put_wakeref = wakeref;
-       WARN_ON(!queue_delayed_work(system_unbound_wq,
-                                   &power_domains->async_put_work,
-                                   msecs_to_jiffies(100)));
-}
-
-static void
-release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(power_domains, struct drm_i915_private,
-                            power_domains);
-       struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       enum intel_display_power_domain domain;
-       intel_wakeref_t wakeref;
-
-       /*
-        * The caller must already hold a raw wakeref; upgrade that to a proper
-        * wakeref to make the state checker happy about the HW access during
-        * power well disabling.
-        */
-       assert_rpm_raw_wakeref_held(rpm);
-       wakeref = intel_runtime_pm_get(rpm);
-
-       for_each_power_domain(domain, mask) {
-               /* Clear before put, so put's sanity check is happy. */
-               async_put_domains_clear_domain(power_domains, domain);
-               __intel_display_power_put_domain(dev_priv, domain);
-       }
-
-       intel_runtime_pm_put(rpm, wakeref);
-}
-
-static void
-intel_display_power_put_async_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private,
-                            power_domains.async_put_work.work);
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
-       intel_wakeref_t old_work_wakeref = 0;
-
-       mutex_lock(&power_domains->lock);
-
-       /*
-        * Bail out if all the domain refs pending to be released were grabbed
-        * by subsequent gets or a flush_work.
-        */
-       old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
-       if (!old_work_wakeref)
-               goto out_verify;
-
-       release_async_put_domains(power_domains,
-                                 power_domains->async_put_domains[0]);
-
-       /* Requeue the work if more domains were async put meanwhile. */
-       if (power_domains->async_put_domains[1]) {
-               power_domains->async_put_domains[0] =
-                       fetch_and_zero(&power_domains->async_put_domains[1]);
-               queue_async_put_domains_work(power_domains,
-                                            fetch_and_zero(&new_work_wakeref));
-       }
-
-out_verify:
-       verify_async_put_domains_state(power_domains);
-
-       mutex_unlock(&power_domains->lock);
-
-       if (old_work_wakeref)
-               intel_runtime_pm_put_raw(rpm, old_work_wakeref);
-       if (new_work_wakeref)
-               intel_runtime_pm_put_raw(rpm, new_work_wakeref);
-}
-
-/**
- * intel_display_power_put_async - release a power domain reference asynchronously
- * @i915: i915 device instance
- * @domain: power domain to put the reference for
- * @wakeref: wakeref acquired for the reference that is being released
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get*() and schedules a work item to power down the
- * corresponding hardware block if this is the last reference.
- */
-void __intel_display_power_put_async(struct drm_i915_private *i915,
-                                    enum intel_display_power_domain domain,
-                                    intel_wakeref_t wakeref)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-       struct intel_runtime_pm *rpm = &i915->runtime_pm;
-       intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
-
-       mutex_lock(&power_domains->lock);
-
-       if (power_domains->domain_use_count[domain] > 1) {
-               __intel_display_power_put_domain(i915, domain);
-
-               goto out_verify;
-       }
-
-       WARN_ON(power_domains->domain_use_count[domain] != 1);
-
-       /* Let a pending work requeue itself or queue a new one. */
-       if (power_domains->async_put_wakeref) {
-               power_domains->async_put_domains[1] |= BIT_ULL(domain);
-       } else {
-               power_domains->async_put_domains[0] |= BIT_ULL(domain);
-               queue_async_put_domains_work(power_domains,
-                                            fetch_and_zero(&work_wakeref));
-       }
-
-out_verify:
-       verify_async_put_domains_state(power_domains);
-
-       mutex_unlock(&power_domains->lock);
-
-       if (work_wakeref)
-               intel_runtime_pm_put_raw(rpm, work_wakeref);
-
-       intel_runtime_pm_put(rpm, wakeref);
-}
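
A hedged usage sketch, assuming the intel_display_power_put_async() wrapper declared in the header forwards to __intel_display_power_put_async(): the caller hands back the wakeref it obtained from intel_display_power_get(), and the delayed work above performs the actual power down later instead of blocking the caller.

	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);

	/* ... short AUX transaction ... */

	/* Defer the power down; the async put work handler picks it up. */
	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
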
-
-/**
- * intel_display_power_flush_work - flushes the async display power disabling work
- * @i915: i915 device instance
- *
- * Flushes any pending work that was scheduled by a preceding
- * intel_display_power_put_async() call, completing the disabling of the
- * corresponding power domains.
- *
- * Note that the work handler function may still be running after this
- * function returns; to ensure that the work handler isn't running use
- * intel_display_power_flush_work_sync() instead.
- */
-void intel_display_power_flush_work(struct drm_i915_private *i915)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-       intel_wakeref_t work_wakeref;
-
-       mutex_lock(&power_domains->lock);
-
-       work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
-       if (!work_wakeref)
-               goto out_verify;
-
-       release_async_put_domains(power_domains,
-                                 async_put_domains_mask(power_domains));
-       cancel_delayed_work(&power_domains->async_put_work);
-
-out_verify:
-       verify_async_put_domains_state(power_domains);
-
-       mutex_unlock(&power_domains->lock);
-
-       if (work_wakeref)
-               intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
-}
-
-/**
- * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
- * @i915: i915 device instance
- *
- * Like intel_display_power_flush_work(), but also ensures that the work
- * handler function is no longer running when this function returns.
- */
-static void
-intel_display_power_flush_work_sync(struct drm_i915_private *i915)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-
-       intel_display_power_flush_work(i915);
-       cancel_delayed_work_sync(&power_domains->async_put_work);
-
-       verify_async_put_domains_state(power_domains);
-
-       WARN_ON(power_domains->async_put_wakeref);
-}
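
A minimal sketch of where the flush helpers above fit, e.g. a teardown or suspend path that must not leave a deferred power down pending; which exact path calls it is an assumption here. Since intel_display_power_flush_work_sync() is static to this file, external callers only see intel_display_power_flush_work().

	/* Make sure any asynchronously scheduled power down has completed. */
	intel_display_power_flush_work(i915);
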
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-/**
- * intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to put the reference for
- * @wakeref: wakeref acquired for the reference that is being released
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- */
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain,
-                            intel_wakeref_t wakeref)
-{
-       __intel_display_power_put(dev_priv, domain);
-       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-}
-#endif
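
For contrast with the unchecked variant earlier, a sketch of the checked pairing this CONFIG_DRM_I915_DEBUG_RUNTIME_PM build enforces: the wakeref returned by the get travels to the matching put, so leaked references can be attributed. The domain is again only an example.

	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);

	/* ... access the VGA plane registers ... */

	/* Checked release: the tracked wakeref is returned with the domain. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
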
-
-#define I830_PIPES_POWER_DOMAINS (             \
-       BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DISPLAY_POWER_DOMAINS (            \
-       BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
-       BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
-       BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_GMBUS) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DISPLAY_POWER_DOMAINS (            \
-       BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
-       BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
-       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
-       BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |           \
-       BIT_ULL(POWER_DOMAIN_GMBUS) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS (         \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
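
These *_POWER_DOMAINS masks are plain u64 bitmasks, so they can be walked with the same for_each_power_domain() iterator used by release_async_put_domains() above. A small illustrative sketch (the pr_info() is only for demonstration):

	enum intel_display_power_domain domain;

	for_each_power_domain(domain, VLV_DISPLAY_POWER_DOMAINS)
		pr_info("VLV display well covers %s\n",
			intel_display_power_domain_str(domain));
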
-
-#define HSW_DISPLAY_POWER_DOMAINS (                    \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BDW_DISPLAY_POWER_DOMAINS (                    \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (         \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
-       SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (             \
-       BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_A_POWER_DOMAINS (                 \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_BC_POWER_DOMAINS (                        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define GLK_DPIO_CMN_A_POWER_DOMAINS (                 \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_B_POWER_DOMAINS (                 \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_C_POWER_DOMAINS (                 \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (             \
-       GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
-       CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * ICL PW_0/PG_0 domains (HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - central power except FBC
- * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
- * ICL PW_1/PG_1 domains (HW/DMC control):
- * - DBUF function
- * - PIPE_A and its planes, except VGA
- * - transcoder EDP + PSR
- * - transcoder DSI
- * - DDI_A
- * - FBC
- */
-#define ICL_PW_4_POWER_DOMAINS (                       \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_INIT))
-       /* VDSC/joining */
-#define ICL_PW_3_POWER_DOMAINS (                       \
-       ICL_PW_4_POWER_DOMAINS |                        \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT1) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT2) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT3) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT4) |                \
-       BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-       /*
-        * - transcoder WD
-        * - KVMR (HW control)
-        */
-#define ICL_PW_2_POWER_DOMAINS (                       \
-       ICL_PW_3_POWER_DOMAINS |                        \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |             \
-       BIT_ULL(POWER_DOMAIN_INIT))
-       /*
-        * - KVMR (HW control)
-        */
-#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
-       ICL_PW_2_POWER_DOMAINS |                        \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
-#define ICL_DDI_IO_A_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define ICL_DDI_IO_B_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define ICL_DDI_IO_C_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define ICL_DDI_IO_D_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define ICL_DDI_IO_E_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define ICL_DDI_IO_F_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-
-#define ICL_AUX_A_IO_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_A))
-#define ICL_AUX_B_IO_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS (                   \
-       BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS (                        \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS (                        \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS (                        \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS (                        \
-       BIT_ULL(POWER_DOMAIN_AUX_TBT4))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = i9xx_always_on_power_well_noop,
-       .disable = i9xx_always_on_power_well_noop,
-       .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = chv_pipe_power_well_enable,
-       .disable = chv_pipe_power_well_disable,
-       .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = chv_dpio_cmn_power_well_enable,
-       .disable = chv_dpio_cmn_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-};
-
-static const struct i915_power_well_ops i830_pipes_power_well_ops = {
-       .sync_hw = i830_pipes_power_well_sync_hw,
-       .enable = i830_pipes_power_well_enable,
-       .disable = i830_pipes_power_well_disable,
-       .is_enabled = i830_pipes_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i830_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "pipes",
-               .domains = I830_PIPES_POWER_DOMAINS,
-               .ops = &i830_pipes_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
-       .sync_hw = hsw_power_well_sync_hw,
-       .enable = hsw_power_well_enable,
-       .disable = hsw_power_well_disable,
-       .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = gen9_dc_off_power_well_enable,
-       .disable = gen9_dc_off_power_well_disable,
-       .is_enabled = gen9_dc_off_power_well_enabled,
-};
-
-static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = bxt_dpio_cmn_power_well_enable,
-       .disable = bxt_dpio_cmn_power_well_disable,
-       .is_enabled = bxt_dpio_cmn_power_well_enabled,
-};
-
-static const struct i915_power_well_regs hsw_power_well_regs = {
-       .bios   = HSW_PWR_WELL_CTL1,
-       .driver = HSW_PWR_WELL_CTL2,
-       .kvmr   = HSW_PWR_WELL_CTL3,
-       .debug  = HSW_PWR_WELL_CTL4,
-};
-
-static const struct i915_power_well_desc hsw_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "display",
-               .domains = HSW_DISPLAY_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = HSW_DISP_PW_GLOBAL,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
-                       .hsw.has_vga = true,
-               },
-       },
-};
-
-static const struct i915_power_well_desc bdw_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "display",
-               .domains = BDW_DISPLAY_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = HSW_DISP_PW_GLOBAL,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-               },
-       },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = vlv_display_power_well_enable,
-       .disable = vlv_display_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = vlv_dpio_cmn_power_well_enable,
-       .disable = vlv_dpio_cmn_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
-       .sync_hw = i9xx_power_well_sync_hw_noop,
-       .enable = vlv_power_well_enable,
-       .disable = vlv_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc vlv_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "display",
-               .domains = VLV_DISPLAY_POWER_DOMAINS,
-               .ops = &vlv_display_power_well_ops,
-               .id = VLV_DISP_PW_DISP2D,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
-               },
-       },
-       {
-               .name = "dpio-tx-b-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
-               },
-       },
-       {
-               .name = "dpio-tx-b-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
-               },
-       },
-       {
-               .name = "dpio-tx-c-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
-               },
-       },
-       {
-               .name = "dpio-tx-c-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
-               },
-       },
-       {
-               .name = "dpio-common",
-               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-               .ops = &vlv_dpio_cmn_power_well_ops,
-               .id = VLV_DISP_PW_DPIO_CMN_BC,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
-               },
-       },
-};
-
-static const struct i915_power_well_desc chv_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "display",
-               /*
-                * Pipe A power well is the new disp2d well. Pipe B and C
-                * power wells don't actually exist. Pipe A power well is
-                * required for any pipe to work.
-                */
-               .domains = CHV_DISPLAY_POWER_DOMAINS,
-               .ops = &chv_pipe_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "dpio-common-bc",
-               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
-               .ops = &chv_dpio_cmn_power_well_ops,
-               .id = VLV_DISP_PW_DPIO_CMN_BC,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
-               },
-       },
-       {
-               .name = "dpio-common-d",
-               .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
-               .ops = &chv_dpio_cmn_power_well_ops,
-               .id = CHV_DISP_PW_DPIO_CMN_D,
-               {
-                       .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
-               },
-       },
-};
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
-                                        enum i915_power_well_id power_well_id)
-{
-       struct i915_power_well *power_well;
-       bool ret;
-
-       power_well = lookup_power_well(dev_priv, power_well_id);
-       ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
-
-       return ret;
-}
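
A hedged sketch of querying a single well through this helper, using one of the well IDs from the descriptor tables; the reason for the check and any required locking are call-site specific and assumed here:

	if (!intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
		DRM_DEBUG_KMS("power well 2 is disabled\n");
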
-
-static const struct i915_power_well_desc skl_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "MISC IO power well",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_MISC_IO,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 2",
-               .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DDI A/E IO power well",
-               .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
-               },
-       },
-       {
-               .name = "DDI B IO power well",
-               .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO power well",
-               .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
-               },
-       },
-       {
-               .name = "DDI D IO power well",
-               .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
-               },
-       },
-};
-
-static const struct i915_power_well_desc bxt_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 2",
-               .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "dpio-common-a",
-               .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
-               .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = BXT_DISP_PW_DPIO_CMN_A,
-               {
-                       .bxt.phy = DPIO_PHY1,
-               },
-       },
-       {
-               .name = "dpio-common-bc",
-               .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
-               .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = VLV_DISP_PW_DPIO_CMN_BC,
-               {
-                       .bxt.phy = DPIO_PHY0,
-               },
-       },
-};
-
-static const struct i915_power_well_desc glk_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 2",
-               .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "dpio-common-a",
-               .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
-               .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = BXT_DISP_PW_DPIO_CMN_A,
-               {
-                       .bxt.phy = DPIO_PHY1,
-               },
-       },
-       {
-               .name = "dpio-common-b",
-               .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
-               .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = VLV_DISP_PW_DPIO_CMN_BC,
-               {
-                       .bxt.phy = DPIO_PHY0,
-               },
-       },
-       {
-               .name = "dpio-common-c",
-               .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
-               .ops = &bxt_dpio_cmn_power_well_ops,
-               .id = GLK_DISP_PW_DPIO_CMN_C,
-               {
-                       .bxt.phy = DPIO_PHY2,
-               },
-       },
-       {
-               .name = "AUX A",
-               .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
-               },
-       },
-       {
-               .name = "AUX B",
-               .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
-               },
-       },
-       {
-               .name = "AUX C",
-               .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
-               },
-       },
-       {
-               .name = "DDI A IO power well",
-               .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
-               },
-       },
-       {
-               .name = "DDI B IO power well",
-               .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO power well",
-               .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
-               },
-       },
-};
-
-static const struct i915_power_well_desc cnl_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "AUX A",
-               .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
-               },
-       },
-       {
-               .name = "AUX B",
-               .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
-               },
-       },
-       {
-               .name = "AUX C",
-               .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
-               },
-       },
-       {
-               .name = "AUX D",
-               .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 2",
-               .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DDI A IO power well",
-               .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
-               },
-       },
-       {
-               .name = "DDI B IO power well",
-               .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO power well",
-               .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
-               },
-       },
-       {
-               .name = "DDI D IO power well",
-               .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
-               },
-       },
-       {
-               .name = "DDI F IO power well",
-               .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
-               },
-       },
-       {
-               .name = "AUX F",
-               .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
-               },
-       },
-};
-
-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
-       .sync_hw = hsw_power_well_sync_hw,
-       .enable = icl_combo_phy_aux_power_well_enable,
-       .disable = icl_combo_phy_aux_power_well_disable,
-       .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
-       .sync_hw = hsw_power_well_sync_hw,
-       .enable = icl_tc_phy_aux_power_well_enable,
-       .disable = hsw_power_well_disable,
-       .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_regs icl_aux_power_well_regs = {
-       .bios   = ICL_PWR_WELL_CTL_AUX1,
-       .driver = ICL_PWR_WELL_CTL_AUX2,
-       .debug  = ICL_PWR_WELL_CTL_AUX4,
-};
-
-static const struct i915_power_well_regs icl_ddi_power_well_regs = {
-       .bios   = ICL_PWR_WELL_CTL_DDI1,
-       .driver = ICL_PWR_WELL_CTL_DDI2,
-       .debug  = ICL_PWR_WELL_CTL_DDI4,
-};
-
-static const struct i915_power_well_desc icl_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 2",
-               .domains = ICL_PW_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "power well 3",
-               .domains = ICL_PW_3_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DDI A IO",
-               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
-               },
-       },
-       {
-               .name = "DDI B IO",
-               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO",
-               .domains = ICL_DDI_IO_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
-               },
-       },
-       {
-               .name = "DDI D IO",
-               .domains = ICL_DDI_IO_D_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
-               },
-       },
-       {
-               .name = "DDI E IO",
-               .domains = ICL_DDI_IO_E_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
-               },
-       },
-       {
-               .name = "DDI F IO",
-               .domains = ICL_DDI_IO_F_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_ddi_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
-               },
-       },
-       {
-               .name = "AUX A",
-               .domains = ICL_AUX_A_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
-               },
-       },
-       {
-               .name = "AUX B",
-               .domains = ICL_AUX_B_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
-               },
-       },
-       {
-               .name = "AUX C",
-               .domains = ICL_AUX_C_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
-                       .hsw.is_tc_tbt = false,
-               },
-       },
-       {
-               .name = "AUX D",
-               .domains = ICL_AUX_D_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
-                       .hsw.is_tc_tbt = false,
-               },
-       },
-       {
-               .name = "AUX E",
-               .domains = ICL_AUX_E_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
-                       .hsw.is_tc_tbt = false,
-               },
-       },
-       {
-               .name = "AUX F",
-               .domains = ICL_AUX_F_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
-                       .hsw.is_tc_tbt = false,
-               },
-       },
-       {
-               .name = "AUX TBT1",
-               .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
-                       .hsw.is_tc_tbt = true,
-               },
-       },
-       {
-               .name = "AUX TBT2",
-               .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
-                       .hsw.is_tc_tbt = true,
-               },
-       },
-       {
-               .name = "AUX TBT3",
-               .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
-                       .hsw.is_tc_tbt = true,
-               },
-       },
-       {
-               .name = "AUX TBT4",
-               .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
-               .ops = &icl_tc_phy_aux_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &icl_aux_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
-                       .hsw.is_tc_tbt = true,
-               },
-       },
-       {
-               .name = "power well 4",
-               .domains = ICL_PW_4_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
-                       .hsw.has_fuses = true,
-                       .hsw.irq_pipe_mask = BIT(PIPE_C),
-               },
-       },
-};
-
-static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
-                                  int disable_power_well)
-{
-       if (disable_power_well >= 0)
-               return !!disable_power_well;
-
-       return 1;
-}
-
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
-                              int enable_dc)
-{
-       u32 mask;
-       int requested_dc;
-       int max_dc;
-
-       if (INTEL_GEN(dev_priv) >= 11) {
-               max_dc = 2;
-               /*
-                * DC9 has a separate HW flow from the rest of the DC states,
-                * not depending on the DMC firmware. It's needed by system
-                * suspend/resume, so allow it unconditionally.
-                */
-               mask = DC_STATE_EN_DC9;
-       } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
-               max_dc = 2;
-               mask = 0;
-       } else if (IS_GEN9_LP(dev_priv)) {
-               max_dc = 1;
-               mask = DC_STATE_EN_DC9;
-       } else {
-               max_dc = 0;
-               mask = 0;
-       }
-
-       if (!i915_modparams.disable_power_well)
-               max_dc = 0;
-
-       if (enable_dc >= 0 && enable_dc <= max_dc) {
-               requested_dc = enable_dc;
-       } else if (enable_dc == -1) {
-               requested_dc = max_dc;
-       } else if (enable_dc > max_dc && enable_dc <= 2) {
-               DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
-                             enable_dc, max_dc);
-               requested_dc = max_dc;
-       } else {
-               DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
-               requested_dc = max_dc;
-       }
-
-       if (requested_dc > 1)
-               mask |= DC_STATE_EN_UPTO_DC6;
-       if (requested_dc > 0)
-               mask |= DC_STATE_EN_UPTO_DC5;
-
-       DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
-
-       return mask;
-}
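
For a concrete reading of the clamping in get_allowed_dc_mask() above, here is a minimal standalone model (the DC9/DC5/DC6 bit values below are placeholders for illustration, not the real DC_STATE_EN_* register bits): on a gen11-style platform max_dc is 2 and DC9 is always allowed, so the default enable_dc of -1 yields DC9|DC5|DC6, while enable_dc=1 drops DC6.

/*
 * Standalone model of the requested-DC clamping; any out-of-range or
 * default (-1) request falls back to max_dc, as in the function above.
 */
#include <stdio.h>
#include <stdint.h>

#define DC9  (1u << 3)
#define DC5  (1u << 0)
#define DC6  (1u << 1)

static uint32_t allowed_mask(int enable_dc, int max_dc, uint32_t base)
{
	int requested = (enable_dc >= 0 && enable_dc <= max_dc) ?
			enable_dc : max_dc;

	if (requested > 1)
		base |= DC6;
	if (requested > 0)
		base |= DC5;
	return base;
}

int main(void)
{
	printf("enable_dc=-1 -> %#x\n", allowed_mask(-1, 2, DC9)); /* DC9|DC5|DC6 */
	printf("enable_dc=1  -> %#x\n", allowed_mask(1, 2, DC9));  /* DC9|DC5 */
	return 0;
}
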
-
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
-                 const struct i915_power_well_desc *power_well_descs,
-                 int power_well_count)
-{
-       u64 power_well_ids = 0;
-       int i;
-
-       power_domains->power_well_count = power_well_count;
-       power_domains->power_wells =
-                               kcalloc(power_well_count,
-                                       sizeof(*power_domains->power_wells),
-                                       GFP_KERNEL);
-       if (!power_domains->power_wells)
-               return -ENOMEM;
-
-       for (i = 0; i < power_well_count; i++) {
-               enum i915_power_well_id id = power_well_descs[i].id;
-
-               power_domains->power_wells[i].desc = &power_well_descs[i];
-
-               if (id == DISP_PW_ID_NONE)
-                       continue;
-
-               WARN_ON(id >= sizeof(power_well_ids) * 8);
-               WARN_ON(power_well_ids & BIT_ULL(id));
-               power_well_ids |= BIT_ULL(id);
-       }
-
-       return 0;
-}
-
-#define set_power_wells(power_domains, __power_well_descs) \
-       __set_power_wells(power_domains, __power_well_descs, \
-                         ARRAY_SIZE(__power_well_descs))
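
The set_power_wells() wrapper above is what ties the static per-platform descriptor tables (icl_power_wells and friends) to the runtime power_wells array. Below is a rough standalone sketch of that table-driven pattern, under stated simplifications: a made-up xyz_power_wells table, plain calloc() in place of kcalloc(), and a trimmed descriptor holding only a name and an id.

/*
 * Standalone sketch of the table-driven setup: one runtime object per
 * descriptor, plus a uniqueness check on the non-NONE ids, loosely
 * mirroring __set_power_wells() above. Names and ids are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PW_ID_NONE (-1)

struct pw_desc {
	const char *name;
	int id;				/* PW_ID_NONE or a unique id */
};

struct pw {
	const struct pw_desc *desc;
	int count;			/* enable/disable refcount */
};

static struct pw *register_power_wells(const struct pw_desc *descs, size_t n)
{
	struct pw *wells = calloc(n, sizeof(*wells));	/* kcalloc() stand-in */
	uint64_t ids = 0;
	size_t i;

	if (!wells)
		return NULL;

	for (i = 0; i < n; i++) {
		wells[i].desc = &descs[i];
		if (descs[i].id == PW_ID_NONE)
			continue;
		if (ids & (1ull << descs[i].id))	/* WARN_ON() stand-in */
			fprintf(stderr, "duplicate id %d\n", descs[i].id);
		ids |= 1ull << descs[i].id;
	}
	return wells;
}

int main(void)
{
	static const struct pw_desc xyz_power_wells[] = {
		{ .name = "always-on",    .id = PW_ID_NONE },
		{ .name = "power well 1", .id = 1 },
		{ .name = "power well 2", .id = 2 },
	};
	size_t n = sizeof(xyz_power_wells) / sizeof(xyz_power_wells[0]);
	struct pw *wells = register_power_wells(xyz_power_wells, n);

	if (wells)
		printf("registered %zu wells, first: %s\n", n, wells[0].desc->name);
	free(wells);
	return 0;
}
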
-
-/**
- * intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
- *
- * Initializes the power domain structures for @dev_priv depending upon the
- * supported platform.
- */
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       int err;
-
-       i915_modparams.disable_power_well =
-               sanitize_disable_power_well_option(dev_priv,
-                                                  i915_modparams.disable_power_well);
-       dev_priv->csr.allowed_dc_mask =
-               get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
-
-       BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
-       mutex_init(&power_domains->lock);
-
-       INIT_DELAYED_WORK(&power_domains->async_put_work,
-                         intel_display_power_put_async_work);
-
-       /*
-        * The enabling order will be from lower to higher indexed wells,
-        * the disabling order is reversed.
-        */
-       if (IS_GEN(dev_priv, 11)) {
-               err = set_power_wells(power_domains, icl_power_wells);
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               err = set_power_wells(power_domains, cnl_power_wells);
-
-               /*
-                * DDI and Aux IO get enabled for all ports
-                * regardless of their presence or use. So, in order to avoid
-                * timeouts, let's remove them from the list
-                * for the SKUs without port F.
-                */
-               if (!IS_CNL_WITH_PORT_F(dev_priv))
-                       power_domains->power_well_count -= 2;
-       } else if (IS_GEMINILAKE(dev_priv)) {
-               err = set_power_wells(power_domains, glk_power_wells);
-       } else if (IS_BROXTON(dev_priv)) {
-               err = set_power_wells(power_domains, bxt_power_wells);
-       } else if (IS_GEN9_BC(dev_priv)) {
-               err = set_power_wells(power_domains, skl_power_wells);
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               err = set_power_wells(power_domains, chv_power_wells);
-       } else if (IS_BROADWELL(dev_priv)) {
-               err = set_power_wells(power_domains, bdw_power_wells);
-       } else if (IS_HASWELL(dev_priv)) {
-               err = set_power_wells(power_domains, hsw_power_wells);
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               err = set_power_wells(power_domains, vlv_power_wells);
-       } else if (IS_I830(dev_priv)) {
-               err = set_power_wells(power_domains, i830_power_wells);
-       } else {
-               err = set_power_wells(power_domains, i9xx_always_on_power_well);
-       }
-
-       return err;
-}
-
-/**
- * intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
- *
- * Release any resources acquired by intel_power_domains_init()
- */
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
-{
-       kfree(dev_priv->power_domains.power_wells);
-}
-
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-
-       mutex_lock(&power_domains->lock);
-       for_each_power_well(dev_priv, power_well) {
-               power_well->desc->ops->sync_hw(dev_priv, power_well);
-               power_well->hw_enabled =
-                       power_well->desc->ops->is_enabled(dev_priv, power_well);
-       }
-       mutex_unlock(&power_domains->lock);
-}
-
-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
-                         i915_reg_t reg, bool enable)
-{
-       u32 val, status;
-
-       val = I915_READ(reg);
-       val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
-       udelay(10);
-
-       status = I915_READ(reg) & DBUF_POWER_STATE;
-       if ((enable && !status) || (!enable && status)) {
-               DRM_ERROR("DBus power %s timeout!\n",
-                         enable ? "enable" : "disable");
-               return false;
-       }
-       return true;
-}
-
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
-{
-       intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
-}
-
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
-{
-       intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) < 11)
-               return 1;
-       return 2;
-}
-
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
-                           u8 req_slices)
-{
-       const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-       bool ret;
-
-       if (req_slices > intel_dbuf_max_slices(dev_priv)) {
-               DRM_ERROR("Invalid number of dbuf slices requested\n");
-               return;
-       }
-
-       if (req_slices == hw_enabled_slices || req_slices == 0)
-               return;
-
-       if (req_slices > hw_enabled_slices)
-               ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
-       else
-               ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
-
-       if (ret)
-               dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
-}
-
-static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
-       I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
-       POSTING_READ(DBUF_CTL_S2);
-
-       udelay(10);
-
-       if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-           !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
-               DRM_ERROR("DBuf power enable timeout\n");
-       else
-               /*
-                * FIXME: for now pretend that we only have 1 slice, see
-                * intel_enabled_dbuf_slices_num().
-                */
-               dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
-}
-
-static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
-       I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
-       POSTING_READ(DBUF_CTL_S2);
-
-       udelay(10);
-
-       if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-           (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
-               DRM_ERROR("DBuf power disable timeout!\n");
-       else
-               /*
-                * FIXME: for now pretend that the first slice is always
-                * enabled, see intel_enabled_dbuf_slices_num().
-                */
-               dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
-}
-
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
-             MBUS_ABOX_BT_CREDIT_POOL2(16) |
-             MBUS_ABOX_B_CREDIT(1) |
-             MBUS_ABOX_BW_CREDIT(1);
-
-       I915_WRITE(MBUS_ABOX_CTL, val);
-}
-
-static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
-{
-       u32 val = I915_READ(LCPLL_CTL);
-
-       /*
-        * The LCPLL register should be turned on by the BIOS. For now
-        * let's just check its state and print errors in case
-        * something is wrong.  Don't even try to turn it on.
-        */
-
-       if (val & LCPLL_CD_SOURCE_FCLK)
-               DRM_ERROR("CDCLK source is not LCPLL\n");
-
-       if (val & LCPLL_PLL_DISABLE)
-               DRM_ERROR("LCPLL is disabled\n");
-
-       if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
-               DRM_ERROR("LCPLL not using non-SSC reference\n");
-}
-
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_crtc *crtc;
-
-       for_each_intel_crtc(dev, crtc)
-               I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
-                               pipe_name(crtc->pipe));
-
-       I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
-                       "Display power well on\n");
-       I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
-                       "SPLL enabled\n");
-       I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
-                       "WRPLL1 enabled\n");
-       I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
-                       "WRPLL2 enabled\n");
-       I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
-                       "Panel power on\n");
-       I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
-                       "CPU PWM1 enabled\n");
-       if (IS_HASWELL(dev_priv))
-               I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
-                               "CPU PWM2 enabled\n");
-       I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
-                       "PCH PWM1 enabled\n");
-       I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
-                       "Utility pin enabled\n");
-       I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
-                       "PCH GTC enabled\n");
-
-       /*
-        * In theory we can still leave IRQs enabled, as long as only the HPD
-        * interrupts remain enabled. We used to check for that, but since it's
-        * gen-specific and since we only disable LCPLL after we fully disable
-        * the interrupts, the check below should be enough.
-        */
-       I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
-}
-
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
-{
-       if (IS_HASWELL(dev_priv))
-               return I915_READ(D_COMP_HSW);
-       else
-               return I915_READ(D_COMP_BDW);
-}
-
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
-{
-       if (IS_HASWELL(dev_priv)) {
-               if (sandybridge_pcode_write(dev_priv,
-                                           GEN6_PCODE_WRITE_D_COMP, val))
-                       DRM_DEBUG_KMS("Failed to write to D_COMP\n");
-       } else {
-               I915_WRITE(D_COMP_BDW, val);
-               POSTING_READ(D_COMP_BDW);
-       }
-}
-
-/*
- * This function implements pieces of two sequences from BSpec:
- * - Sequence for display software to disable LCPLL
- * - Sequence for display software to allow package C8+
- * The steps implemented here are just the steps that actually touch the LCPLL
- * register. Callers should take care of disabling all the display engine
- * functions, doing the mode unset, fixing interrupts, etc.
- */
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-                             bool switch_to_fclk, bool allow_power_down)
-{
-       u32 val;
-
-       assert_can_disable_lcpll(dev_priv);
-
-       val = I915_READ(LCPLL_CTL);
-
-       if (switch_to_fclk) {
-               val |= LCPLL_CD_SOURCE_FCLK;
-               I915_WRITE(LCPLL_CTL, val);
-
-               if (wait_for_us(I915_READ(LCPLL_CTL) &
-                               LCPLL_CD_SOURCE_FCLK_DONE, 1))
-                       DRM_ERROR("Switching to FCLK failed\n");
-
-               val = I915_READ(LCPLL_CTL);
-       }
-
-       val |= LCPLL_PLL_DISABLE;
-       I915_WRITE(LCPLL_CTL, val);
-       POSTING_READ(LCPLL_CTL);
-
-       if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
-                                   LCPLL_PLL_LOCK, 0, 1))
-               DRM_ERROR("LCPLL still locked\n");
-
-       val = hsw_read_dcomp(dev_priv);
-       val |= D_COMP_COMP_DISABLE;
-       hsw_write_dcomp(dev_priv, val);
-       ndelay(100);
-
-       if (wait_for((hsw_read_dcomp(dev_priv) &
-                     D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
-               DRM_ERROR("D_COMP RCOMP still in progress\n");
-
-       if (allow_power_down) {
-               val = I915_READ(LCPLL_CTL);
-               val |= LCPLL_POWER_DOWN_ALLOW;
-               I915_WRITE(LCPLL_CTL, val);
-               POSTING_READ(LCPLL_CTL);
-       }
-}
-
-/*
- * Fully restores LCPLL, disallowing power down and switching back to LCPLL
- * source.
- */
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = I915_READ(LCPLL_CTL);
-
-       if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
-                   LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
-               return;
-
-       /*
-        * Make sure we're not in PC8 state before disabling PC8, otherwise
-        * we'll hang the machine. To prevent PC8 state, just enable force_wake.
-        */
-       intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-       if (val & LCPLL_POWER_DOWN_ALLOW) {
-               val &= ~LCPLL_POWER_DOWN_ALLOW;
-               I915_WRITE(LCPLL_CTL, val);
-               POSTING_READ(LCPLL_CTL);
-       }
-
-       val = hsw_read_dcomp(dev_priv);
-       val |= D_COMP_COMP_FORCE;
-       val &= ~D_COMP_COMP_DISABLE;
-       hsw_write_dcomp(dev_priv, val);
-
-       val = I915_READ(LCPLL_CTL);
-       val &= ~LCPLL_PLL_DISABLE;
-       I915_WRITE(LCPLL_CTL, val);
-
-       if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
-                                   LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
-               DRM_ERROR("LCPLL not locked yet\n");
-
-       if (val & LCPLL_CD_SOURCE_FCLK) {
-               val = I915_READ(LCPLL_CTL);
-               val &= ~LCPLL_CD_SOURCE_FCLK;
-               I915_WRITE(LCPLL_CTL, val);
-
-               if (wait_for_us((I915_READ(LCPLL_CTL) &
-                                LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
-                       DRM_ERROR("Switching back to LCPLL failed\n");
-       }
-
-       intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
-       intel_update_cdclk(dev_priv);
-       intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-}
-
-/*
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states. Our driver only allows PC8+ when going into runtime PM.
- *
- * The requirements for PC8+ are that all the outputs are disabled, the power
- * well is disabled and most interrupts are disabled, and these are also
- * requirements for runtime PM. When these conditions are met, we manually do
- * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
- * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
- * hang the machine.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6. Notice that this happens even
- * if we don't put the device in PCI D3 state (which is what currently happens
- * because of the runtime PM support).
- *
- * For more, read "Display Sequences for Package C8" on the hardware
- * documentation.
- */
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       DRM_DEBUG_KMS("Enabling package C8+\n");
-
-       if (HAS_PCH_LPT_LP(dev_priv)) {
-               val = I915_READ(SOUTH_DSPCLK_GATE_D);
-               val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
-               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
-       }
-
-       lpt_disable_clkout_dp(dev_priv);
-       hsw_disable_lcpll(dev_priv, true, true);
-}
-
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       DRM_DEBUG_KMS("Disabling package C8+\n");
-
-       hsw_restore_lcpll(dev_priv);
-       intel_init_pch_refclk(dev_priv);
-
-       if (HAS_PCH_LPT_LP(dev_priv)) {
-               val = I915_READ(SOUTH_DSPCLK_GATE_D);
-               val |= PCH_LP_PARTITION_LEVEL_DISABLE;
-               I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
-       }
-}
-
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
-                                     bool enable)
-{
-       i915_reg_t reg;
-       u32 reset_bits, val;
-
-       if (IS_IVYBRIDGE(dev_priv)) {
-               reg = GEN7_MSG_CTL;
-               reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
-       } else {
-               reg = HSW_NDE_RSTWRN_OPT;
-               reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
-       }
-
-       val = I915_READ(reg);
-
-       if (enable)
-               val |= reset_bits;
-       else
-               val &= ~reset_bits;
-
-       I915_WRITE(reg, val);
-}
-
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
-                                 bool resume)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /* enable PCH reset handshake */
-       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
-       /* enable PG1 and Misc I/O */
-       mutex_lock(&power_domains->lock);
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-       intel_power_well_enable(dev_priv, well);
-
-       mutex_unlock(&power_domains->lock);
-
-       intel_cdclk_init(dev_priv);
-
-       gen9_dbuf_enable(dev_priv);
-
-       if (resume && dev_priv->csr.dmc_payload)
-               intel_csr_load_program(dev_priv);
-}
-
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       gen9_dbuf_disable(dev_priv);
-
-       intel_cdclk_uninit(dev_priv);
-
-       /* The spec doesn't call for removing the reset handshake flag */
-       /* disable PG1 and Misc I/O */
-
-       mutex_lock(&power_domains->lock);
-
-       /*
-        * BSpec says to keep the MISC IO power well enabled here, only
-        * remove our request for power well 1.
-        * Note that even though the driver's request is removed, power well 1
-        * may stay enabled after this due to DMC's own request on it.
-        */
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-
-       mutex_unlock(&power_domains->lock);
-
-       usleep_range(10, 30);           /* 10 us delay per Bspec */
-}
-
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
-                          bool resume)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /*
-        * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
-        * or else the reset will hang because there is no PCH to respond.
-        * Move the handshake programming to the initialization sequence;
-        * previously it was left up to the BIOS.
-        */
-       intel_pch_reset_handshake(dev_priv, false);
-
-       /* Enable PG1 */
-       mutex_lock(&power_domains->lock);
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-
-       mutex_unlock(&power_domains->lock);
-
-       intel_cdclk_init(dev_priv);
-
-       gen9_dbuf_enable(dev_priv);
-
-       if (resume && dev_priv->csr.dmc_payload)
-               intel_csr_load_program(dev_priv);
-}
-
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       gen9_dbuf_disable(dev_priv);
-
-       intel_cdclk_uninit(dev_priv);
-
-       /* The spec doesn't call for removing the reset handshake flag */
-
-       /*
-        * Disable PW1 (PG1).
-        * Note that even though the driver's request is removed, power well 1
-        * may stay enabled after this due to DMC's own request on it.
-        */
-       mutex_lock(&power_domains->lock);
-
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-
-       mutex_unlock(&power_domains->lock);
-
-       usleep_range(10, 30);           /* 10 us delay per Bspec */
-}
-
-static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /* 1. Enable PCH Reset Handshake */
-       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
-       /* 2-3. */
-       intel_combo_phy_init(dev_priv);
-
-       /*
-        * 4. Enable Power Well 1 (PG1).
-        *    The AUX IO power wells will be enabled on demand.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-
-       /* 5. Enable CD clock */
-       intel_cdclk_init(dev_priv);
-
-       /* 6. Enable DBUF */
-       gen9_dbuf_enable(dev_priv);
-
-       if (resume && dev_priv->csr.dmc_payload)
-               intel_csr_load_program(dev_priv);
-}
-
-static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /* 1. Disable all display engine functions -> already done */
-
-       /* 2. Disable DBUF */
-       gen9_dbuf_disable(dev_priv);
-
-       /* 3. Disable CD clock */
-       intel_cdclk_uninit(dev_priv);
-
-       /*
-        * 4. Disable Power Well 1 (PG1).
-        *    The AUX IO power wells are toggled on demand, so they are already
-        *    disabled at this point.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-
-       usleep_range(10, 30);           /* 10 us delay per Bspec */
-
-       /* 5. */
-       intel_combo_phy_uninit(dev_priv);
-}
-
-void icl_display_core_init(struct drm_i915_private *dev_priv,
-                          bool resume)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /* 1. Enable PCH reset handshake. */
-       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
-       /* 2. Initialize all combo phys */
-       intel_combo_phy_init(dev_priv);
-
-       /*
-        * 3. Enable Power Well 1 (PG1).
-        *    The AUX IO power wells will be enabled on demand.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-
-       /* 4. Enable CDCLK. */
-       intel_cdclk_init(dev_priv);
-
-       /* 5. Enable DBUF. */
-       icl_dbuf_enable(dev_priv);
-
-       /* 6. Setup MBUS. */
-       icl_mbus_init(dev_priv);
-
-       if (resume && dev_priv->csr.dmc_payload)
-               intel_csr_load_program(dev_priv);
-}
-
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /* 1. Disable all display engine functions -> already done */
-
-       /* 2. Disable DBUF */
-       icl_dbuf_disable(dev_priv);
-
-       /* 3. Disable CD clock */
-       intel_cdclk_uninit(dev_priv);
-
-       /*
-        * 4. Disable Power Well 1 (PG1).
-        *    The AUX IO power wells are toggled on demand, so they are already
-        *    disabled at this point.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-
-       /* 5. */
-       intel_combo_phy_uninit(dev_priv);
-}
-
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *cmn_bc =
-               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
-       struct i915_power_well *cmn_d =
-               lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
-
-       /*
-        * DISPLAY_PHY_CONTROL can get corrupted if read. As a
-        * workaround never ever read DISPLAY_PHY_CONTROL, and
-        * instead maintain a shadow copy ourselves. Use the actual
-        * power well state and lane status to reconstruct the
-        * expected initial value.
-        */
-       dev_priv->chv_phy_control =
-               PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
-               PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
-               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
-               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
-               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
-
-       /*
-        * If all lanes are disabled we leave the override disabled
-        * with all power down bits cleared to match the state we
-        * would use after disabling the port. Otherwise enable the
-        * override and set the lane powerdown bits according to the
-        * current lane status.
-        */
-       if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
-               u32 status = I915_READ(DPLL(PIPE_A));
-               unsigned int mask;
-
-               mask = status & DPLL_PORTB_READY_MASK;
-               if (mask == 0xf)
-                       mask = 0x0;
-               else
-                       dev_priv->chv_phy_control |=
-                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
-
-               dev_priv->chv_phy_control |=
-                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
-
-               mask = (status & DPLL_PORTC_READY_MASK) >> 4;
-               if (mask == 0xf)
-                       mask = 0x0;
-               else
-                       dev_priv->chv_phy_control |=
-                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
-
-               dev_priv->chv_phy_control |=
-                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
-
-               dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
-
-               dev_priv->chv_phy_assert[DPIO_PHY0] = false;
-       } else {
-               dev_priv->chv_phy_assert[DPIO_PHY0] = true;
-       }
-
-       if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
-               u32 status = I915_READ(DPIO_PHY_STATUS);
-               unsigned int mask;
-
-               mask = status & DPLL_PORTD_READY_MASK;
-
-               if (mask == 0xf)
-                       mask = 0x0;
-               else
-                       dev_priv->chv_phy_control |=
-                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
-
-               dev_priv->chv_phy_control |=
-                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
-
-               dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
-
-               dev_priv->chv_phy_assert[DPIO_PHY1] = false;
-       } else {
-               dev_priv->chv_phy_assert[DPIO_PHY1] = true;
-       }
-
-       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
-       DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
-                     dev_priv->chv_phy_control);
-}
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *cmn =
-               lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
-       struct i915_power_well *disp2d =
-               lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
-
-       /* If the display might already be active, skip this */
-       if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
-           disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
-           I915_READ(DPIO_CTL) & DPIO_CMNRST)
-               return;
-
-       DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
-       /* cmnlane needs DPLL registers */
-       disp2d->desc->ops->enable(dev_priv, disp2d);
-
-       /*
-        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
-        * Need to assert and de-assert PHY SB reset by gating the
-        * common lane power, then un-gating it.
-        * Simply ungating isn't enough to reset the PHY enough to get
-        * ports and lanes running.
-        */
-       cmn->desc->ops->disable(dev_priv, cmn);
-}
-
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
-{
-       bool ret;
-
-       vlv_punit_get(dev_priv);
-       ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
-       vlv_punit_put(dev_priv);
-
-       return ret;
-}
-
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
-{
-       WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
-            "VED not power gated\n");
-}
-
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
-{
-       static const struct pci_device_id isp_ids[] = {
-               {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
-               {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
-               {}
-       };
-
-       WARN(!pci_dev_present(isp_ids) &&
-            !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
-            "ISP not power gated\n");
-}
-
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
-
-/**
- * intel_power_domains_init_hw - initialize hardware power domain state
- * @i915: i915 device instance
- * @resume: Called from resume code paths or not
- *
- * This function initializes the hardware power domain state and enables all
- * power wells belonging to the INIT power domain. Power wells in other
- * domains (and not in the INIT domain) are referenced or disabled by
- * intel_modeset_readout_hw_state(). After that the reference count of each
- * power well must match its HW enabled state, see
- * intel_power_domains_verify_state().
- *
- * It will return with power domains disabled (to be enabled later by
- * intel_power_domains_enable()) and must be paired with
- * intel_power_domains_fini_hw().
- */
-void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-
-       power_domains->initializing = true;
-
-       if (INTEL_GEN(i915) >= 11) {
-               icl_display_core_init(i915, resume);
-       } else if (IS_CANNONLAKE(i915)) {
-               cnl_display_core_init(i915, resume);
-       } else if (IS_GEN9_BC(i915)) {
-               skl_display_core_init(i915, resume);
-       } else if (IS_GEN9_LP(i915)) {
-               bxt_display_core_init(i915, resume);
-       } else if (IS_CHERRYVIEW(i915)) {
-               mutex_lock(&power_domains->lock);
-               chv_phy_control_init(i915);
-               mutex_unlock(&power_domains->lock);
-               assert_isp_power_gated(i915);
-       } else if (IS_VALLEYVIEW(i915)) {
-               mutex_lock(&power_domains->lock);
-               vlv_cmnlane_wa(i915);
-               mutex_unlock(&power_domains->lock);
-               assert_ved_power_gated(i915);
-               assert_isp_power_gated(i915);
-       } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
-               hsw_assert_cdclk(i915);
-               intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
-       } else if (IS_IVYBRIDGE(i915)) {
-               intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
-       }
-
-       /*
-        * Keep all power wells enabled for any dependent HW access during
-        * initialization and to make sure we keep BIOS-enabled display HW
-        * resources powered until display HW readout is complete. We drop
-        * this reference in intel_power_domains_enable().
-        */
-       power_domains->wakeref =
-               intel_display_power_get(i915, POWER_DOMAIN_INIT);
-
-       /* Disable power well support if the user asked for it. */
-       if (!i915_modparams.disable_power_well)
-               intel_display_power_get(i915, POWER_DOMAIN_INIT);
-       intel_power_domains_sync_hw(i915);
-
-       power_domains->initializing = false;
-}
-
-/**
- * intel_power_domains_fini_hw - deinitialize hw power domain state
- * @i915: i915 device instance
- *
- * De-initializes the display power domain HW state. It also ensures that the
- * device stays powered up so that the driver can be reloaded.
- *
- * It must be called with power domains already disabled (after a call to
- * intel_power_domains_disable()) and must be paired with
- * intel_power_domains_init_hw().
- */
-void intel_power_domains_fini_hw(struct drm_i915_private *i915)
-{
-       intel_wakeref_t wakeref __maybe_unused =
-               fetch_and_zero(&i915->power_domains.wakeref);
-
-       /* Remove the refcount we took to keep power well support disabled. */
-       if (!i915_modparams.disable_power_well)
-               intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
-
-       intel_display_power_flush_work_sync(i915);
-
-       intel_power_domains_verify_state(i915);
-
-       /* Keep the power well enabled, but cancel its rpm wakeref. */
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
-/**
- * intel_power_domains_enable - enable toggling of display power wells
- * @i915: i915 device instance
- *
- * Enable the on-demand enabling/disabling of the display power wells. Note that
- * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
- * only at specific points of the display modeset sequence, thus they are not
- * affected by the intel_power_domains_enable()/disable() calls. The purpose
- * of this function is to keep the rest of the power wells enabled until the end
- * of display HW readout (which will acquire the power references reflecting
- * the current HW state).
- */
-void intel_power_domains_enable(struct drm_i915_private *i915)
-{
-       intel_wakeref_t wakeref __maybe_unused =
-               fetch_and_zero(&i915->power_domains.wakeref);
-
-       intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
-       intel_power_domains_verify_state(i915);
-}
-
-/**
- * intel_power_domains_disable - disable toggling of display power wells
- * @i915: i915 device instance
- *
- * Disable the on-demand enabling/disabling of the display power wells. See
- * intel_power_domains_enable() for which power wells this call controls.
- */
-void intel_power_domains_disable(struct drm_i915_private *i915)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-
-       WARN_ON(power_domains->wakeref);
-       power_domains->wakeref =
-               intel_display_power_get(i915, POWER_DOMAIN_INIT);
-
-       intel_power_domains_verify_state(i915);
-}
-
-/**
- * intel_power_domains_suspend - suspend power domain state
- * @i915: i915 device instance
- * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
- *
- * This function prepares the hardware power domain state before entering
- * system suspend.
- *
- * It must be called with power domains already disabled (after a call to
- * intel_power_domains_disable()) and paired with intel_power_domains_resume().
- */
-void intel_power_domains_suspend(struct drm_i915_private *i915,
-                                enum i915_drm_suspend_mode suspend_mode)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-       intel_wakeref_t wakeref __maybe_unused =
-               fetch_and_zero(&power_domains->wakeref);
-
-       intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
-
-       /*
-        * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
-        * support, don't manually deinit the power domains. This also means the
-        * CSR/DMC firmware will stay active; it will power down any HW
-        * resources as required and also enable deeper system power states
-        * that would be blocked if the firmware was inactive.
-        */
-       if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
-           suspend_mode == I915_DRM_SUSPEND_IDLE &&
-           i915->csr.dmc_payload) {
-               intel_display_power_flush_work(i915);
-               intel_power_domains_verify_state(i915);
-               return;
-       }
-
-       /*
-        * Even if power well support was disabled we still want to disable
-        * power wells if power domains must be deinitialized for suspend.
-        */
-       if (!i915_modparams.disable_power_well)
-               intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
-
-       intel_display_power_flush_work(i915);
-       intel_power_domains_verify_state(i915);
-
-       if (INTEL_GEN(i915) >= 11)
-               icl_display_core_uninit(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_display_core_uninit(i915);
-       else if (IS_GEN9_BC(i915))
-               skl_display_core_uninit(i915);
-       else if (IS_GEN9_LP(i915))
-               bxt_display_core_uninit(i915);
-
-       power_domains->display_core_suspended = true;
-}
-
-/**
- * intel_power_domains_resume - resume power domain state
- * @i915: i915 device instance
- *
- * This function resumes the hardware power domain state during system resume.
- *
- * It will return with power domain support disabled (to be enabled later by
- * intel_power_domains_enable()) and must be paired with
- * intel_power_domains_suspend().
- */
-void intel_power_domains_resume(struct drm_i915_private *i915)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-
-       if (power_domains->display_core_suspended) {
-               intel_power_domains_init_hw(i915, true);
-               power_domains->display_core_suspended = false;
-       } else {
-               WARN_ON(power_domains->wakeref);
-               power_domains->wakeref =
-                       intel_display_power_get(i915, POWER_DOMAIN_INIT);
-       }
-
-       intel_power_domains_verify_state(i915);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static void intel_power_domains_dump_info(struct drm_i915_private *i915)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-       struct i915_power_well *power_well;
-
-       for_each_power_well(i915, power_well) {
-               enum intel_display_power_domain domain;
-
-               DRM_DEBUG_DRIVER("%-25s %d\n",
-                                power_well->desc->name, power_well->count);
-
-               for_each_power_domain(domain, power_well->desc->domains)
-                       DRM_DEBUG_DRIVER("  %-23s %d\n",
-                                        intel_display_power_domain_str(domain),
-                                        power_domains->domain_use_count[domain]);
-       }
-}
-
-/**
- * intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @i915: i915 device instance
- *
- * Verify if the reference count of each power well matches its HW enabled
- * state and the total refcount of the domains it belongs to. This must be
- * called after modeset HW state sanitization, which is responsible for
- * acquiring reference counts for any power wells in use and disabling the
- * ones left on by BIOS but not required by any active output.
- */
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
-{
-       struct i915_power_domains *power_domains = &i915->power_domains;
-       struct i915_power_well *power_well;
-       bool dump_domain_info;
-
-       mutex_lock(&power_domains->lock);
-
-       verify_async_put_domains_state(power_domains);
-
-       dump_domain_info = false;
-       for_each_power_well(i915, power_well) {
-               enum intel_display_power_domain domain;
-               int domains_count;
-               bool enabled;
-
-               enabled = power_well->desc->ops->is_enabled(i915, power_well);
-               if ((power_well->count || power_well->desc->always_on) !=
-                   enabled)
-                       DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
-                                 power_well->desc->name,
-                                 power_well->count, enabled);
-
-               domains_count = 0;
-               for_each_power_domain(domain, power_well->desc->domains)
-                       domains_count += power_domains->domain_use_count[domain];
-
-               if (power_well->count != domains_count) {
-                       DRM_ERROR("power well %s refcount/domain refcount mismatch "
-                                 "(refcount %d/domains refcount %d)\n",
-                                 power_well->desc->name, power_well->count,
-                                 domains_count);
-                       dump_domain_info = true;
-               }
-       }
-
-       if (dump_domain_info) {
-               static bool dumped;
-
-               if (!dumped) {
-                       intel_power_domains_dump_info(i915);
-                       dumped = true;
-               }
-       }
-
-       mutex_unlock(&power_domains->lock);
-}
-
-#else
-
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
-{
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_display_power.h b/drivers/gpu/drm/i915/intel_display_power.h
deleted file mode 100644 (file)
index ff57b0a..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_DISPLAY_POWER_H__
-#define __INTEL_DISPLAY_POWER_H__
-
-#include "intel_display.h"
-#include "intel_runtime_pm.h"
-#include "i915_reg.h"
-
-struct drm_i915_private;
-struct intel_encoder;
-
-enum intel_display_power_domain {
-       POWER_DOMAIN_DISPLAY_CORE,
-       POWER_DOMAIN_PIPE_A,
-       POWER_DOMAIN_PIPE_B,
-       POWER_DOMAIN_PIPE_C,
-       POWER_DOMAIN_PIPE_A_PANEL_FITTER,
-       POWER_DOMAIN_PIPE_B_PANEL_FITTER,
-       POWER_DOMAIN_PIPE_C_PANEL_FITTER,
-       POWER_DOMAIN_TRANSCODER_A,
-       POWER_DOMAIN_TRANSCODER_B,
-       POWER_DOMAIN_TRANSCODER_C,
-       POWER_DOMAIN_TRANSCODER_EDP,
-       POWER_DOMAIN_TRANSCODER_EDP_VDSC,
-       POWER_DOMAIN_TRANSCODER_DSI_A,
-       POWER_DOMAIN_TRANSCODER_DSI_C,
-       POWER_DOMAIN_PORT_DDI_A_LANES,
-       POWER_DOMAIN_PORT_DDI_B_LANES,
-       POWER_DOMAIN_PORT_DDI_C_LANES,
-       POWER_DOMAIN_PORT_DDI_D_LANES,
-       POWER_DOMAIN_PORT_DDI_E_LANES,
-       POWER_DOMAIN_PORT_DDI_F_LANES,
-       POWER_DOMAIN_PORT_DDI_A_IO,
-       POWER_DOMAIN_PORT_DDI_B_IO,
-       POWER_DOMAIN_PORT_DDI_C_IO,
-       POWER_DOMAIN_PORT_DDI_D_IO,
-       POWER_DOMAIN_PORT_DDI_E_IO,
-       POWER_DOMAIN_PORT_DDI_F_IO,
-       POWER_DOMAIN_PORT_DSI,
-       POWER_DOMAIN_PORT_CRT,
-       POWER_DOMAIN_PORT_OTHER,
-       POWER_DOMAIN_VGA,
-       POWER_DOMAIN_AUDIO,
-       POWER_DOMAIN_AUX_A,
-       POWER_DOMAIN_AUX_B,
-       POWER_DOMAIN_AUX_C,
-       POWER_DOMAIN_AUX_D,
-       POWER_DOMAIN_AUX_E,
-       POWER_DOMAIN_AUX_F,
-       POWER_DOMAIN_AUX_IO_A,
-       POWER_DOMAIN_AUX_TBT1,
-       POWER_DOMAIN_AUX_TBT2,
-       POWER_DOMAIN_AUX_TBT3,
-       POWER_DOMAIN_AUX_TBT4,
-       POWER_DOMAIN_GMBUS,
-       POWER_DOMAIN_MODESET,
-       POWER_DOMAIN_GT_IRQ,
-       POWER_DOMAIN_INIT,
-
-       POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
-               ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
-       ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
-        (tran) + POWER_DOMAIN_TRANSCODER_A)
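
Each power well descriptor's domains field is a u64 bitmask indexed by the enum above, which is why POWER_DOMAIN_NUM must stay at or below 64 (the driver asserts this with a BUILD_BUG_ON in intel_power_domains_init()). A minimal standalone illustration of that encoding, using a placeholder enum rather than the real domain list:

/*
 * Standalone sketch: each domain is one bit in a u64 mask, so membership
 * tests are single AND operations. The enum values here are placeholders.
 */
#include <stdio.h>
#include <stdint.h>

enum power_domain {
	DOM_PIPE_A,
	DOM_PIPE_B,
	DOM_AUX_A,
	DOM_INIT,
	DOM_NUM,	/* must stay <= 64 for the u64 mask to work */
};

#define BIT_ULL(n) (1ull << (n))

int main(void)
{
	uint64_t well_domains = BIT_ULL(DOM_PIPE_A) |
				BIT_ULL(DOM_AUX_A) |
				BIT_ULL(DOM_INIT);

	printf("covers PIPE_A: %d\n", !!(well_domains & BIT_ULL(DOM_PIPE_A)));
	printf("covers PIPE_B: %d\n", !!(well_domains & BIT_ULL(DOM_PIPE_B)));
	return 0;
}
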
-
-struct i915_power_well;
-
-struct i915_power_well_ops {
-       /*
-        * Synchronize the well's hw state to match the current sw state, for
-        * example enable/disable it based on the current refcount. Called
-        * during driver init and resume time, possibly after first calling
-        * the enable/disable handlers.
-        */
-       void (*sync_hw)(struct drm_i915_private *dev_priv,
-                       struct i915_power_well *power_well);
-       /*
-        * Enable the well and resources that depend on it (for example
-        * interrupts located on the well). Called after the 0->1 refcount
-        * transition.
-        */
-       void (*enable)(struct drm_i915_private *dev_priv,
-                      struct i915_power_well *power_well);
-       /*
-        * Disable the well and resources that depend on it. Called after
-        * the 1->0 refcount transition.
-        */
-       void (*disable)(struct drm_i915_private *dev_priv,
-                       struct i915_power_well *power_well);
-       /* Returns the hw enabled state. */
-       bool (*is_enabled)(struct drm_i915_private *dev_priv,
-                          struct i915_power_well *power_well);
-};
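
The ops structure above is a plain function-pointer vtable; each platform wires it to its own enable/disable/is_enabled implementations (hsw_power_well_ops and friends in the .c file). A tiny standalone model of the same pattern, with simplified types and dummy callbacks:

/*
 * Standalone model of the ops vtable: a "well" driven purely through
 * function pointers. Types and callbacks are simplified placeholders.
 */
#include <stdio.h>
#include <stdbool.h>

struct well;

struct well_ops {
	void (*enable)(struct well *w);
	void (*disable)(struct well *w);
	bool (*is_enabled)(struct well *w);
};

struct well {
	const struct well_ops *ops;
	bool hw_enabled;
};

static void dummy_enable(struct well *w)     { w->hw_enabled = true; }
static void dummy_disable(struct well *w)    { w->hw_enabled = false; }
static bool dummy_is_enabled(struct well *w) { return w->hw_enabled; }

static const struct well_ops dummy_ops = {
	.enable = dummy_enable,
	.disable = dummy_disable,
	.is_enabled = dummy_is_enabled,
};

int main(void)
{
	struct well w = { .ops = &dummy_ops };

	w.ops->enable(&w);
	printf("enabled: %d\n", w.ops->is_enabled(&w));
	w.ops->disable(&w);
	printf("enabled: %d\n", w.ops->is_enabled(&w));
	return 0;
}
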
-
-struct i915_power_well_regs {
-       i915_reg_t bios;
-       i915_reg_t driver;
-       i915_reg_t kvmr;
-       i915_reg_t debug;
-};
-
-/* Power well structure for haswell */
-struct i915_power_well_desc {
-       const char *name;
-       bool always_on;
-       u64 domains;
-       /* unique identifier for this power well */
-       enum i915_power_well_id id;
-       /*
-        * Arbitrary data associated with this power well. Platform and power
-        * well specific.
-        */
-       union {
-               struct {
-                       /*
-                        * request/status flag index in the PUNIT power well
-                        * control/status registers.
-                        */
-                       u8 idx;
-               } vlv;
-               struct {
-                       enum dpio_phy phy;
-               } bxt;
-               struct {
-                       const struct i915_power_well_regs *regs;
-                       /*
-                        * request/status flag index in the power well
-                        * control/status registers.
-                        */
-                       u8 idx;
-                       /* Mask of pipes whose IRQ logic is backed by the pw */
-                       u8 irq_pipe_mask;
-                       /* The pw is backing the VGA functionality */
-                       bool has_vga:1;
-                       bool has_fuses:1;
-                       /*
-                        * The pw is for an ICL+ TypeC PHY port in
-                        * Thunderbolt mode.
-                        */
-                       bool is_tc_tbt:1;
-               } hsw;
-       };
-       const struct i915_power_well_ops *ops;
-};
-
-struct i915_power_well {
-       const struct i915_power_well_desc *desc;
-       /* power well enable/disable usage count */
-       int count;
-       /* cached hw enabled state */
-       bool hw_enabled;
-};
-
-struct i915_power_domains {
-       /*
-        * Power wells needed for initialization at driver init and suspend
-        * time are on. They are kept on until after the first modeset.
-        */
-       bool initializing;
-       bool display_core_suspended;
-       int power_well_count;
-
-       intel_wakeref_t wakeref;
-
-       struct mutex lock;
-       int domain_use_count[POWER_DOMAIN_NUM];
-
-       struct delayed_work async_put_work;
-       intel_wakeref_t async_put_wakeref;
-       u64 async_put_domains[2];
-
-       struct i915_power_well *power_wells;
-};
-
-#define for_each_power_domain(domain, mask)                            \
-       for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
-               for_each_if(BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well)                          \
-       for ((__power_well) = (__dev_priv)->power_domains.power_wells;  \
-            (__power_well) - (__dev_priv)->power_domains.power_wells < \
-               (__dev_priv)->power_domains.power_well_count;           \
-            (__power_well)++)
-
-#define for_each_power_well_reverse(__dev_priv, __power_well)                  \
-       for ((__power_well) = (__dev_priv)->power_domains.power_wells +         \
-                             (__dev_priv)->power_domains.power_well_count - 1; \
-            (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;     \
-            (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)    \
-       for_each_power_well(__dev_priv, __power_well)                           \
-               for_each_if((__power_well)->desc->domains & (__domain_mask))
-
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
-       for_each_power_well_reverse(__dev_priv, __power_well)                   \
-               for_each_if((__power_well)->desc->domains & (__domain_mask))
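
The iteration macros above are pointer walks over the power_wells array, optionally filtered by a domain mask; the enable path walks forwards and the disable path walks in reverse, matching the ordering comment in intel_power_domains_init(). A standalone approximation of what the filtered forward and reverse walks expand to, using a simplified struct and made-up masks:

/*
 * Standalone model of the filtered forward/reverse walks; the struct and
 * the mask values are simplified placeholders.
 */
#include <stdio.h>
#include <stdint.h>

struct well {
	const char *name;
	uint64_t domains;
};

#define N_WELLS 3

int main(void)
{
	struct well wells[N_WELLS] = {
		{ "power well 1", 0x3 },
		{ "power well 2", 0x4 },
		{ "DDI A IO",     0x8 },
	};
	uint64_t mask = 0x4 | 0x8;
	struct well *w;

	/* roughly for_each_power_domain_well(): enable order */
	for (w = wells; w - wells < N_WELLS; w++)
		if (w->domains & mask)
			printf("enable:  %s\n", w->name);

	/* roughly for_each_power_domain_well_reverse(): disable order */
	for (w = wells + N_WELLS - 1; w - wells >= 0; w--)
		if (w->domains & mask)
			printf("disable: %s\n", w->name);

	return 0;
}
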
-
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-
-int intel_power_domains_init(struct drm_i915_private *dev_priv);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
-                                enum i915_drm_suspend_mode);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
-
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
-
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
-                                     enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
-                                       enum intel_display_power_domain domain);
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
-                                  enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
-                                      enum intel_display_power_domain domain);
-void __intel_display_power_put_async(struct drm_i915_private *i915,
-                                    enum intel_display_power_domain domain,
-                                    intel_wakeref_t wakeref);
-void intel_display_power_flush_work(struct drm_i915_private *i915);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain,
-                            intel_wakeref_t wakeref);
-static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
-                             enum intel_display_power_domain domain,
-                             intel_wakeref_t wakeref)
-{
-       __intel_display_power_put_async(i915, domain, wakeref);
-}
-#else
-static inline void
-intel_display_power_put(struct drm_i915_private *i915,
-                       enum intel_display_power_domain domain,
-                       intel_wakeref_t wakeref)
-{
-       intel_display_power_put_unchecked(i915, domain);
-}
-
-static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
-                             enum intel_display_power_domain domain,
-                             intel_wakeref_t wakeref)
-{
-       __intel_display_power_put_async(i915, domain, -1);
-}
-#endif
-
-#define with_intel_display_power(i915, domain, wf) \
-       for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
-            intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
-
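A minimal usage sketch for the with_intel_display_power() helper above, assuming the caller already knows which power domain covers the register it wants to touch; the function name is ours, not the driver's:

static u32 read_reg_with_display_power(struct drm_i915_private *dev_priv,
                                       enum intel_display_power_domain domain,
                                       i915_reg_t reg)
{
        intel_wakeref_t wakeref;
        u32 val = 0;

        /*
         * The macro grabs a wakeref for the domain, runs the body, and then
         * releases the reference via the asynchronous put variant.
         */
        with_intel_display_power(dev_priv, domain, wakeref)
                val = I915_READ(reg);

        return val;
}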
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
-                           u8 req_slices);
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
-                            bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-                         enum dpio_channel ch, bool override);
-
-#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
deleted file mode 100644 (file)
index 7ccf7f3..0000000
+++ /dev/null
@@ -1,1088 +0,0 @@
-/*
- * Copyright © 2014-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "display/intel_dp.h"
-
-#include "intel_dpio_phy.h"
-#include "intel_drv.h"
-#include "intel_sideband.h"
-
-/**
- * DOC: DPIO
- *
- * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
- * ports. DPIO is the name given to such a display PHY. These PHYs
- * don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed through IOSF
- * sideband. VLV has one such PHY for driving ports B and C, and CHV
- * adds another PHY for driving port D. Each PHY responds to a specific
- * IOSF-SB port.
- *
- * Each display PHY is made up of one or two channels. Each channel
- * houses a common lane part which contains the PLL and other common
- * logic. CH0 common lane also contains the IOSF-SB logic for the
- * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
- * must be running when any DPIO registers are accessed.
- *
- * In addition to having their own registers, the PHYs are also
- * controlled through some dedicated signals from the display
- * controller. These include PLL reference clock enable, PLL enable,
- * and CRI clock selection, for example.
- *
- * Each channel also has two splines (also called data lanes), and
- * each spline is made up of one Physical Access Coding Sub-Layer
- * (PCS) block and two TX lanes. So each channel has two PCS blocks
- * and four TX lanes. The TX lanes are used as DP lanes or TMDS
- * data/clock pairs depending on the output type.
- *
- * Additionally the PHY also contains an AUX lane with AUX blocks
- * for each channel. This is used for DP AUX communication, but
- * this fact isn't really relevant for the driver since AUX is
- * controlled from the display controller side. No DPIO registers
- * need to be accessed during AUX communication.
- *
- * Generally on VLV/CHV the common lane corresponds to the pipe and
- * the spline (PCS/TX) corresponds to the port.
- *
- * For dual channel PHY (VLV/CHV):
- *
- *  pipe A == CMN/PLL/REF CH0
- *
- *  pipe B == CMN/PLL/REF CH1
- *
- *  port B == PCS/TX CH0
- *
- *  port C == PCS/TX CH1
- *
- * This is especially important when we cross the streams
- * ie. drive port B with pipe B, or port C with pipe A.
- *
- * For single channel PHY (CHV):
- *
- *  pipe C == CMN/PLL/REF CH0
- *
- *  port D == PCS/TX CH0
- *
- * On BXT the entire PHY channel corresponds to the port. That means
- * the PLL is also now associated with the port rather than the pipe,
- * and so the clock needs to be routed to the appropriate transcoder.
- * Port A PLL is directly connected to transcoder EDP and port B/C
- * PLLs can be routed to any transcoder A/B/C.
- *
- * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT). ::
- *
- *
- *     Dual channel PHY (VLV/CHV/BXT)
- *     ---------------------------------
- *     |      CH0      |      CH1      |
- *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
- *     |---------------|---------------| Display PHY
- *     | PCS01 | PCS23 | PCS01 | PCS23 |
- *     |-------|-------|-------|-------|
- *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- *     ---------------------------------
- *     |     DDI0      |     DDI1      | DP/HDMI ports
- *     ---------------------------------
- *
- *     Single channel PHY (CHV/BXT)
- *     -----------------
- *     |      CH0      |
- *     |  CMN/PLL/REF  |
- *     |---------------| Display PHY
- *     | PCS01 | PCS23 |
- *     |-------|-------|
- *     |TX0|TX1|TX2|TX3|
- *     -----------------
- *     |     DDI2      | DP/HDMI port
- *     -----------------
- */
-
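Purely as an illustration of the dual-channel VLV/CHV mapping spelled out above (the driver's real lookups are vlv_dport_to_channel() and bxt_port_to_phy_channel(); the two functions below are invented to restate the comment in code):

/* pipe A owns CMN/PLL/REF CH0, pipe B owns CH1 (dual channel PHY) */
static enum dpio_channel example_vlv_pipe_to_pll_channel(enum pipe pipe)
{
        return pipe == PIPE_A ? DPIO_CH0 : DPIO_CH1;
}

/* port B drives PCS/TX CH0, port C drives PCS/TX CH1 (dual channel PHY) */
static enum dpio_channel example_vlv_port_to_data_channel(enum port port)
{
        return port == PORT_B ? DPIO_CH0 : DPIO_CH1;
}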
-/**
- * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
- */
-struct bxt_ddi_phy_info {
-       /**
-        * @dual_channel: true if this phy has a second channel.
-        */
-       bool dual_channel;
-
-       /**
-        * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
-        * Otherwise the GRC value will be copied from the phy indicated by
-        * this field.
-        */
-       enum dpio_phy rcomp_phy;
-
-       /**
-        * @reset_delay: delay in us to wait before setting the common reset
-        * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
-        */
-       int reset_delay;
-
-       /**
-        * @pwron_mask: Mask with the appropriate bit set that would cause the
-        * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
-        */
-       u32 pwron_mask;
-
-       /**
-        * @channel: struct containing per channel information.
-        */
-       struct {
-               /**
-                * @channel.port: which port maps to this channel.
-                */
-               enum port port;
-       } channel[2];
-};
-
-static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
-       [DPIO_PHY0] = {
-               .dual_channel = true,
-               .rcomp_phy = DPIO_PHY1,
-               .pwron_mask = BIT(0),
-
-               .channel = {
-                       [DPIO_CH0] = { .port = PORT_B },
-                       [DPIO_CH1] = { .port = PORT_C },
-               }
-       },
-       [DPIO_PHY1] = {
-               .dual_channel = false,
-               .rcomp_phy = -1,
-               .pwron_mask = BIT(1),
-
-               .channel = {
-                       [DPIO_CH0] = { .port = PORT_A },
-               }
-       },
-};
-
-static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
-       [DPIO_PHY0] = {
-               .dual_channel = false,
-               .rcomp_phy = DPIO_PHY1,
-               .pwron_mask = BIT(0),
-               .reset_delay = 20,
-
-               .channel = {
-                       [DPIO_CH0] = { .port = PORT_B },
-               }
-       },
-       [DPIO_PHY1] = {
-               .dual_channel = false,
-               .rcomp_phy = -1,
-               .pwron_mask = BIT(3),
-               .reset_delay = 20,
-
-               .channel = {
-                       [DPIO_CH0] = { .port = PORT_A },
-               }
-       },
-       [DPIO_PHY2] = {
-               .dual_channel = false,
-               .rcomp_phy = DPIO_PHY1,
-               .pwron_mask = BIT(1),
-               .reset_delay = 20,
-
-               .channel = {
-                       [DPIO_CH0] = { .port = PORT_C },
-               }
-       },
-};
-
-static const struct bxt_ddi_phy_info *
-bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
-{
-       if (IS_GEMINILAKE(dev_priv)) {
-               *count =  ARRAY_SIZE(glk_ddi_phy_info);
-               return glk_ddi_phy_info;
-       } else {
-               *count =  ARRAY_SIZE(bxt_ddi_phy_info);
-               return bxt_ddi_phy_info;
-       }
-}
-
-static const struct bxt_ddi_phy_info *
-bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-       int count;
-       const struct bxt_ddi_phy_info *phy_list =
-               bxt_get_phy_list(dev_priv, &count);
-
-       return &phy_list[phy];
-}
-
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
-                            enum dpio_phy *phy, enum dpio_channel *ch)
-{
-       const struct bxt_ddi_phy_info *phy_info, *phys;
-       int i, count;
-
-       phys = bxt_get_phy_list(dev_priv, &count);
-
-       for (i = 0; i < count; i++) {
-               phy_info = &phys[i];
-
-               if (port == phy_info->channel[DPIO_CH0].port) {
-                       *phy = i;
-                       *ch = DPIO_CH0;
-                       return;
-               }
-
-               if (phy_info->dual_channel &&
-                   port == phy_info->channel[DPIO_CH1].port) {
-                       *phy = i;
-                       *ch = DPIO_CH1;
-                       return;
-               }
-       }
-
-       WARN(1, "PHY not found for PORT %c", port_name(port));
-       *phy = DPIO_PHY0;
-       *ch = DPIO_CH0;
-}
-
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
-                                 enum port port, u32 margin, u32 scale,
-                                 u32 enable, u32 deemphasis)
-{
-       u32 val;
-       enum dpio_phy phy;
-       enum dpio_channel ch;
-
-       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
-
-       /*
-        * While we write to the group register to program all lanes at once,
-        * we can only read back individual lane registers, so we pick lanes
-        * 0/1 for that.
-        */
-       val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
-       val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
-       I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
-
-       val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
-       val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
-       val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
-       I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
-
-       val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
-       val &= ~SCALE_DCOMP_METHOD;
-       if (enable)
-               val |= SCALE_DCOMP_METHOD;
-
-       if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
-               DRM_ERROR("Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
-
-       I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
-
-       val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
-       val &= ~DE_EMPHASIS;
-       val |= deemphasis << DEEMPH_SHIFT;
-       I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
-
-       val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
-       val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
-       I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
-}
-
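The function above repeats the same pattern several times: read from a lane register (group registers cannot be read back), modify, then write the group register so all lanes are programmed. As a sketch, that pattern could be captured in a helper like the hypothetical one below (not part of the driver):

/* Hypothetical helper: lane-read / group-write read-modify-write. */
static void example_bxt_phy_rmw_grp(struct drm_i915_private *dev_priv,
                                    i915_reg_t ln_reg, i915_reg_t grp_reg,
                                    u32 clear, u32 set)
{
        u32 val;

        val = I915_READ(ln_reg);        /* only lane registers read back */
        val &= ~clear;
        val |= set;
        I915_WRITE(grp_reg, val);       /* group write hits all lanes */
}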
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
-                           enum dpio_phy phy)
-{
-       const struct bxt_ddi_phy_info *phy_info;
-
-       phy_info = bxt_get_phy_info(dev_priv, phy);
-
-       if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
-               return false;
-
-       if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
-            (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
-               DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
-                                phy);
-
-               return false;
-       }
-
-       if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
-               DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
-                                phy);
-
-               return false;
-       }
-
-       return true;
-}
-
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-       u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
-
-       return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
-}
-
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
-                                 enum dpio_phy phy)
-{
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   BXT_PORT_REF_DW3(phy),
-                                   GRC_DONE, GRC_DONE,
-                                   10))
-               DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
-}
-
-static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
-                             enum dpio_phy phy)
-{
-       const struct bxt_ddi_phy_info *phy_info;
-       u32 val;
-
-       phy_info = bxt_get_phy_info(dev_priv, phy);
-
-       if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
-               /* Still read out the GRC value for state verification */
-               if (phy_info->rcomp_phy != -1)
-                       dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
-
-               if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
-                       DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
-                                        "won't reprogram it\n", phy);
-                       return;
-               }
-
-               DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
-                                "force reprogramming it\n", phy);
-       }
-
-       val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-       val |= phy_info->pwron_mask;
-       I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-
-       /*
-        * The PHY registers start out inaccessible and respond to reads with
-        * all 1s.  Eventually they become accessible as they power up, then
-        * the reserved bit will give the default 0.  Poll on the reserved bit
-        * becoming 0 to find when the PHY is accessible.
-        * The flag should get set in 100us according to the HW team, but
-        * use 1ms due to occasional timeouts observed with that.
-        */
-       if (intel_wait_for_register_fw(&dev_priv->uncore,
-                                      BXT_PORT_CL1CM_DW0(phy),
-                                      PHY_RESERVED | PHY_POWER_GOOD,
-                                      PHY_POWER_GOOD,
-                                      1))
-               DRM_ERROR("timeout during PHY%d power on\n", phy);
-
-       /* Program PLL Rcomp code offset */
-       val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
-       val &= ~IREF0RC_OFFSET_MASK;
-       val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
-       I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
-
-       val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
-       val &= ~IREF1RC_OFFSET_MASK;
-       val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
-       I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
-
-       /* Program power gating */
-       val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
-       val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
-               SUS_CLK_CONFIG;
-       I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
-
-       if (phy_info->dual_channel) {
-               val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
-               val |= DW6_OLDO_DYN_PWR_DOWN_EN;
-               I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
-       }
-
-       if (phy_info->rcomp_phy != -1) {
-               u32 grc_code;
-
-               bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
-
-               /*
-                * PHY0 isn't connected to an RCOMP resistor so copy over
-                * the corresponding calibrated value from PHY1, and disable
-                * the automatic calibration on PHY0.
-                */
-               val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
-                                                         phy_info->rcomp_phy);
-               grc_code = val << GRC_CODE_FAST_SHIFT |
-                          val << GRC_CODE_SLOW_SHIFT |
-                          val;
-               I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
-
-               val = I915_READ(BXT_PORT_REF_DW8(phy));
-               val |= GRC_DIS | GRC_RDY_OVRD;
-               I915_WRITE(BXT_PORT_REF_DW8(phy), val);
-       }
-
-       if (phy_info->reset_delay)
-               udelay(phy_info->reset_delay);
-
-       val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
-       val |= COMMON_RESET_DIS;
-       I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-}
-
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-       const struct bxt_ddi_phy_info *phy_info;
-       u32 val;
-
-       phy_info = bxt_get_phy_info(dev_priv, phy);
-
-       val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
-       val &= ~COMMON_RESET_DIS;
-       I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
-       val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-       val &= ~phy_info->pwron_mask;
-       I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-}
-
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-       const struct bxt_ddi_phy_info *phy_info =
-               bxt_get_phy_info(dev_priv, phy);
-       enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
-       bool was_enabled;
-
-       lockdep_assert_held(&dev_priv->power_domains.lock);
-
-       was_enabled = true;
-       if (rcomp_phy != -1)
-               was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
-
-       /*
-        * We need to copy the GRC calibration value from rcomp_phy,
-        * so make sure it's powered up.
-        */
-       if (!was_enabled)
-               _bxt_ddi_phy_init(dev_priv, rcomp_phy);
-
-       _bxt_ddi_phy_init(dev_priv, phy);
-
-       if (!was_enabled)
-               bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
-}
-
-static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-                      i915_reg_t reg, u32 mask, u32 expected,
-                      const char *reg_fmt, ...)
-{
-       struct va_format vaf;
-       va_list args;
-       u32 val;
-
-       val = I915_READ(reg);
-       if ((val & mask) == expected)
-               return true;
-
-       va_start(args, reg_fmt);
-       vaf.fmt = reg_fmt;
-       vaf.va = &args;
-
-       DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
-                        "current %08x, expected %08x (mask %08x)\n",
-                        phy, &vaf, reg.reg, val, (val & ~mask) | expected,
-                        mask);
-
-       va_end(args);
-
-       return false;
-}
-
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
-                             enum dpio_phy phy)
-{
-       const struct bxt_ddi_phy_info *phy_info;
-       u32 mask;
-       bool ok;
-
-       phy_info = bxt_get_phy_info(dev_priv, phy);
-
-#define _CHK(reg, mask, exp, fmt, ...)                                 \
-       __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,      \
-                              ## __VA_ARGS__)
-
-       if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
-               return false;
-
-       ok = true;
-
-       /* PLL Rcomp code offset */
-       ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
-                   IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
-                   "BXT_PORT_CL1CM_DW9(%d)", phy);
-       ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
-                   IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
-                   "BXT_PORT_CL1CM_DW10(%d)", phy);
-
-       /* Power gating */
-       mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
-       ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
-                   "BXT_PORT_CL1CM_DW28(%d)", phy);
-
-       if (phy_info->dual_channel)
-               ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
-                          DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
-                          "BXT_PORT_CL2CM_DW6(%d)", phy);
-
-       if (phy_info->rcomp_phy != -1) {
-               u32 grc_code = dev_priv->bxt_phy_grc;
-
-               grc_code = grc_code << GRC_CODE_FAST_SHIFT |
-                          grc_code << GRC_CODE_SLOW_SHIFT |
-                          grc_code;
-               mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
-                      GRC_CODE_NOM_MASK;
-               ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
-                          "BXT_PORT_REF_DW6(%d)", phy);
-
-               mask = GRC_DIS | GRC_RDY_OVRD;
-               ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
-                           "BXT_PORT_REF_DW8(%d)", phy);
-       }
-
-       return ok;
-#undef _CHK
-}
-
-u8
-bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
-{
-       switch (lane_count) {
-       case 1:
-               return 0;
-       case 2:
-               return BIT(2) | BIT(0);
-       case 4:
-               return BIT(3) | BIT(2) | BIT(0);
-       default:
-               MISSING_CASE(lane_count);
-
-               return 0;
-       }
-}
-
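For reference, a small sketch of how the mask computed above pairs with bxt_ddi_phy_set_lane_optim_mask() below during encoder setup; the wrapper name is ours:

static void example_apply_lane_optim(struct intel_encoder *encoder)
{
        /* For 4 lanes this yields BIT(3) | BIT(2) | BIT(0), per the switch above. */
        u8 mask = bxt_ddi_phy_calc_lane_lat_optim_mask(4);

        bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}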
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
-                                    u8 lane_lat_optim_mask)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       enum dpio_phy phy;
-       enum dpio_channel ch;
-       int lane;
-
-       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
-
-       for (lane = 0; lane < 4; lane++) {
-               u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
-
-               /*
-                * Note that on CHV this flag is called UPAR, but has
-                * the same function.
-                */
-               val &= ~LATENCY_OPTIM;
-               if (lane_lat_optim_mask & BIT(lane))
-                       val |= LATENCY_OPTIM;
-
-               I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
-       }
-}
-
-u8
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       enum dpio_phy phy;
-       enum dpio_channel ch;
-       int lane;
-       u8 mask;
-
-       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
-
-       mask = 0;
-       for (lane = 0; lane < 4; lane++) {
-               u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
-
-               if (val & LATENCY_OPTIM)
-                       mask |= BIT(lane);
-       }
-
-       return mask;
-}
-
-
-void chv_set_phy_signal_level(struct intel_encoder *encoder,
-                             u32 deemph_reg_value, u32 margin_reg_value,
-                             bool uniq_trans_scale)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-       u32 val;
-       int i;
-
-       vlv_dpio_get(dev_priv);
-
-       /* Clear calc init */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
-       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-               val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-               val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-               val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-       }
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
-       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
-               val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-               val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
-       }
-
-       /* Program swing deemph */
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
-               val &= ~DPIO_SWING_DEEMPH9P5_MASK;
-               val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
-       }
-
-       /* Program swing margin */
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
-               val &= ~DPIO_SWING_MARGIN000_MASK;
-               val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
-
-               /*
-                * Supposedly this value shouldn't matter when unique transition
-                * scale is disabled, but in fact it does matter. Let's just
-                * always program the same value and hope it's OK.
-                */
-               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
-               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
-       }
-
-       /*
-        * The document said it needs to set bit 27 for ch0 and bit 26
-        * for ch1. Might be a typo in the doc.
-        * For now, for this unique transition scale selection, set bit
-        * 27 for ch0 and ch1.
-        */
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
-               if (uniq_trans_scale)
-                       val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
-               else
-                       val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
-       }
-
-       /* Start swing calculation */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
-       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-               val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-       }
-
-       vlv_dpio_put(dev_priv);
-}
-
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *crtc_state,
-                             bool reset)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       enum pipe pipe = crtc->pipe;
-       u32 val;
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       if (reset)
-               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       else
-               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
-       if (crtc_state->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-               if (reset)
-                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-               else
-                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
-       }
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       if (reset)
-               val &= ~DPIO_PCS_CLK_SOFT_RESET;
-       else
-               val |= DPIO_PCS_CLK_SOFT_RESET;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
-       if (crtc_state->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-               val |= CHV_PCS_REQ_SOFTRESET_EN;
-               if (reset)
-                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
-               else
-                       val |= DPIO_PCS_CLK_SOFT_RESET;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-       }
-}
-
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
-                           const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = crtc->pipe;
-       unsigned int lane_mask =
-               intel_dp_unused_lane_mask(crtc_state->lane_count);
-       u32 val;
-
-       /*
-        * Must trick the second common lane into life.
-        * Otherwise we can't even access the PLL.
-        */
-       if (ch == DPIO_CH0 && pipe == PIPE_B)
-               dport->release_cl2_override =
-                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
-       chv_phy_powergate_lanes(encoder, true, lane_mask);
-
-       vlv_dpio_get(dev_priv);
-
-       /* Assert data lane reset */
-       chv_data_lane_soft_reset(encoder, crtc_state, true);
-
-       /* program left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               if (ch == DPIO_CH0)
-                       val |= CHV_BUFLEFTENA1_FORCE;
-               if (ch == DPIO_CH1)
-                       val |= CHV_BUFRIGHTENA1_FORCE;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               if (ch == DPIO_CH0)
-                       val |= CHV_BUFLEFTENA2_FORCE;
-               if (ch == DPIO_CH1)
-                       val |= CHV_BUFRIGHTENA2_FORCE;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
-       /* program clock channel usage */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
-       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-       if (pipe != PIPE_B)
-               val &= ~CHV_PCS_USEDCLKCHANNEL;
-       else
-               val |= CHV_PCS_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
-       if (crtc_state->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
-               val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-               if (pipe != PIPE_B)
-                       val &= ~CHV_PCS_USEDCLKCHANNEL;
-               else
-                       val |= CHV_PCS_USEDCLKCHANNEL;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
-       }
-
-       /*
-        * This is a bit weird since generally CL
-        * matches the pipe, but here we need to
-        * pick the CL based on the port.
-        */
-       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
-       if (pipe != PIPE_B)
-               val &= ~CHV_CMN_USEDCLKCHANNEL;
-       else
-               val |= CHV_CMN_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
-       vlv_dpio_put(dev_priv);
-}
-
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = crtc->pipe;
-       int data, i, stagger;
-       u32 val;
-
-       vlv_dpio_get(dev_priv);
-
-       /* allow hardware to manage TX FIFO reset source */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
-       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
-       if (crtc_state->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-               val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-       }
-
-       /* Program Tx lane latency optimal setting */
-       for (i = 0; i < crtc_state->lane_count; i++) {
-               /* Set the upar bit */
-               if (crtc_state->lane_count == 1)
-                       data = 0x0;
-               else
-                       data = (i == 1) ? 0x0 : 0x1;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
-                               data << DPIO_UPAR_SHIFT);
-       }
-
-       /* Data lane stagger programming */
-       if (crtc_state->port_clock > 270000)
-               stagger = 0x18;
-       else if (crtc_state->port_clock > 135000)
-               stagger = 0xd;
-       else if (crtc_state->port_clock > 67500)
-               stagger = 0x7;
-       else if (crtc_state->port_clock > 33750)
-               stagger = 0x4;
-       else
-               stagger = 0x2;
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
-       val |= DPIO_TX2_STAGGER_MASK(0x1f);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
-       if (crtc_state->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-               val |= DPIO_TX2_STAGGER_MASK(0x1f);
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-       }
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
-                      DPIO_LANESTAGGER_STRAP(stagger) |
-                      DPIO_LANESTAGGER_STRAP_OVRD |
-                      DPIO_TX1_STAGGER_MASK(0x1f) |
-                      DPIO_TX1_STAGGER_MULT(6) |
-                      DPIO_TX2_STAGGER_MULT(0));
-
-       if (crtc_state->lane_count > 2) {
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
-                              DPIO_LANESTAGGER_STRAP(stagger) |
-                              DPIO_LANESTAGGER_STRAP_OVRD |
-                              DPIO_TX1_STAGGER_MASK(0x1f) |
-                              DPIO_TX1_STAGGER_MULT(7) |
-                              DPIO_TX2_STAGGER_MULT(5));
-       }
-
-       /* Deassert data lane reset */
-       chv_data_lane_soft_reset(encoder, crtc_state, false);
-
-       vlv_dpio_put(dev_priv);
-}
-
-void chv_phy_release_cl2_override(struct intel_encoder *encoder)
-{
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (dport->release_cl2_override) {
-               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
-               dport->release_cl2_override = false;
-       }
-}
-
-void chv_phy_post_pll_disable(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *old_crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
-       u32 val;
-
-       vlv_dpio_get(dev_priv);
-
-       /* disable left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
-       vlv_dpio_put(dev_priv);
-
-       /*
-        * Leave the power down bit cleared for at least one
-        * lane so that chv_phy_powergate_ch() will power
-        * on something when the channel is otherwise unused.
-        * When the port is off and the override is removed
-        * the lanes power down anyway, so otherwise it doesn't
-        * really matter what the state of power down bits is
-        * after this.
-        */
-       chv_phy_powergate_lanes(encoder, false, 0x0);
-}
-
-void vlv_set_phy_signal_level(struct intel_encoder *encoder,
-                             u32 demph_reg_value, u32 preemph_reg_value,
-                             u32 uniqtranscale_reg_value, u32 tx3_demph)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-
-       vlv_dpio_get(dev_priv);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
-                        uniqtranscale_reg_value);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
-
-       if (tx3_demph)
-               vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
-
-       vlv_dpio_put(dev_priv);
-}
-
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
-                           const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       enum pipe pipe = crtc->pipe;
-
-       /* Program Tx lane resets to default */
-       vlv_dpio_get(dev_priv);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
-                        DPIO_PCS_TX_LANE2_RESET |
-                        DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
-                        DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
-                        DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
-                        (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
-                                DPIO_PCS_CLK_SOFT_RESET);
-
-       /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
-
-       vlv_dpio_put(dev_priv);
-}
-
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       enum pipe pipe = crtc->pipe;
-       u32 val;
-
-       vlv_dpio_get(dev_priv);
-
-       /* Enable clock channels for this port */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
-       val = 0;
-       if (pipe)
-               val |= (1<<21);
-       else
-               val &= ~(1<<21);
-       val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
-
-       /* Program lane clock */
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
-
-       vlv_dpio_put(dev_priv);
-}
-
-void vlv_phy_reset_lanes(struct intel_encoder *encoder,
-                        const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       enum pipe pipe = crtc->pipe;
-
-       vlv_dpio_get(dev_priv);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
-       vlv_dpio_put(dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.h b/drivers/gpu/drm/i915/intel_dpio_phy.h
deleted file mode 100644 (file)
index f418aab..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_DPIO_PHY_H__
-#define __INTEL_DPIO_PHY_H__
-
-#include <linux/types.h>
-
-enum dpio_channel;
-enum dpio_phy;
-enum port;
-struct drm_i915_private;
-struct intel_crtc_state;
-struct intel_encoder;
-
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
-                            enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
-                                 enum port port, u32 margin, u32 scale,
-                                 u32 enable, u32 deemphasis);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
-                           enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
-                             enum dpio_phy phy);
-u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
-                                    u8 lane_lat_optim_mask);
-u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
-
-void chv_set_phy_signal_level(struct intel_encoder *encoder,
-                             u32 deemph_reg_value, u32 margin_reg_value,
-                             bool uniq_trans_scale);
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *crtc_state,
-                             bool reset);
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
-                           const struct intel_crtc_state *crtc_state);
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
-                               const struct intel_crtc_state *crtc_state);
-void chv_phy_release_cl2_override(struct intel_encoder *encoder);
-void chv_phy_post_pll_disable(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *old_crtc_state);
-
-void vlv_set_phy_signal_level(struct intel_encoder *encoder,
-                             u32 demph_reg_value, u32 preemph_reg_value,
-                             u32 uniqtranscale_reg_value, u32 tx3_demph);
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
-                           const struct intel_crtc_state *crtc_state);
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
-                               const struct intel_crtc_state *crtc_state);
-void vlv_phy_reset_lanes(struct intel_encoder *encoder,
-                        const struct intel_crtc_state *old_crtc_state);
-
-#endif /* __INTEL_DPIO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
deleted file mode 100644 (file)
index 2d4e7b9..0000000
+++ /dev/null
@@ -1,3359 +0,0 @@
-/*
- * Copyright © 2006-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "intel_dpio_phy.h"
-#include "intel_dpll_mgr.h"
-#include "intel_drv.h"
-
-/**
- * DOC: Display PLLs
- *
- * Display PLLs used for driving outputs vary by platform. While some have
- * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
- * from a pool. In the latter scenario, it is possible that multiple pipes
- * share a PLL if their configurations match.
- *
- * This file provides an abstraction over display PLLs. The function
- * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
- * users of a PLL are tracked and that tracking is integrated with the atomic
- * modeset interface. During an atomic operation, a PLL can be requested for a
- * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
- * a previously used PLL can be released with intel_release_shared_dpll().
- * Changes to the users are first staged in the atomic state, and then made
- * effective by calling intel_shared_dpll_swap_state() during the atomic
- * commit phase.
- */
-
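A sketch of the per-CRTC sequence built on the helpers in this file, using only the entry points defined below; the caller names are hypothetical and error handling is omitted:

static void example_crtc_pll_enable(const struct intel_crtc_state *crtc_state)
{
        /* Program the PLL dividers while it is still off ... */
        intel_prepare_shared_dpll(crtc_state);
        /* ... then enable it; usage is refcounted via pll->active_mask. */
        intel_enable_shared_dpll(crtc_state);
}

static void example_crtc_pll_disable(const struct intel_crtc_state *crtc_state)
{
        /* Drops this CRTC from active_mask and powers the PLL down on last use. */
        intel_disable_shared_dpll(crtc_state);
}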
-static void
-intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
-                                 struct intel_shared_dpll_state *shared_dpll)
-{
-       enum intel_dpll_id i;
-
-       /* Copy shared dpll state */
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-               shared_dpll[i] = pll->state;
-       }
-}
-
-static struct intel_shared_dpll_state *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
-{
-       struct intel_atomic_state *state = to_intel_atomic_state(s);
-
-       WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
-
-       if (!state->dpll_set) {
-               state->dpll_set = true;
-
-               intel_atomic_duplicate_dpll_state(to_i915(s->dev),
-                                                 state->shared_dpll);
-       }
-
-       return state->shared_dpll;
-}
-
-/**
- * intel_get_shared_dpll_by_id - get a DPLL given its id
- * @dev_priv: i915 device instance
- * @id: pll id
- *
- * Returns:
- * A pointer to the DPLL with @id
- */
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
-                           enum intel_dpll_id id)
-{
-       return &dev_priv->shared_dplls[id];
-}
-
-/**
- * intel_get_shared_dpll_id - get the id of a DPLL
- * @dev_priv: i915 device instance
- * @pll: the DPLL
- *
- * Returns:
- * The id of @pll
- */
-enum intel_dpll_id
-intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
-                        struct intel_shared_dpll *pll)
-{
-       if (WARN_ON(pll < dev_priv->shared_dplls ||
-                   pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
-               return -1;
-
-       return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
-}
-
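A tiny sanity sketch of the id/pointer round trip the two helpers above provide (illustrative only; the function name is ours):

static void example_check_dpll_ids(struct drm_i915_private *dev_priv)
{
        enum intel_dpll_id id;

        for (id = 0; id < dev_priv->num_shared_dpll; id++) {
                struct intel_shared_dpll *pll =
                        intel_get_shared_dpll_by_id(dev_priv, id);

                /* The two lookups are inverses of each other. */
                WARN_ON(intel_get_shared_dpll_id(dev_priv, pll) != id);
        }
}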
-/* For ILK+ */
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
-                       struct intel_shared_dpll *pll,
-                       bool state)
-{
-       bool cur_state;
-       struct intel_dpll_hw_state hw_state;
-
-       if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
-               return;
-
-       cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
-       I915_STATE_WARN(cur_state != state,
-            "%s assertion failure (expected %s, current %s)\n",
-                       pll->info->name, onoff(state), onoff(cur_state));
-}
-
-/**
- * intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc_state: CRTC, and its state, which has a shared dpll
- *
- * This calls the PLL's prepare hook if it has one and if the PLL is not
- * already enabled. The prepare hook is platform specific.
- */
-void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-
-       if (WARN_ON(pll == NULL))
-               return;
-
-       mutex_lock(&dev_priv->dpll_lock);
-       WARN_ON(!pll->state.crtc_mask);
-       if (!pll->active_mask) {
-               DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
-               WARN_ON(pll->on);
-               assert_shared_dpll_disabled(dev_priv, pll);
-
-               pll->info->funcs->prepare(dev_priv, pll);
-       }
-       mutex_unlock(&dev_priv->dpll_lock);
-}
-
-/**
- * intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc_state: CRTC, and its state, which has a shared DPLL
- *
- * Enable the shared DPLL used by @crtc.
- */
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-       unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
-       unsigned int old_mask;
-
-       if (WARN_ON(pll == NULL))
-               return;
-
-       mutex_lock(&dev_priv->dpll_lock);
-       old_mask = pll->active_mask;
-
-       if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
-           WARN_ON(pll->active_mask & crtc_mask))
-               goto out;
-
-       pll->active_mask |= crtc_mask;
-
-       DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
-                     pll->info->name, pll->active_mask, pll->on,
-                     crtc->base.base.id);
-
-       if (old_mask) {
-               WARN_ON(!pll->on);
-               assert_shared_dpll_enabled(dev_priv, pll);
-               goto out;
-       }
-       WARN_ON(pll->on);
-
-       DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
-       pll->info->funcs->enable(dev_priv, pll);
-       pll->on = true;
-
-out:
-       mutex_unlock(&dev_priv->dpll_lock);
-}
-
-/**
- * intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc_state: CRTC, and its state, which has a shared DPLL
- *
- * Disable the shared DPLL used by @crtc.
- */
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-       unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
-
-       /* PCH only available on ILK+ */
-       if (INTEL_GEN(dev_priv) < 5)
-               return;
-
-       if (pll == NULL)
-               return;
-
-       mutex_lock(&dev_priv->dpll_lock);
-       if (WARN_ON(!(pll->active_mask & crtc_mask)))
-               goto out;
-
-       DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
-                     pll->info->name, pll->active_mask, pll->on,
-                     crtc->base.base.id);
-
-       assert_shared_dpll_enabled(dev_priv, pll);
-       WARN_ON(!pll->on);
-
-       pll->active_mask &= ~crtc_mask;
-       if (pll->active_mask)
-               goto out;
-
-       DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
-       pll->info->funcs->disable(dev_priv, pll);
-       pll->on = false;
-
-out:
-       mutex_unlock(&dev_priv->dpll_lock);
-}
-
-static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
-                      enum intel_dpll_id range_min,
-                      enum intel_dpll_id range_max)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll, *unused_pll = NULL;
-       struct intel_shared_dpll_state *shared_dpll;
-       enum intel_dpll_id i;
-
-       shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
-
-       for (i = range_min; i <= range_max; i++) {
-               pll = &dev_priv->shared_dplls[i];
-
-               /* Only want to check enabled timings first */
-               if (shared_dpll[i].crtc_mask == 0) {
-                       if (!unused_pll)
-                               unused_pll = pll;
-                       continue;
-               }
-
-               if (memcmp(&crtc_state->dpll_hw_state,
-                          &shared_dpll[i].hw_state,
-                          sizeof(crtc_state->dpll_hw_state)) == 0) {
-                       DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
-                                     crtc->base.base.id, crtc->base.name,
-                                     pll->info->name,
-                                     shared_dpll[i].crtc_mask,
-                                     pll->active_mask);
-                       return pll;
-               }
-       }
-
-       /* Ok no matching timings, maybe there's a free one? */
-       if (unused_pll) {
-               DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
-                             crtc->base.base.id, crtc->base.name,
-                             unused_pll->info->name);
-               return unused_pll;
-       }
-
-       return NULL;
-}
-
-static void
-intel_reference_shared_dpll(struct intel_shared_dpll *pll,
-                           struct intel_crtc_state *crtc_state)
-{
-       struct intel_shared_dpll_state *shared_dpll;
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       const enum intel_dpll_id id = pll->info->id;
-
-       shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
-
-       if (shared_dpll[id].crtc_mask == 0)
-               shared_dpll[id].hw_state =
-                       crtc_state->dpll_hw_state;
-
-       crtc_state->shared_dpll = pll;
-       DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
-                        pipe_name(crtc->pipe));
-
-       shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
-}
-
-/**
- * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
- * @state: atomic state
- *
- * This is the dpll version of drm_atomic_helper_swap_state() since the
- * helper does not handle driver-specific global state.
- *
- * For consistency with atomic helpers this function does a complete swap,
- * i.e. it also puts the current state into @state, even though there is no
- * need for that at this moment.
- */
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
-       struct intel_shared_dpll_state *shared_dpll;
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id i;
-
-       if (!to_intel_atomic_state(state)->dpll_set)
-               return;
-
-       shared_dpll = to_intel_atomic_state(state)->shared_dpll;
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               struct intel_shared_dpll_state tmp;
-
-               pll = &dev_priv->shared_dplls[i];
-
-               tmp = pll->state;
-               pll->state = shared_dpll[i];
-               shared_dpll[i] = tmp;
-       }
-}
-
-static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
-                                     struct intel_shared_dpll *pll,
-                                     struct intel_dpll_hw_state *hw_state)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       u32 val;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(PCH_DPLL(id));
-       hw_state->dpll = val;
-       hw_state->fp0 = I915_READ(PCH_FP0(id));
-       hw_state->fp1 = I915_READ(PCH_FP1(id));
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return val & DPLL_VCO_ENABLE;
-}
-
-static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-
-       I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
-       I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
-}
-
-static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-       bool enabled;
-
-       I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
-
-       val = I915_READ(PCH_DREF_CONTROL);
-       enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
-                           DREF_SUPERSPREAD_SOURCE_MASK));
-       I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
-}
-
-static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-
-       /* PCH refclock must be enabled first */
-       ibx_assert_pch_refclk_enabled(dev_priv);
-
-       I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(PCH_DPLL(id));
-       udelay(150);
-
-       /* The pixel multiplier can only be updated once the
-        * DPLL is enabled and the clocks are stable.
-        *
-        * So write it again.
-        */
-       I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
-       POSTING_READ(PCH_DPLL(id));
-       udelay(200);
-}
-
-static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-
-       I915_WRITE(PCH_DPLL(id), 0);
-       POSTING_READ(PCH_DPLL(id));
-       udelay(200);
-}
-
-static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id i;
-
-       if (HAS_PCH_IBX(dev_priv)) {
-               /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
-               i = (enum intel_dpll_id) crtc->pipe;
-               pll = &dev_priv->shared_dplls[i];
-
-               DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
-                             crtc->base.base.id, crtc->base.name,
-                             pll->info->name);
-       } else {
-               pll = intel_find_shared_dpll(crtc_state,
-                                            DPLL_ID_PCH_PLL_A,
-                                            DPLL_ID_PCH_PLL_B);
-       }
-
-       if (!pll)
-               return NULL;
-
-       /* reference the pll */
-       intel_reference_shared_dpll(pll, crtc_state);
-
-       return pll;
-}
-
-static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
-                     "fp0: 0x%x, fp1: 0x%x\n",
-                     hw_state->dpll,
-                     hw_state->dpll_md,
-                     hw_state->fp0,
-                     hw_state->fp1);
-}
-
-static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
-       .prepare = ibx_pch_dpll_prepare,
-       .enable = ibx_pch_dpll_enable,
-       .disable = ibx_pch_dpll_disable,
-       .get_hw_state = ibx_pch_dpll_get_hw_state,
-};
-
-static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
-                              struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-
-       I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
-       POSTING_READ(WRPLL_CTL(id));
-       udelay(20);
-}
-
-static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll)
-{
-       I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
-       POSTING_READ(SPLL_CTL);
-       udelay(20);
-}
-
-static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
-                                 struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       u32 val;
-
-       val = I915_READ(WRPLL_CTL(id));
-       I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
-       POSTING_READ(WRPLL_CTL(id));
-}
-
-static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll)
-{
-       u32 val;
-
-       val = I915_READ(SPLL_CTL);
-       I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-       POSTING_READ(SPLL_CTL);
-}
-
-static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
-                                      struct intel_shared_dpll *pll,
-                                      struct intel_dpll_hw_state *hw_state)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       u32 val;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(WRPLL_CTL(id));
-       hw_state->wrpll = val;
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return val & WRPLL_PLL_ENABLE;
-}
-
-static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
-                                     struct intel_shared_dpll *pll,
-                                     struct intel_dpll_hw_state *hw_state)
-{
-       intel_wakeref_t wakeref;
-       u32 val;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(SPLL_CTL);
-       hw_state->spll = val;
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return val & SPLL_PLL_ENABLE;
-}
-
-#define LC_FREQ 2700
-#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
-
-#define P_MIN 2
-#define P_MAX 64
-#define P_INC 2
-
-/* Constraints for PLL good behavior */
-#define REF_MIN 48
-#define REF_MAX 400
-#define VCO_MIN 2400
-#define VCO_MAX 4800
-
-struct hsw_wrpll_rnp {
-       unsigned p, n2, r2;
-};
-
-static unsigned hsw_wrpll_get_budget_for_freq(int clock)
-{
-       unsigned budget;
-
-       switch (clock) {
-       case 25175000:
-       case 25200000:
-       case 27000000:
-       case 27027000:
-       case 37762500:
-       case 37800000:
-       case 40500000:
-       case 40541000:
-       case 54000000:
-       case 54054000:
-       case 59341000:
-       case 59400000:
-       case 72000000:
-       case 74176000:
-       case 74250000:
-       case 81000000:
-       case 81081000:
-       case 89012000:
-       case 89100000:
-       case 108000000:
-       case 108108000:
-       case 111264000:
-       case 111375000:
-       case 148352000:
-       case 148500000:
-       case 162000000:
-       case 162162000:
-       case 222525000:
-       case 222750000:
-       case 296703000:
-       case 297000000:
-               budget = 0;
-               break;
-       case 233500000:
-       case 245250000:
-       case 247750000:
-       case 253250000:
-       case 298000000:
-               budget = 1500;
-               break;
-       case 169128000:
-       case 169500000:
-       case 179500000:
-       case 202000000:
-               budget = 2000;
-               break;
-       case 256250000:
-       case 262500000:
-       case 270000000:
-       case 272500000:
-       case 273750000:
-       case 280750000:
-       case 281250000:
-       case 286000000:
-       case 291750000:
-               budget = 4000;
-               break;
-       case 267250000:
-       case 268500000:
-               budget = 5000;
-               break;
-       default:
-               budget = 1000;
-               break;
-       }
-
-       return budget;
-}
-
-static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
-                                unsigned int r2, unsigned int n2,
-                                unsigned int p,
-                                struct hsw_wrpll_rnp *best)
-{
-       u64 a, b, c, d, diff, diff_best;
-
-       /* No best (r,n,p) yet */
-       if (best->p == 0) {
-               best->p = p;
-               best->n2 = n2;
-               best->r2 = r2;
-               return;
-       }
-
-       /*
-        * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
-        * freq2k.
-        *
-        * delta = 1e6 *
-        *         abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
-        *         freq2k;
-        *
-        * and we would like delta <= budget.
-        *
-        * If the discrepancy is above the PPM-based budget, always prefer to
-        * improve upon the previous solution.  However, if you're within the
-        * budget, try to maximize Ref * VCO, that is N / (P * R^2).
-        */
-       a = freq2k * budget * p * r2;
-       b = freq2k * budget * best->p * best->r2;
-       diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
-       diff_best = abs_diff(freq2k * best->p * best->r2,
-                            LC_FREQ_2K * best->n2);
-       c = 1000000 * diff;
-       d = 1000000 * diff_best;
-
-       if (a < c && b < d) {
-               /* If both are above the budget, pick the closer */
-               if (best->p * best->r2 * diff < p * r2 * diff_best) {
-                       best->p = p;
-                       best->n2 = n2;
-                       best->r2 = r2;
-               }
-       } else if (a >= c && b < d) {
-               /* The new candidate is within budget but the current best is not: update */
-               best->p = p;
-               best->n2 = n2;
-               best->r2 = r2;
-       } else if (a >= c && b >= d) {
-               /* Both are below the limit, so pick the higher n2/(r2*r2) */
-               if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
-                       best->p = p;
-                       best->n2 = n2;
-                       best->r2 = r2;
-               }
-       }
-       /* Otherwise a < c && b >= d, do nothing */
-}
-
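To make the integer comparison above concrete: diff already carries the p * r2 factor on both sides, so a >= c is just the overflow-safe way of testing that the relative error is within the ppm budget. A standalone sketch with made-up numbers (not real divider values), with the p * r2 factor cancelled out:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Made-up target/candidate frequencies (same units) and a 4000 ppm
         * budget; the candidate is 3000 ppm off, so it is within budget. */
        uint64_t freq = 1000000, cand = 1003000, budget = 4000;
        uint64_t a = freq * budget;
        uint64_t c = 1000000 * (cand - freq);

        printf("within budget: %s\n", a >= c ? "yes" : "no"); /* yes */
        return 0;
}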
-static void
-hsw_ddi_calculate_wrpll(int clock /* in Hz */,
-                       unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
-{
-       u64 freq2k;
-       unsigned p, n2, r2;
-       struct hsw_wrpll_rnp best = { 0, 0, 0 };
-       unsigned budget;
-
-       freq2k = clock / 100;
-
-       budget = hsw_wrpll_get_budget_for_freq(clock);
-
-       /* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
-        * entirely and directly pass the LC PLL to it. */
-       if (freq2k == 5400000) {
-               *n2_out = 2;
-               *p_out = 1;
-               *r2_out = 2;
-               return;
-       }
-
-       /*
-        * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
-        * the WR PLL.
-        *
-        * We want R so that REF_MIN <= Ref <= REF_MAX.
-        * Injecting R2 = 2 * R gives:
-        *   REF_MAX * r2 > LC_FREQ * 2 and
-        *   REF_MIN * r2 < LC_FREQ * 2
-        *
-        * Which means the desired boundaries for r2 are:
-        *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
-        *
-        */
-       for (r2 = LC_FREQ * 2 / REF_MAX + 1;
-            r2 <= LC_FREQ * 2 / REF_MIN;
-            r2++) {
-
-               /*
-                * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
-                *
-                * Once again we want VCO_MIN <= VCO <= VCO_MAX.
-                * Injecting R2 = 2 * R and N2 = 2 * N, we get:
-                *   VCO_MAX * r2 > n2 * LC_FREQ and
-                *   VCO_MIN * r2 < n2 * LC_FREQ)
-                *
-                * Which means the desired boundaries for n2 are:
-                * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
-                */
-               for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
-                    n2 <= VCO_MAX * r2 / LC_FREQ;
-                    n2++) {
-
-                       for (p = P_MIN; p <= P_MAX; p += P_INC)
-                               hsw_wrpll_update_rnp(freq2k, budget,
-                                                    r2, n2, p, &best);
-               }
-       }
-
-       *n2_out = best.n2;
-       *p_out = best.p;
-       *r2_out = best.r2;
-}
-
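A quick numeric check of the r2/n2 search bounds derived in the comments above, as a standalone sketch; the constants mirror the LC_FREQ/REF_*/VCO_* defines earlier in this file, and the values in the comments are just the integer divisions spelled out:

#include <stdio.h>

/* Same constraints as the WRPLL code above. */
#define LC_FREQ 2700
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

int main(void)
{
        unsigned int r2_min = LC_FREQ * 2 / REF_MAX + 1; /* 5400/400 = 13, +1 -> 14 */
        unsigned int r2_max = LC_FREQ * 2 / REF_MIN;     /* 5400/48        -> 112 */

        printf("r2: %u..%u\n", r2_min, r2_max);
        /* n2 range for the smallest r2, keeping the VCO in spec: */
        printf("n2 at r2=%u: %u..%u\n", r2_min,
               VCO_MIN * r2_min / LC_FREQ + 1,           /* 2400*14/2700 = 12, +1 -> 13 */
               VCO_MAX * r2_min / LC_FREQ);              /* 4800*14/2700       -> 24 */
        return 0;
}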
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
-{
-       struct intel_shared_dpll *pll;
-       u32 val;
-       unsigned int p, n2, r2;
-
-       hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
-
-       val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
-             WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-             WRPLL_DIVIDER_POST(p);
-
-       crtc_state->dpll_hw_state.wrpll = val;
-
-       pll = intel_find_shared_dpll(crtc_state,
-                                    DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
-
-       if (!pll)
-               return NULL;
-
-       return pll;
-}
-
-static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id pll_id;
-       int clock = crtc_state->port_clock;
-
-       switch (clock / 2) {
-       case 81000:
-               pll_id = DPLL_ID_LCPLL_810;
-               break;
-       case 135000:
-               pll_id = DPLL_ID_LCPLL_1350;
-               break;
-       case 270000:
-               pll_id = DPLL_ID_LCPLL_2700;
-               break;
-       default:
-               DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
-               return NULL;
-       }
-
-       pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
-
-       if (!pll)
-               return NULL;
-
-       return pll;
-}
-
-static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
-{
-       struct intel_shared_dpll *pll;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               pll = hsw_ddi_hdmi_get_dpll(crtc_state);
-       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
-               pll = hsw_ddi_dp_get_dpll(crtc_state);
-       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
-               if (WARN_ON(crtc_state->port_clock / 2 != 135000))
-                       return NULL;
-
-               crtc_state->dpll_hw_state.spll =
-                       SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
-               pll = intel_find_shared_dpll(crtc_state,
-                                            DPLL_ID_SPLL, DPLL_ID_SPLL);
-       } else {
-               return NULL;
-       }
-
-       if (!pll)
-               return NULL;
-
-       intel_reference_shared_dpll(pll, crtc_state);
-
-       return pll;
-}
-
-static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
-                     hw_state->wrpll, hw_state->spll);
-}
-
-static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
-       .enable = hsw_ddi_wrpll_enable,
-       .disable = hsw_ddi_wrpll_disable,
-       .get_hw_state = hsw_ddi_wrpll_get_hw_state,
-};
-
-static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
-       .enable = hsw_ddi_spll_enable,
-       .disable = hsw_ddi_spll_disable,
-       .get_hw_state = hsw_ddi_spll_get_hw_state,
-};
-
-static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll)
-{
-}
-
-static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
-                                 struct intel_shared_dpll *pll)
-{
-}
-
-static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
-                                      struct intel_shared_dpll *pll,
-                                      struct intel_dpll_hw_state *hw_state)
-{
-       return true;
-}
-
-static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
-       .enable = hsw_ddi_lcpll_enable,
-       .disable = hsw_ddi_lcpll_disable,
-       .get_hw_state = hsw_ddi_lcpll_get_hw_state,
-};
-
-struct skl_dpll_regs {
-       i915_reg_t ctl, cfgcr1, cfgcr2;
-};
-
-/* this array is indexed by the *shared* pll id */
-static const struct skl_dpll_regs skl_dpll_regs[4] = {
-       {
-               /* DPLL 0 */
-               .ctl = LCPLL1_CTL,
-               /* DPLL 0 doesn't support HDMI mode */
-       },
-       {
-               /* DPLL 1 */
-               .ctl = LCPLL2_CTL,
-               .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
-               .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
-       },
-       {
-               /* DPLL 2 */
-               .ctl = WRPLL_CTL(0),
-               .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
-               .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
-       },
-       {
-               /* DPLL 3 */
-               .ctl = WRPLL_CTL(1),
-               .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
-               .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
-       },
-};
-
-static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
-                                   struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       u32 val;
-
-       val = I915_READ(DPLL_CTRL1);
-
-       val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
-                DPLL_CTRL1_SSC(id) |
-                DPLL_CTRL1_LINK_RATE_MASK(id));
-       val |= pll->state.hw_state.ctrl1 << (id * 6);
-
-       I915_WRITE(DPLL_CTRL1, val);
-       POSTING_READ(DPLL_CTRL1);
-}
-
-static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
-                              struct intel_shared_dpll *pll)
-{
-       const struct skl_dpll_regs *regs = skl_dpll_regs;
-       const enum intel_dpll_id id = pll->info->id;
-
-       skl_ddi_pll_write_ctrl1(dev_priv, pll);
-
-       I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
-       I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
-       POSTING_READ(regs[id].cfgcr1);
-       POSTING_READ(regs[id].cfgcr2);
-
-       /* the enable bit is always bit 31 */
-       I915_WRITE(regs[id].ctl,
-                  I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
-
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   DPLL_STATUS,
-                                   DPLL_LOCK(id),
-                                   DPLL_LOCK(id),
-                                   5))
-               DRM_ERROR("DPLL %d not locked\n", id);
-}
-
-static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll)
-{
-       skl_ddi_pll_write_ctrl1(dev_priv, pll);
-}
-
-static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll)
-{
-       const struct skl_dpll_regs *regs = skl_dpll_regs;
-       const enum intel_dpll_id id = pll->info->id;
-
-       /* the enable bit is always bit 31 */
-       I915_WRITE(regs[id].ctl,
-                  I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
-       POSTING_READ(regs[id].ctl);
-}
-
-static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
-                                 struct intel_shared_dpll *pll)
-{
-}
-
-static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                    struct intel_shared_dpll *pll,
-                                    struct intel_dpll_hw_state *hw_state)
-{
-       u32 val;
-       const struct skl_dpll_regs *regs = skl_dpll_regs;
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       bool ret;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       ret = false;
-
-       val = I915_READ(regs[id].ctl);
-       if (!(val & LCPLL_PLL_ENABLE))
-               goto out;
-
-       val = I915_READ(DPLL_CTRL1);
-       hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
-
-       /* avoid reading back stale values if HDMI mode is not enabled */
-       if (val & DPLL_CTRL1_HDMI_MODE(id)) {
-               hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
-               hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
-       }
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return ret;
-}
-
-static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
-                                      struct intel_shared_dpll *pll,
-                                      struct intel_dpll_hw_state *hw_state)
-{
-       const struct skl_dpll_regs *regs = skl_dpll_regs;
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       u32 val;
-       bool ret;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       ret = false;
-
-       /* DPLL0 is always enabled since it drives CDCLK */
-       val = I915_READ(regs[id].ctl);
-       if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
-               goto out;
-
-       val = I915_READ(DPLL_CTRL1);
-       hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
-
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return ret;
-}
-
-struct skl_wrpll_context {
-       u64 min_deviation;              /* current minimal deviation */
-       u64 central_freq;               /* chosen central freq */
-       u64 dco_freq;                   /* chosen dco freq */
-       unsigned int p;                 /* chosen divider */
-};
-
-static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
-{
-       memset(ctx, 0, sizeof(*ctx));
-
-       ctx->min_deviation = U64_MAX;
-}
-
-/* DCO freq must be within +1%/-6% of the DCO central freq */
-#define SKL_DCO_MAX_PDEVIATION 100
-#define SKL_DCO_MAX_NDEVIATION 600
-
-static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
-                                 u64 central_freq,
-                                 u64 dco_freq,
-                                 unsigned int divider)
-{
-       u64 deviation;
-
-       deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
-                             central_freq);
-
-       /* positive deviation */
-       if (dco_freq >= central_freq) {
-               if (deviation < SKL_DCO_MAX_PDEVIATION &&
-                   deviation < ctx->min_deviation) {
-                       ctx->min_deviation = deviation;
-                       ctx->central_freq = central_freq;
-                       ctx->dco_freq = dco_freq;
-                       ctx->p = divider;
-               }
-       /* negative deviation */
-       } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
-                  deviation < ctx->min_deviation) {
-               ctx->min_deviation = deviation;
-               ctx->central_freq = central_freq;
-               ctx->dco_freq = dco_freq;
-               ctx->p = divider;
-       }
-}
-
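The deviation above is expressed in units of 0.01%, so the SKL_DCO_MAX_PDEVIATION/SKL_DCO_MAX_NDEVIATION limits of 100 and 600 are the +1%/-6% window. A minimal sketch with hypothetical frequencies, only to show the scale of the metric:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical frequencies in Hz: DCO 0.5% above its central freq. */
        uint64_t central_freq = 9000000000ULL;
        uint64_t dco_freq = 9045000000ULL;
        uint64_t deviation = 10000 * (dco_freq - central_freq) / central_freq;

        printf("deviation = %llu\n", (unsigned long long)deviation); /* 50, i.e. 0.5% */
        return 0;
}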
-static void skl_wrpll_get_multipliers(unsigned int p,
-                                     unsigned int *p0 /* out */,
-                                     unsigned int *p1 /* out */,
-                                     unsigned int *p2 /* out */)
-{
-       /* even dividers */
-       if (p % 2 == 0) {
-               unsigned int half = p / 2;
-
-               if (half == 1 || half == 2 || half == 3 || half == 5) {
-                       *p0 = 2;
-                       *p1 = 1;
-                       *p2 = half;
-               } else if (half % 2 == 0) {
-                       *p0 = 2;
-                       *p1 = half / 2;
-                       *p2 = 2;
-               } else if (half % 3 == 0) {
-                       *p0 = 3;
-                       *p1 = half / 3;
-                       *p2 = 2;
-               } else if (half % 7 == 0) {
-                       *p0 = 7;
-                       *p1 = half / 7;
-                       *p2 = 2;
-               }
-       } else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
-               *p0 = 3;
-               *p1 = 1;
-               *p2 = p / 3;
-       } else if (p == 5 || p == 7) {
-               *p0 = p;
-               *p1 = 1;
-               *p2 = 1;
-       } else if (p == 15) {
-               *p0 = 3;
-               *p1 = 1;
-               *p2 = 5;
-       } else if (p == 21) {
-               *p0 = 7;
-               *p1 = 1;
-               *p2 = 3;
-       } else if (p == 35) {
-               *p0 = 7;
-               *p1 = 1;
-               *p2 = 5;
-       }
-}
-
-struct skl_wrpll_params {
-       u32 dco_fraction;
-       u32 dco_integer;
-       u32 qdiv_ratio;
-       u32 qdiv_mode;
-       u32 kdiv;
-       u32 pdiv;
-       u32 central_freq;
-};
-
-static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
-                                     u64 afe_clock,
-                                     u64 central_freq,
-                                     u32 p0, u32 p1, u32 p2)
-{
-       u64 dco_freq;
-
-       switch (central_freq) {
-       case 9600000000ULL:
-               params->central_freq = 0;
-               break;
-       case 9000000000ULL:
-               params->central_freq = 1;
-               break;
-       case 8400000000ULL:
-               params->central_freq = 3;
-       }
-
-       switch (p0) {
-       case 1:
-               params->pdiv = 0;
-               break;
-       case 2:
-               params->pdiv = 1;
-               break;
-       case 3:
-               params->pdiv = 2;
-               break;
-       case 7:
-               params->pdiv = 4;
-               break;
-       default:
-               WARN(1, "Incorrect PDiv\n");
-       }
-
-       switch (p2) {
-       case 5:
-               params->kdiv = 0;
-               break;
-       case 2:
-               params->kdiv = 1;
-               break;
-       case 3:
-               params->kdiv = 2;
-               break;
-       case 1:
-               params->kdiv = 3;
-               break;
-       default:
-               WARN(1, "Incorrect KDiv\n");
-       }
-
-       params->qdiv_ratio = p1;
-       params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
-
-       dco_freq = p0 * p1 * p2 * afe_clock;
-
-       /*
-        * Intermediate values are in Hz.
-        * Divide by MHz to match bspec
-        */
-       params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
-       params->dco_fraction =
-               div_u64((div_u64(dco_freq, 24) -
-                        params->dco_integer * MHz(1)) * 0x8000, MHz(1));
-}
-
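A worked example of the dco_integer/dco_fraction split above, under the assumption of a hypothetical 8.1 GHz DCO frequency: dco_integer counts whole 24 MHz multiples and dco_fraction is the remainder in 1/0x8000 steps of a multiple:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t dco_freq = 8100000000ULL; /* hypothetical DCO frequency, in Hz */
        uint64_t mhz = 1000000;
        uint32_t dco_integer = dco_freq / (24 * mhz);               /* 337 */
        uint32_t dco_fraction =
                (dco_freq / 24 - dco_integer * mhz) * 0x8000 / mhz; /* 0x4000 */

        /* 24 MHz * (337 + 0x4000/0x8000) == 8100 MHz */
        printf("dco_integer=%u dco_fraction=0x%x\n",
               (unsigned int)dco_integer, (unsigned int)dco_fraction);
        return 0;
}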
-static bool
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
-                       struct skl_wrpll_params *wrpll_params)
-{
-       u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
-       u64 dco_central_freq[3] = { 8400000000ULL,
-                                   9000000000ULL,
-                                   9600000000ULL };
-       static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
-                                            24, 28, 30, 32, 36, 40, 42, 44,
-                                            48, 52, 54, 56, 60, 64, 66, 68,
-                                            70, 72, 76, 78, 80, 84, 88, 90,
-                                            92, 96, 98 };
-       static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
-       static const struct {
-               const int *list;
-               int n_dividers;
-       } dividers[] = {
-               { even_dividers, ARRAY_SIZE(even_dividers) },
-               { odd_dividers, ARRAY_SIZE(odd_dividers) },
-       };
-       struct skl_wrpll_context ctx;
-       unsigned int dco, d, i;
-       unsigned int p0, p1, p2;
-
-       skl_wrpll_context_init(&ctx);
-
-       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
-               for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
-                       for (i = 0; i < dividers[d].n_dividers; i++) {
-                               unsigned int p = dividers[d].list[i];
-                               u64 dco_freq = p * afe_clock;
-
-                               skl_wrpll_try_divider(&ctx,
-                                                     dco_central_freq[dco],
-                                                     dco_freq,
-                                                     p);
-                               /*
-                                * Skip the remaining dividers if we're sure to
-                                * have found the definitive divider, we can't
-                                * improve a 0 deviation.
-                                */
-                               if (ctx.min_deviation == 0)
-                                       goto skip_remaining_dividers;
-                       }
-               }
-
-skip_remaining_dividers:
-               /*
-                * If a solution is found with an even divider, prefer
-                * this one.
-                */
-               if (d == 0 && ctx.p)
-                       break;
-       }
-
-       if (!ctx.p) {
-               DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
-               return false;
-       }
-
-       /*
-        * gcc incorrectly analyses that these can be used without being
-        * initialized. To be fair, it's hard to guess.
-        */
-       p0 = p1 = p2 = 0;
-       skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
-       skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
-                                 p0, p1, p2);
-
-       return true;
-}
-
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
-{
-       u32 ctrl1, cfgcr1, cfgcr2;
-       struct skl_wrpll_params wrpll_params = { 0, };
-
-       /*
-        * See comment in intel_dpll_hw_state to understand why we always use 0
-        * as the DPLL id in this function.
-        */
-       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
-       ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
-       if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
-                                    &wrpll_params))
-               return false;
-
-       cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
-               DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
-               wrpll_params.dco_integer;
-
-       cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-               DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
-               DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
-               DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
-               wrpll_params.central_freq;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
-       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-       crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-       return true;
-}
-
-static bool
-skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
-{
-       u32 ctrl1;
-
-       /*
-        * See comment in intel_dpll_hw_state to understand why we always use 0
-        * as the DPLL id in this function.
-        */
-       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-       switch (crtc_state->port_clock / 2) {
-       case 81000:
-               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
-               break;
-       case 135000:
-               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
-               break;
-       case 270000:
-               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
-               break;
-               /* eDP 1.4 rates */
-       case 162000:
-               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
-               break;
-       case 108000:
-               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
-               break;
-       case 216000:
-               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
-               break;
-       }
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
-
-       return true;
-}
-
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
-{
-       struct intel_shared_dpll *pll;
-       bool bret;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               bret = skl_ddi_hdmi_pll_dividers(crtc_state);
-               if (!bret) {
-                       DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
-                       return NULL;
-               }
-       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
-               bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
-               if (!bret) {
-                       DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
-                       return NULL;
-               }
-       } else {
-               return NULL;
-       }
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-               pll = intel_find_shared_dpll(crtc_state,
-                                            DPLL_ID_SKL_DPLL0,
-                                            DPLL_ID_SKL_DPLL0);
-       else
-               pll = intel_find_shared_dpll(crtc_state,
-                                            DPLL_ID_SKL_DPLL1,
-                                            DPLL_ID_SKL_DPLL3);
-       if (!pll)
-               return NULL;
-
-       intel_reference_shared_dpll(pll, crtc_state);
-
-       return pll;
-}
-
-static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       DRM_DEBUG_KMS("dpll_hw_state: "
-                     "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
-                     hw_state->ctrl1,
-                     hw_state->cfgcr1,
-                     hw_state->cfgcr2);
-}
-
-static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
-       .enable = skl_ddi_pll_enable,
-       .disable = skl_ddi_pll_disable,
-       .get_hw_state = skl_ddi_pll_get_hw_state,
-};
-
-static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
-       .enable = skl_ddi_dpll0_enable,
-       .disable = skl_ddi_dpll0_disable,
-       .get_hw_state = skl_ddi_dpll0_get_hw_state,
-};
-
-static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll)
-{
-       u32 temp;
-       enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
-       enum dpio_phy phy;
-       enum dpio_channel ch;
-
-       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
-
-       /* Non-SSC reference */
-       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-       temp |= PORT_PLL_REF_SEL;
-       I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-
-       if (IS_GEMINILAKE(dev_priv)) {
-               temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-               temp |= PORT_PLL_POWER_ENABLE;
-               I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-
-               if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-                                PORT_PLL_POWER_STATE), 200))
-                       DRM_ERROR("Power state not set for PLL:%d\n", port);
-       }
-
-       /* Disable 10 bit clock */
-       temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
-       temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-       I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
-
-       /* Write P1 & P2 */
-       temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
-       temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
-       temp |= pll->state.hw_state.ebb0;
-       I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
-
-       /* Write M2 integer */
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
-       temp &= ~PORT_PLL_M2_MASK;
-       temp |= pll->state.hw_state.pll0;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
-
-       /* Write N */
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
-       temp &= ~PORT_PLL_N_MASK;
-       temp |= pll->state.hw_state.pll1;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
-
-       /* Write M2 fraction */
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
-       temp &= ~PORT_PLL_M2_FRAC_MASK;
-       temp |= pll->state.hw_state.pll2;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
-
-       /* Write M2 fraction enable */
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
-       temp &= ~PORT_PLL_M2_FRAC_ENABLE;
-       temp |= pll->state.hw_state.pll3;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
-
-       /* Write coeff */
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
-       temp &= ~PORT_PLL_PROP_COEFF_MASK;
-       temp &= ~PORT_PLL_INT_COEFF_MASK;
-       temp &= ~PORT_PLL_GAIN_CTL_MASK;
-       temp |= pll->state.hw_state.pll6;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
-
-       /* Write calibration val */
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
-       temp &= ~PORT_PLL_TARGET_CNT_MASK;
-       temp |= pll->state.hw_state.pll8;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
-
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
-       temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-       temp |= pll->state.hw_state.pll9;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
-
-       temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
-       temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
-       temp &= ~PORT_PLL_DCO_AMP_MASK;
-       temp |= pll->state.hw_state.pll10;
-       I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
-
-       /* Recalibrate with new settings */
-       temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
-       temp |= PORT_PLL_RECALIBRATE;
-       I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
-       temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-       temp |= pll->state.hw_state.ebb4;
-       I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
-
-       /* Enable PLL */
-       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-       temp |= PORT_PLL_ENABLE;
-       I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-       POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-
-       if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
-                       200))
-               DRM_ERROR("PLL %d not locked\n", port);
-
-       if (IS_GEMINILAKE(dev_priv)) {
-               temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
-               temp |= DCC_DELAY_RANGE_2;
-               I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
-       }
-
-       /*
-        * While we write to the group register to program all lanes at once we
-        * can read only lane registers and we pick lanes 0/1 for that.
-        */
-       temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
-       temp &= ~LANE_STAGGER_MASK;
-       temp &= ~LANESTAGGER_STRAP_OVRD;
-       temp |= pll->state.hw_state.pcsdw12;
-       I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
-}
-
-static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
-                                       struct intel_shared_dpll *pll)
-{
-       enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
-       u32 temp;
-
-       temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-       temp &= ~PORT_PLL_ENABLE;
-       I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-       POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-
-       if (IS_GEMINILAKE(dev_priv)) {
-               temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-               temp &= ~PORT_PLL_POWER_ENABLE;
-               I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-
-               if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-                               PORT_PLL_POWER_STATE), 200))
-                       DRM_ERROR("Power state not reset for PLL:%d\n", port);
-       }
-}
-
-static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                       struct intel_shared_dpll *pll,
-                                       struct intel_dpll_hw_state *hw_state)
-{
-       enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
-       intel_wakeref_t wakeref;
-       enum dpio_phy phy;
-       enum dpio_channel ch;
-       u32 val;
-       bool ret;
-
-       bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       ret = false;
-
-       val = I915_READ(BXT_PORT_PLL_ENABLE(port));
-       if (!(val & PORT_PLL_ENABLE))
-               goto out;
-
-       hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
-       hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
-
-       hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
-       hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
-
-       hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
-       hw_state->pll0 &= PORT_PLL_M2_MASK;
-
-       hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
-       hw_state->pll1 &= PORT_PLL_N_MASK;
-
-       hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
-       hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
-
-       hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
-       hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
-
-       hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
-       hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
-                         PORT_PLL_INT_COEFF_MASK |
-                         PORT_PLL_GAIN_CTL_MASK;
-
-       hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
-       hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
-
-       hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
-       hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
-
-       hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
-       hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
-                          PORT_PLL_DCO_AMP_MASK;
-
-       /*
-        * While we write to the group register to program all lanes at once we
-        * can read only lane registers. We configure all lanes the same way, so
-        * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
-        */
-       hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
-       if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
-               DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
-                                hw_state->pcsdw12,
-                                I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
-       hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
-
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return ret;
-}
-
-/* bxt clock parameters */
-struct bxt_clk_div {
-       int clock;
-       u32 p1;
-       u32 p2;
-       u32 m2_int;
-       u32 m2_frac;
-       bool m2_frac_en;
-       u32 n;
-
-       int vco;
-};
-
-/* pre-calculated values for DP link rates */
-static const struct bxt_clk_div bxt_dp_clk_val[] = {
-       {162000, 4, 2, 32, 1677722, 1, 1},
-       {270000, 4, 1, 27,       0, 0, 1},
-       {540000, 2, 1, 27,       0, 0, 1},
-       {216000, 3, 2, 32, 1677722, 1, 1},
-       {243000, 4, 1, 24, 1258291, 1, 1},
-       {324000, 4, 1, 32, 1677722, 1, 1},
-       {432000, 3, 1, 32, 1677722, 1, 1}
-};
-
-static bool
-bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
-                         struct bxt_clk_div *clk_div)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct dpll best_clock;
-
-       /* Calculate HDMI div */
-       /*
-        * FIXME: tie the following calculation into
-        * i9xx_crtc_compute_clock
-        */
-       if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
-               DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-                                crtc_state->port_clock,
-                                pipe_name(crtc->pipe));
-               return false;
-       }
-
-       clk_div->p1 = best_clock.p1;
-       clk_div->p2 = best_clock.p2;
-       WARN_ON(best_clock.m1 != 2);
-       clk_div->n = best_clock.n;
-       clk_div->m2_int = best_clock.m2 >> 22;
-       clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
-       clk_div->m2_frac_en = clk_div->m2_frac != 0;
-
-       clk_div->vco = best_clock.vco;
-
-       return true;
-}
-
-static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
-                                   struct bxt_clk_div *clk_div)
-{
-       int clock = crtc_state->port_clock;
-       int i;
-
-       *clk_div = bxt_dp_clk_val[0];
-       for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
-               if (bxt_dp_clk_val[i].clock == clock) {
-                       *clk_div = bxt_dp_clk_val[i];
-                       break;
-               }
-       }
-
-       clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
-}
-
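As a cross-check of the VCO formula above: the 270000 entry of bxt_dp_clk_val has p1 = 4 and p2 = 1, so clock * 10 / 2 * p1 * p2 evaluates to 5400000, the exact vco == 5400000 case handled in bxt_ddi_set_dpll_hw_state() below. A minimal sketch:

#include <stdio.h>

int main(void)
{
        /* Values taken from the 270000 entry of bxt_dp_clk_val above. */
        int clock = 270000, p1 = 4, p2 = 1;
        int vco = clock * 10 / 2 * p1 * p2;

        printf("vco = %d\n", vco); /* 5400000 */
        return 0;
}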
-static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
-                                     const struct bxt_clk_div *clk_div)
-{
-       struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
-       int clock = crtc_state->port_clock;
-       int vco = clk_div->vco;
-       u32 prop_coef, int_coef, gain_ctl, targ_cnt;
-       u32 lanestagger;
-
-       memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
-
-       if (vco >= 6200000 && vco <= 6700000) {
-               prop_coef = 4;
-               int_coef = 9;
-               gain_ctl = 3;
-               targ_cnt = 8;
-       } else if ((vco > 5400000 && vco < 6200000) ||
-                       (vco >= 4800000 && vco < 5400000)) {
-               prop_coef = 5;
-               int_coef = 11;
-               gain_ctl = 3;
-               targ_cnt = 9;
-       } else if (vco == 5400000) {
-               prop_coef = 3;
-               int_coef = 8;
-               gain_ctl = 1;
-               targ_cnt = 9;
-       } else {
-               DRM_ERROR("Invalid VCO\n");
-               return false;
-       }
-
-       if (clock > 270000)
-               lanestagger = 0x18;
-       else if (clock > 135000)
-               lanestagger = 0x0d;
-       else if (clock > 67000)
-               lanestagger = 0x07;
-       else if (clock > 33000)
-               lanestagger = 0x04;
-       else
-               lanestagger = 0x02;
-
-       dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
-       dpll_hw_state->pll0 = clk_div->m2_int;
-       dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
-       dpll_hw_state->pll2 = clk_div->m2_frac;
-
-       if (clk_div->m2_frac_en)
-               dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
-
-       dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
-       dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
-
-       dpll_hw_state->pll8 = targ_cnt;
-
-       dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
-
-       dpll_hw_state->pll10 =
-               PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
-               | PORT_PLL_DCO_AMP_OVR_EN_H;
-
-       dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
-
-       dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
-
-       return true;
-}
-
-static bool
-bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
-{
-       struct bxt_clk_div clk_div = {};
-
-       bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
-
-       return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
-}
-
-static bool
-bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
-{
-       struct bxt_clk_div clk_div = {};
-
-       bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
-
-       return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
-}
-
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id id;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
-           !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
-               return NULL;
-
-       if (intel_crtc_has_dp_encoder(crtc_state) &&
-           !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
-               return NULL;
-
-       /* 1:1 mapping between ports and PLLs */
-       id = (enum intel_dpll_id) encoder->port;
-       pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
-       DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
-                     crtc->base.base.id, crtc->base.name, pll->info->name);
-
-       intel_reference_shared_dpll(pll, crtc_state);
-
-       return pll;
-}
-
-static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
-                     "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
-                     "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
-                     hw_state->ebb0,
-                     hw_state->ebb4,
-                     hw_state->pll0,
-                     hw_state->pll1,
-                     hw_state->pll2,
-                     hw_state->pll3,
-                     hw_state->pll6,
-                     hw_state->pll8,
-                     hw_state->pll9,
-                     hw_state->pll10,
-                     hw_state->pcsdw12);
-}
-
-static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
-       .enable = bxt_ddi_pll_enable,
-       .disable = bxt_ddi_pll_disable,
-       .get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
-       const struct dpll_info *dpll_info;
-
-       struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
-                                             struct intel_encoder *encoder);
-
-       void (*dump_hw_state)(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
-       { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
-       { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
-       .dpll_info = pch_plls,
-       .get_dpll = ibx_get_dpll,
-       .dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
-       { "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
-       { "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
-       { "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
-       { "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
-       { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
-       { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
-       { },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
-       .dpll_info = hsw_plls,
-       .get_dpll = hsw_get_dpll,
-       .dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
-       { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
-       { "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
-       { "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
-       { "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
-       .dpll_info = skl_plls,
-       .get_dpll = skl_get_dpll,
-       .dump_hw_state = skl_dump_hw_state,
-};
-
-static const struct dpll_info bxt_plls[] = {
-       { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
-       { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
-       { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr bxt_pll_mgr = {
-       .dpll_info = bxt_plls,
-       .get_dpll = bxt_get_dpll,
-       .dump_hw_state = bxt_dump_hw_state,
-};
-
-static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
-                              struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       u32 val;
-
-       /* 1. Enable DPLL power in DPLL_ENABLE. */
-       val = I915_READ(CNL_DPLL_ENABLE(id));
-       val |= PLL_POWER_ENABLE;
-       I915_WRITE(CNL_DPLL_ENABLE(id), val);
-
-       /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   CNL_DPLL_ENABLE(id),
-                                   PLL_POWER_STATE,
-                                   PLL_POWER_STATE,
-                                   5))
-               DRM_ERROR("PLL %d Power not enabled\n", id);
-
-       /*
-        * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
-        * select DP mode, and set DP link rate.
-        */
-       val = pll->state.hw_state.cfgcr0;
-       I915_WRITE(CNL_DPLL_CFGCR0(id), val);
-
-       /* 4. Read back to ensure writes completed */
-       POSTING_READ(CNL_DPLL_CFGCR0(id));
-
-       /* 3. Configure DPLL_CFGCR1 */
-       /* Avoid touching CFGCR1 if HDMI mode is not enabled */
-       if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
-               val = pll->state.hw_state.cfgcr1;
-               I915_WRITE(CNL_DPLL_CFGCR1(id), val);
-               /* 4. Read back to ensure writes completed */
-               POSTING_READ(CNL_DPLL_CFGCR1(id));
-       }
-
-       /*
-        * 5. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence Before Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /* 6. Enable DPLL in DPLL_ENABLE. */
-       val = I915_READ(CNL_DPLL_ENABLE(id));
-       val |= PLL_ENABLE;
-       I915_WRITE(CNL_DPLL_ENABLE(id), val);
-
-       /* 7. Wait for PLL lock status in DPLL_ENABLE. */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   CNL_DPLL_ENABLE(id),
-                                   PLL_LOCK,
-                                   PLL_LOCK,
-                                   5))
-               DRM_ERROR("PLL %d not locked\n", id);
-
-       /*
-        * 8. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence After Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /*
-        * 9. turn on the clock for the DDI and map the DPLL to the DDI
-        * Done at intel_ddi_clk_select
-        */
-}
-
-static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       u32 val;
-
-       /*
-        * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
-        * Done at intel_ddi_post_disable
-        */
-
-       /*
-        * 2. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence Before Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /* 3. Disable DPLL through DPLL_ENABLE. */
-       val = I915_READ(CNL_DPLL_ENABLE(id));
-       val &= ~PLL_ENABLE;
-       I915_WRITE(CNL_DPLL_ENABLE(id), val);
-
-       /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   CNL_DPLL_ENABLE(id),
-                                   PLL_LOCK,
-                                   0,
-                                   5))
-               DRM_ERROR("PLL %d locked\n", id);
-
-       /*
-        * 5. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence After Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /* 6. Disable DPLL power in DPLL_ENABLE. */
-       val = I915_READ(CNL_DPLL_ENABLE(id));
-       val &= ~PLL_POWER_ENABLE;
-       I915_WRITE(CNL_DPLL_ENABLE(id), val);
-
-       /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   CNL_DPLL_ENABLE(id),
-                                   PLL_POWER_STATE,
-                                   0,
-                                   5))
-               DRM_ERROR("PLL %d Power not disabled\n", id);
-}
-
-static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                    struct intel_shared_dpll *pll,
-                                    struct intel_dpll_hw_state *hw_state)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       u32 val;
-       bool ret;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       ret = false;
-
-       val = I915_READ(CNL_DPLL_ENABLE(id));
-       if (!(val & PLL_ENABLE))
-               goto out;
-
-       val = I915_READ(CNL_DPLL_CFGCR0(id));
-       hw_state->cfgcr0 = val;
-
-       /* avoid reading back stale values if HDMI mode is not enabled */
-       if (val & DPLL_CFGCR0_HDMI_MODE) {
-               hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
-       }
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return ret;
-}
-
-static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
-                                     int *qdiv, int *kdiv)
-{
-       /* even dividers */
-       if (bestdiv % 2 == 0) {
-               if (bestdiv == 2) {
-                       *pdiv = 2;
-                       *qdiv = 1;
-                       *kdiv = 1;
-               } else if (bestdiv % 4 == 0) {
-                       *pdiv = 2;
-                       *qdiv = bestdiv / 4;
-                       *kdiv = 2;
-               } else if (bestdiv % 6 == 0) {
-                       *pdiv = 3;
-                       *qdiv = bestdiv / 6;
-                       *kdiv = 2;
-               } else if (bestdiv % 5 == 0) {
-                       *pdiv = 5;
-                       *qdiv = bestdiv / 10;
-                       *kdiv = 2;
-               } else if (bestdiv % 14 == 0) {
-                       *pdiv = 7;
-                       *qdiv = bestdiv / 14;
-                       *kdiv = 2;
-               }
-       } else {
-               if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
-                       *pdiv = bestdiv;
-                       *qdiv = 1;
-                       *kdiv = 1;
-               } else { /* 9, 15, 21 */
-                       *pdiv = bestdiv / 3;
-                       *qdiv = 1;
-                       *kdiv = 3;
-               }
-       }
-}
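The branches above decompose the chosen overall divider into the P, Q and K post-divider stages, and the decomposition always satisfies pdiv * qdiv * kdiv == bestdiv. A minimal standalone sketch (not driver code; it re-implements the same branch logic locally) that checks a few representative dividers:

#include <assert.h>
#include <stdio.h>

/* Local copy of the divider decomposition above, for illustration only. */
static void get_multipliers(int bestdiv, int *pdiv, int *qdiv, int *kdiv)
{
	if (bestdiv % 2 == 0) {			/* even dividers */
		if (bestdiv == 2) {
			*pdiv = 2; *qdiv = 1; *kdiv = 1;
		} else if (bestdiv % 4 == 0) {
			*pdiv = 2; *qdiv = bestdiv / 4; *kdiv = 2;
		} else if (bestdiv % 6 == 0) {
			*pdiv = 3; *qdiv = bestdiv / 6; *kdiv = 2;
		} else if (bestdiv % 5 == 0) {
			*pdiv = 5; *qdiv = bestdiv / 10; *kdiv = 2;
		} else if (bestdiv % 14 == 0) {
			*pdiv = 7; *qdiv = bestdiv / 14; *kdiv = 2;
		}
	} else {				/* odd dividers: 3, 5, 7, 9, 15, 21 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv; *qdiv = 1; *kdiv = 1;
		} else {
			*pdiv = bestdiv / 3; *qdiv = 1; *kdiv = 3;
		}
	}
}

int main(void)
{
	static const int divs[] = { 2, 14, 20, 15, 21, 98 };
	unsigned int i;

	for (i = 0; i < sizeof(divs) / sizeof(divs[0]); i++) {
		int p, q, k;

		get_multipliers(divs[i], &p, &q, &k);
		assert(p * q * k == divs[i]);
		printf("%3d = %d * %d * %d\n", divs[i], p, q, k);
	}
	return 0;
}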
-
-static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
-                                     u32 dco_freq, u32 ref_freq,
-                                     int pdiv, int qdiv, int kdiv)
-{
-       u32 dco;
-
-       switch (kdiv) {
-       case 1:
-               params->kdiv = 1;
-               break;
-       case 2:
-               params->kdiv = 2;
-               break;
-       case 3:
-               params->kdiv = 4;
-               break;
-       default:
-               WARN(1, "Incorrect KDiv\n");
-       }
-
-       switch (pdiv) {
-       case 2:
-               params->pdiv = 1;
-               break;
-       case 3:
-               params->pdiv = 2;
-               break;
-       case 5:
-               params->pdiv = 4;
-               break;
-       case 7:
-               params->pdiv = 8;
-               break;
-       default:
-               WARN(1, "Incorrect PDiv\n");
-       }
-
-       WARN_ON(kdiv != 2 && qdiv != 1);
-
-       params->qdiv_ratio = qdiv;
-       params->qdiv_mode = (qdiv == 1) ? 0 : 1;
-
-       dco = div_u64((u64)dco_freq << 15, ref_freq);
-
-       params->dco_integer = dco >> 15;
-       params->dco_fraction = dco & 0x7fff;
-}
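The DCO-to-reference ratio is packed as a fixed-point value with a 15-bit fraction: the integer part goes to dco_integer and the low 15 bits to dco_fraction. A standalone sketch of the same split, assuming an illustrative 8.91 GHz DCO from a 24 MHz reference (both in kHz):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative inputs: 8.91 GHz DCO, 24 MHz reference, both in kHz. */
	uint64_t dco_freq = 8910000, ref_freq = 24000;

	/* Same 15-bit fixed-point split as cnl_wrpll_params_populate(). */
	uint32_t dco = (uint32_t)((dco_freq << 15) / ref_freq);	/* ratio 371.25 in Q15 */
	uint32_t dco_integer = dco >> 15;			/* 371 == 0x173 */
	uint32_t dco_fraction = dco & 0x7fff;			/* 0.25 * 2^15 == 0x2000 */

	printf("dco_integer=0x%x dco_fraction=0x%x\n", dco_integer, dco_fraction);
	return 0;
}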
-
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
-       int ref_clock = dev_priv->cdclk.hw.ref;
-
-       /*
-        * For ICL+, the spec states: if reference frequency is 38.4,
-        * use 19.2 because the DPLL automatically divides that by 2.
-        */
-       if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
-               ref_clock = 19200;
-
-       return ref_clock;
-}
-
-static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
-                       struct skl_wrpll_params *wrpll_params)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       u32 afe_clock = crtc_state->port_clock * 5;
-       u32 ref_clock;
-       u32 dco_min = 7998000;
-       u32 dco_max = 10000000;
-       u32 dco_mid = (dco_min + dco_max) / 2;
-       static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
-                                        18, 20, 24, 28, 30, 32,  36,  40,
-                                        42, 44, 48, 50, 52, 54,  56,  60,
-                                        64, 66, 68, 70, 72, 76,  78,  80,
-                                        84, 88, 90, 92, 96, 98, 100, 102,
-                                         3,  5,  7,  9, 15, 21 };
-       u32 dco, best_dco = 0, dco_centrality = 0;
-       u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
-       int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
-
-       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
-               dco = afe_clock * dividers[d];
-
-               if ((dco <= dco_max) && (dco >= dco_min)) {
-                       dco_centrality = abs(dco - dco_mid);
-
-                       if (dco_centrality < best_dco_centrality) {
-                               best_dco_centrality = dco_centrality;
-                               best_div = dividers[d];
-                               best_dco = dco;
-                       }
-               }
-       }
-
-       if (best_div == 0)
-               return false;
-
-       cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
-       ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
-       cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
-                                 pdiv, qdiv, kdiv);
-
-       return true;
-}
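In other words, the search scales the AFE clock (five times the port clock) by each candidate divider and keeps the in-range DCO closest to the middle of the 7.998-10 GHz band. A standalone sketch of that selection for a hypothetical 594 MHz HDMI clock, with the divider list trimmed to the interesting neighbours:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 594 MHz HDMI clock: port_clock = 594000 kHz. */
	const unsigned int afe_clock = 594000 * 5;		/* 2,970,000 kHz */
	const unsigned int dco_min = 7998000, dco_max = 10000000;
	const unsigned int dco_mid = (dco_min + dco_max) / 2;	/* 8,999,000 kHz */
	static const int dividers[] = { 2, 3, 4 };	/* reduced list, for illustration */
	unsigned int best_dco = 0, best_centrality = ~0u;
	int best_div = 0;
	unsigned int i;

	for (i = 0; i < sizeof(dividers) / sizeof(dividers[0]); i++) {
		unsigned int dco = afe_clock * dividers[i];
		unsigned int centrality;

		/* divider 2 gives 5.94 GHz (too low), 4 gives 11.88 GHz (too high) */
		if (dco < dco_min || dco > dco_max)
			continue;

		centrality = dco > dco_mid ? dco - dco_mid : dco_mid - dco;
		if (centrality < best_centrality) {
			best_centrality = centrality;
			best_div = dividers[i];
			best_dco = dco;
		}
	}

	/* Only divider 3 survives: DCO = 8,910,000 kHz, decomposed as 3 * 1 * 1. */
	printf("best_div=%d best_dco=%u kHz\n", best_div, best_dco);
	return 0;
}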
-
-static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
-{
-       u32 cfgcr0, cfgcr1;
-       struct skl_wrpll_params wrpll_params = { 0, };
-
-       cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
-
-       if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
-               return false;
-
-       cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
-               wrpll_params.dco_integer;
-
-       cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-               DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
-               DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
-               DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
-               DPLL_CFGCR1_CENTRAL_FREQ;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
-       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-       return true;
-}
-
-static bool
-cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
-{
-       u32 cfgcr0;
-
-       cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
-
-       switch (crtc_state->port_clock / 2) {
-       case 81000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
-               break;
-       case 135000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
-               break;
-       case 270000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
-               break;
-               /* eDP 1.4 rates */
-       case 162000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
-               break;
-       case 108000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
-               break;
-       case 216000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
-               break;
-       case 324000:
-               /* Some SKUs may require elevated I/O voltage to support this */
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
-               break;
-       case 405000:
-               /* Some SKUs may require elevated I/O voltage to support this */
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
-               break;
-       }
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
-
-       return true;
-}
-
-static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
-{
-       struct intel_shared_dpll *pll;
-       bool bret;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
-               if (!bret) {
-                       DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
-                       return NULL;
-               }
-       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
-               bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
-               if (!bret) {
-                       DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
-                       return NULL;
-               }
-       } else {
-               DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
-                             crtc_state->output_types);
-               return NULL;
-       }
-
-       pll = intel_find_shared_dpll(crtc_state,
-                                    DPLL_ID_SKL_DPLL0,
-                                    DPLL_ID_SKL_DPLL2);
-       if (!pll) {
-               DRM_DEBUG_KMS("No PLL selected\n");
-               return NULL;
-       }
-
-       intel_reference_shared_dpll(pll, crtc_state);
-
-       return pll;
-}
-
-static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       DRM_DEBUG_KMS("dpll_hw_state: "
-                     "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
-                     hw_state->cfgcr0,
-                     hw_state->cfgcr1);
-}
-
-static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
-       .enable = cnl_ddi_pll_enable,
-       .disable = cnl_ddi_pll_disable,
-       .get_hw_state = cnl_ddi_pll_get_hw_state,
-};
-
-static const struct dpll_info cnl_plls[] = {
-       { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
-       { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
-       { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr cnl_pll_mgr = {
-       .dpll_info = cnl_plls,
-       .get_dpll = cnl_get_dpll,
-       .dump_hw_state = cnl_dump_hw_state,
-};
-
-struct icl_combo_pll_params {
-       int clock;
-       struct skl_wrpll_params wrpll;
-};
-
-/*
- * These values are already adjusted: they're the bits we write to the
- * registers, not the logical values.
- */
-static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
-       { 540000,
-         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [0]: 5.4 */
-           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 270000,
-         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [1]: 2.7 */
-           .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 162000,
-         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [2]: 1.62 */
-           .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 324000,
-         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [3]: 3.24 */
-           .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 216000,
-         { .dco_integer = 0x168, .dco_fraction = 0x0000,               /* [4]: 2.16 */
-           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
-       { 432000,
-         { .dco_integer = 0x168, .dco_fraction = 0x0000,               /* [5]: 4.32 */
-           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 648000,
-         { .dco_integer = 0x195, .dco_fraction = 0x0000,               /* [6]: 6.48 */
-           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 810000,
-         { .dco_integer = 0x151, .dco_fraction = 0x4000,               /* [7]: 8.1 */
-           .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-};
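As a sanity check on that encoding, the DCO frequency implied by a table entry is ref * (dco_integer + dco_fraction / 2^15). A standalone sketch for the 8.1 GHz entry above; the comparison against port_clock * 5 mirrors the afe_clock relation used elsewhere in this file:

#include <stdio.h>

int main(void)
{
	/* Entry [7] of the 24 MHz table above: 8.1 Gb/s link rate. */
	unsigned int dco_integer = 0x151, dco_fraction = 0x4000, ref_khz = 24000;

	/* DCO (kHz) = ref * (dco_integer + dco_fraction / 2^15) */
	unsigned long long dco_khz =
		((unsigned long long)ref_khz * ((dco_integer << 15) + dco_fraction)) >> 15;

	/*
	 * 337.5 * 24 MHz = 8,100,000 kHz; divided by pdiv 2 and kdiv 1 that is
	 * 4,050,000 kHz, i.e. port_clock 810000 times 5 (the AFE clock).
	 */
	printf("dco = %llu kHz\n", dco_khz);
	return 0;
}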
-
-/* Also used for 38.4 MHz values. */
-static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
-       { 540000,
-         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [0]: 5.4 */
-           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 270000,
-         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [1]: 2.7 */
-           .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 162000,
-         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [2]: 1.62 */
-           .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 324000,
-         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [3]: 3.24 */
-           .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 216000,
-         { .dco_integer = 0x1C2, .dco_fraction = 0x0000,               /* [4]: 2.16 */
-           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
-       { 432000,
-         { .dco_integer = 0x1C2, .dco_fraction = 0x0000,               /* [5]: 4.32 */
-           .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 648000,
-         { .dco_integer = 0x1FA, .dco_fraction = 0x2000,               /* [6]: 6.48 */
-           .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-       { 810000,
-         { .dco_integer = 0x1A5, .dco_fraction = 0x7000,               /* [7]: 8.1 */
-           .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
-};
-
-static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
-       .dco_integer = 0x151, .dco_fraction = 0x4000,
-       .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
-};
-
-static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
-       .dco_integer = 0x1A5, .dco_fraction = 0x7000,
-       .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
-};
-
-static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
-                                 struct skl_wrpll_params *pll_params)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       const struct icl_combo_pll_params *params =
-               dev_priv->cdclk.hw.ref == 24000 ?
-               icl_dp_combo_pll_24MHz_values :
-               icl_dp_combo_pll_19_2MHz_values;
-       int clock = crtc_state->port_clock;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
-               if (clock == params[i].clock) {
-                       *pll_params = params[i].wrpll;
-                       return true;
-               }
-       }
-
-       MISSING_CASE(clock);
-       return false;
-}
-
-static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
-                            struct skl_wrpll_params *pll_params)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
-       *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
-                       icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
-       return true;
-}
-
-static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
-                               struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       u32 cfgcr0, cfgcr1;
-       struct skl_wrpll_params pll_params = { 0 };
-       bool ret;
-
-       if (intel_port_is_tc(dev_priv, encoder->port))
-               ret = icl_calc_tbt_pll(crtc_state, &pll_params);
-       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-                intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
-               ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
-       else
-               ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
-
-       if (!ret)
-               return false;
-
-       cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
-                pll_params.dco_integer;
-
-       cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
-                DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
-                DPLL_CFGCR1_KDIV(pll_params.kdiv) |
-                DPLL_CFGCR1_PDIV(pll_params.pdiv) |
-                DPLL_CFGCR1_CENTRAL_FREQ_8400;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
-       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-
-       return true;
-}
-
-static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
-{
-       return id - DPLL_ID_ICL_MGPLL1;
-}
-
-enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
-{
-       return tc_port + DPLL_ID_ICL_MGPLL1;
-}
-
-static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
-                                    u32 *target_dco_khz,
-                                    struct intel_dpll_hw_state *state)
-{
-       u32 dco_min_freq, dco_max_freq;
-       int div1_vals[] = {7, 5, 3, 2};
-       unsigned int i;
-       int div2;
-
-       dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
-       dco_max_freq = is_dp ? 8100000 : 10000000;
-
-       for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
-               int div1 = div1_vals[i];
-
-               for (div2 = 10; div2 > 0; div2--) {
-                       int dco = div1 * div2 * clock_khz * 5;
-                       int a_divratio, tlinedrv, inputsel;
-                       u32 hsdiv;
-
-                       if (dco < dco_min_freq || dco > dco_max_freq)
-                               continue;
-
-                       if (div2 >= 2) {
-                               a_divratio = is_dp ? 10 : 5;
-                               tlinedrv = 2;
-                       } else {
-                               a_divratio = 5;
-                               tlinedrv = 0;
-                       }
-                       inputsel = is_dp ? 0 : 1;
-
-                       switch (div1) {
-                       default:
-                               MISSING_CASE(div1);
-                               /* fall through */
-                       case 2:
-                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
-                               break;
-                       case 3:
-                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
-                               break;
-                       case 5:
-                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
-                               break;
-                       case 7:
-                               hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
-                               break;
-                       }
-
-                       *target_dco_khz = dco;
-
-                       state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
-
-                       state->mg_clktop2_coreclkctl1 =
-                               MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
-
-                       state->mg_clktop2_hsclkctl =
-                               MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
-                               MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
-                               hsdiv |
-                               MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
-
-                       return true;
-               }
-       }
-
-       return false;
-}
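For DP the DCO window collapses to exactly 8.1 GHz, so the loops above only need div1 (tried in the order 7, 5, 3, 2) and div2 such that div1 * div2 * clock * 5 hits it. A standalone sketch for an assumed 2.7 GHz DP link rate:

#include <stdio.h>

int main(void)
{
	/* Assumed DP link rate 2.7 GHz: clock = 270000 kHz, DCO must be 8.1 GHz. */
	const int clock_khz = 270000;
	const int dco_min = 8100000, dco_max = 8100000;
	static const int div1_vals[] = { 7, 5, 3, 2 };
	unsigned int i;

	for (i = 0; i < sizeof(div1_vals) / sizeof(div1_vals[0]); i++) {
		int div2;

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1_vals[i] * div2 * clock_khz * 5;

			if (dco < dco_min || dco > dco_max)
				continue;

			/* First hit: div1 = 3, div2 = 2 (hsdiv ratio 3, dsdiv 2). */
			printf("div1=%d div2=%d dco=%d kHz\n",
			       div1_vals[i], div2, dco);
			return 0;
		}
	}
	return 1;
}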
-
-/*
- * The specification for this function uses real numbers, so the math had to be
- * adapted to integer-only calculation; that's why it looks so different.
- */
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
-       int refclk_khz = dev_priv->cdclk.hw.ref;
-       int clock = crtc_state->port_clock;
-       u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
-       u32 iref_ndiv, iref_trim, iref_pulse_w;
-       u32 prop_coeff, int_coeff;
-       u32 tdc_targetcnt, feedfwgain;
-       u64 ssc_stepsize, ssc_steplen, ssc_steplog;
-       u64 tmp;
-       bool use_ssc = false;
-       bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
-
-       memset(pll_state, 0, sizeof(*pll_state));
-
-       if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
-                                     pll_state)) {
-               DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
-               return false;
-       }
-
-       m1div = 2;
-       m2div_int = dco_khz / (refclk_khz * m1div);
-       if (m2div_int > 255) {
-               m1div = 4;
-               m2div_int = dco_khz / (refclk_khz * m1div);
-               if (m2div_int > 255) {
-                       DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
-                                     clock);
-                       return false;
-               }
-       }
-       m2div_rem = dco_khz % (refclk_khz * m1div);
-
-       tmp = (u64)m2div_rem * (1 << 22);
-       do_div(tmp, refclk_khz * m1div);
-       m2div_frac = tmp;
-
-       switch (refclk_khz) {
-       case 19200:
-               iref_ndiv = 1;
-               iref_trim = 28;
-               iref_pulse_w = 1;
-               break;
-       case 24000:
-               iref_ndiv = 1;
-               iref_trim = 25;
-               iref_pulse_w = 2;
-               break;
-       case 38400:
-               iref_ndiv = 2;
-               iref_trim = 28;
-               iref_pulse_w = 1;
-               break;
-       default:
-               MISSING_CASE(refclk_khz);
-               return false;
-       }
-
-       /*
-        * tdc_res = 0.000003
-        * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
-        *
-        * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
-        * was supposed to be a division, but we rearranged the operations of
-        * the formula to avoid early divisions so we don't multiply the
-        * rounding errors.
-        *
-        * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
-        * we also rearrange to work with integers.
-        *
-        * The 0.5 transformed to 5 results in a multiplication by 10 and the
-        * last division by 10.
-        */
-       tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
-
-       /*
-        * Here we divide dco_khz by 10 in order to allow the dividend to fit in
-        * 32 bits. That's not a problem since we round the division down
-        * anyway.
-        */
-       feedfwgain = (use_ssc || m2div_rem > 0) ?
-               m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
-
-       if (dco_khz >= 9000000) {
-               prop_coeff = 5;
-               int_coeff = 10;
-       } else {
-               prop_coeff = 4;
-               int_coeff = 8;
-       }
-
-       if (use_ssc) {
-               tmp = mul_u32_u32(dco_khz, 47 * 32);
-               do_div(tmp, refclk_khz * m1div * 10000);
-               ssc_stepsize = tmp;
-
-               tmp = mul_u32_u32(dco_khz, 1000);
-               ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
-       } else {
-               ssc_stepsize = 0;
-               ssc_steplen = 0;
-       }
-       ssc_steplog = 4;
-
-       pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
-                                 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
-                                 MG_PLL_DIV0_FBDIV_INT(m2div_int);
-
-       pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
-                                MG_PLL_DIV1_DITHER_DIV_2 |
-                                MG_PLL_DIV1_NDIVRATIO(1) |
-                                MG_PLL_DIV1_FBPREDIV(m1div);
-
-       pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
-                              MG_PLL_LF_AFCCNTSEL_512 |
-                              MG_PLL_LF_GAINCTRL(1) |
-                              MG_PLL_LF_INT_COEFF(int_coeff) |
-                              MG_PLL_LF_PROP_COEFF(prop_coeff);
-
-       pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
-                                     MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
-                                     MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
-                                     MG_PLL_FRAC_LOCK_DCODITHEREN |
-                                     MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
-       if (use_ssc || m2div_rem > 0)
-               pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
-
-       pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
-                               MG_PLL_SSC_TYPE(2) |
-                               MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
-                               MG_PLL_SSC_STEPNUM(ssc_steplog) |
-                               MG_PLL_SSC_FLLEN |
-                               MG_PLL_SSC_STEPSIZE(ssc_stepsize);
-
-       pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
-                                           MG_PLL_TDC_COLDST_IREFINT_EN |
-                                           MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
-                                           MG_PLL_TDC_TDCOVCCORR_EN |
-                                           MG_PLL_TDC_TDCSEL(3);
-
-       pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
-                                MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
-                                MG_PLL_BIAS_BIAS_BONUS(10) |
-                                MG_PLL_BIAS_BIASCAL_EN |
-                                MG_PLL_BIAS_CTRIM(12) |
-                                MG_PLL_BIAS_VREF_RDAC(4) |
-                                MG_PLL_BIAS_IREFTRIM(iref_trim);
-
-       if (refclk_khz == 38400) {
-               pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
-               pll_state->mg_pll_bias_mask = 0;
-       } else {
-               pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
-               pll_state->mg_pll_bias_mask = -1U;
-       }
-
-       pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
-       pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
-
-       return true;
-}
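To make the integer-only rearrangements above concrete, here is a standalone sketch that reproduces the feedback-divider split and the tdc_targetcnt value for an assumed 38.4 MHz reference driving an 8.1 GHz DCO:

#include <stdio.h>

int main(void)
{
	/* Assumed example: 38.4 MHz reference, 8.1 GHz DCO (values in kHz). */
	const unsigned int refclk_khz = 38400, dco_khz = 8100000;
	unsigned int m1div = 2;

	/* Feedback divider split into integer and fractional parts. */
	unsigned int m2div_int = dco_khz / (refclk_khz * m1div);	/* 105 */
	unsigned int m2div_rem = dco_khz % (refclk_khz * m1div);	/* 36000 */
	unsigned long long m2div_frac =
		((unsigned long long)m2div_rem << 22) / (refclk_khz * m1div); /* 0x1e0000 */

	/*
	 * Integer rearrangement of
	 *   tdc_targetcnt = int(2 / (3e-6 * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 * as done in icl_calc_mg_pll_state(): 39 for a 38.4 MHz reference.
	 */
	unsigned int tdc_targetcnt =
		(2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	printf("m2div_int=%u m2div_frac=0x%llx tdc_targetcnt=%u\n",
	       m2div_int, m2div_frac, tdc_targetcnt);
	return 0;
}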
-
-static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       struct intel_digital_port *intel_dig_port;
-       struct intel_shared_dpll *pll;
-       enum port port = encoder->port;
-       enum intel_dpll_id min, max;
-       bool ret;
-
-       if (intel_port_is_combophy(dev_priv, port)) {
-               min = DPLL_ID_ICL_DPLL0;
-               max = DPLL_ID_ICL_DPLL1;
-               ret = icl_calc_dpll_state(crtc_state, encoder);
-       } else if (intel_port_is_tc(dev_priv, port)) {
-               if (encoder->type == INTEL_OUTPUT_DP_MST) {
-                       struct intel_dp_mst_encoder *mst_encoder;
-
-                       mst_encoder = enc_to_mst(&encoder->base);
-                       intel_dig_port = mst_encoder->primary;
-               } else {
-                       intel_dig_port = enc_to_dig_port(&encoder->base);
-               }
-
-               if (intel_dig_port->tc_type == TC_PORT_TBT) {
-                       min = DPLL_ID_ICL_TBTPLL;
-                       max = min;
-                       ret = icl_calc_dpll_state(crtc_state, encoder);
-               } else {
-                       enum tc_port tc_port;
-
-                       tc_port = intel_port_to_tc(dev_priv, port);
-                       min = icl_tc_port_to_pll_id(tc_port);
-                       max = min;
-                       ret = icl_calc_mg_pll_state(crtc_state);
-               }
-       } else {
-               MISSING_CASE(port);
-               return NULL;
-       }
-
-       if (!ret) {
-               DRM_DEBUG_KMS("Could not calculate PLL state.\n");
-               return NULL;
-       }
-
-       pll = intel_find_shared_dpll(crtc_state, min, max);
-       if (!pll) {
-               DRM_DEBUG_KMS("No PLL selected\n");
-               return NULL;
-       }
-
-       intel_reference_shared_dpll(pll, crtc_state);
-
-       return pll;
-}
-
-static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll,
-                               struct intel_dpll_hw_state *hw_state)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       enum tc_port tc_port = icl_pll_id_to_tc_port(id);
-       intel_wakeref_t wakeref;
-       bool ret = false;
-       u32 val;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(MG_PLL_ENABLE(tc_port));
-       if (!(val & PLL_ENABLE))
-               goto out;
-
-       hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
-       hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
-
-       hw_state->mg_clktop2_coreclkctl1 =
-               I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
-       hw_state->mg_clktop2_coreclkctl1 &=
-               MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
-
-       hw_state->mg_clktop2_hsclkctl =
-               I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
-       hw_state->mg_clktop2_hsclkctl &=
-               MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
-               MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
-               MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
-               MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
-
-       hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
-       hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
-       hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
-       hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
-       hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
-
-       hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
-       hw_state->mg_pll_tdc_coldst_bias =
-               I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-
-       if (dev_priv->cdclk.hw.ref == 38400) {
-               hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
-               hw_state->mg_pll_bias_mask = 0;
-       } else {
-               hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
-               hw_state->mg_pll_bias_mask = -1U;
-       }
-
-       hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
-       hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
-
-       ret = true;
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-       return ret;
-}
-
-static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll,
-                                struct intel_dpll_hw_state *hw_state,
-                                i915_reg_t enable_reg)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       bool ret = false;
-       u32 val;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       val = I915_READ(enable_reg);
-       if (!(val & PLL_ENABLE))
-               goto out;
-
-       hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
-       hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
-
-       ret = true;
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-       return ret;
-}
-
-static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                  struct intel_shared_dpll *pll,
-                                  struct intel_dpll_hw_state *hw_state)
-{
-       return icl_pll_get_hw_state(dev_priv, pll, hw_state,
-                                   CNL_DPLL_ENABLE(pll->info->id));
-}
-
-static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll,
-                                struct intel_dpll_hw_state *hw_state)
-{
-       return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
-}
-
-static void icl_dpll_write(struct drm_i915_private *dev_priv,
-                          struct intel_shared_dpll *pll)
-{
-       struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
-       const enum intel_dpll_id id = pll->info->id;
-
-       I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
-       I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
-       POSTING_READ(ICL_DPLL_CFGCR1(id));
-}
-
-static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
-                            struct intel_shared_dpll *pll)
-{
-       struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
-       enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
-       u32 val;
-
-       /*
-        * Some of the following registers have reserved fields, so program
-        * these with RMW based on a mask. The mask can be fixed or generated
-        * during the calc/readout phase if the mask depends on some other HW
-        * state like refclk, see icl_calc_mg_pll_state().
-        */
-       val = I915_READ(MG_REFCLKIN_CTL(tc_port));
-       val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
-       val |= hw_state->mg_refclkin_ctl;
-       I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
-
-       val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
-       val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
-       val |= hw_state->mg_clktop2_coreclkctl1;
-       I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
-
-       val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
-       val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
-                MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
-                MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
-                MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
-       val |= hw_state->mg_clktop2_hsclkctl;
-       I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
-
-       I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
-       I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
-       I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
-       I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
-       I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
-
-       val = I915_READ(MG_PLL_BIAS(tc_port));
-       val &= ~hw_state->mg_pll_bias_mask;
-       val |= hw_state->mg_pll_bias;
-       I915_WRITE(MG_PLL_BIAS(tc_port), val);
-
-       val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-       val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
-       val |= hw_state->mg_pll_tdc_coldst_bias;
-       I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
-
-       POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-}
-
-static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
-                                struct intel_shared_dpll *pll,
-                                i915_reg_t enable_reg)
-{
-       u32 val;
-
-       val = I915_READ(enable_reg);
-       val |= PLL_POWER_ENABLE;
-       I915_WRITE(enable_reg, val);
-
-       /*
-        * The spec says we need to "wait" but it also says it should be
-        * immediate.
-        */
-       if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
-                                   PLL_POWER_STATE, PLL_POWER_STATE, 1))
-               DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
-}
-
-static void icl_pll_enable(struct drm_i915_private *dev_priv,
-                          struct intel_shared_dpll *pll,
-                          i915_reg_t enable_reg)
-{
-       u32 val;
-
-       val = I915_READ(enable_reg);
-       val |= PLL_ENABLE;
-       I915_WRITE(enable_reg, val);
-
-       /* Timeout is actually 600us. */
-       if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
-                                   PLL_LOCK, PLL_LOCK, 1))
-               DRM_ERROR("PLL %d not locked\n", pll->info->id);
-}
-
-static void combo_pll_enable(struct drm_i915_private *dev_priv,
-                            struct intel_shared_dpll *pll)
-{
-       i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
-
-       icl_pll_power_enable(dev_priv, pll, enable_reg);
-
-       icl_dpll_write(dev_priv, pll);
-
-       /*
-        * DVFS pre sequence would be here, but in our driver the cdclk code
-        * paths should already be setting the appropriate voltage, hence we do
-        * nothing here.
-        */
-
-       icl_pll_enable(dev_priv, pll, enable_reg);
-
-       /* DVFS post sequence would be here. See the comment above. */
-}
-
-static void tbt_pll_enable(struct drm_i915_private *dev_priv,
-                          struct intel_shared_dpll *pll)
-{
-       icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
-
-       icl_dpll_write(dev_priv, pll);
-
-       /*
-        * DVFS pre sequence would be here, but in our driver the cdclk code
-        * paths should already be setting the appropriate voltage, hence we do
-        * nothing here.
-        */
-
-       icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
-
-       /* DVFS post sequence would be here. See the comment above. */
-}
-
-static void mg_pll_enable(struct drm_i915_private *dev_priv,
-                         struct intel_shared_dpll *pll)
-{
-       i915_reg_t enable_reg =
-               MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
-
-       icl_pll_power_enable(dev_priv, pll, enable_reg);
-
-       icl_mg_pll_write(dev_priv, pll);
-
-       /*
-        * DVFS pre sequence would be here, but in our driver the cdclk code
-        * paths should already be setting the appropriate voltage, hence we do
-        * nothing here.
-        */
-
-       icl_pll_enable(dev_priv, pll, enable_reg);
-
-       /* DVFS post sequence would be here. See the comment above. */
-}
-
-static void icl_pll_disable(struct drm_i915_private *dev_priv,
-                           struct intel_shared_dpll *pll,
-                           i915_reg_t enable_reg)
-{
-       u32 val;
-
-       /* The first steps are done by intel_ddi_post_disable(). */
-
-       /*
-        * DVFS pre sequence would be here, but in our driver the cdclk code
-        * paths should already be setting the appropriate voltage, hence we do
-        * nothing here.
-        */
-
-       val = I915_READ(enable_reg);
-       val &= ~PLL_ENABLE;
-       I915_WRITE(enable_reg, val);
-
-       /* Timeout is actually 1us. */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   enable_reg, PLL_LOCK, 0, 1))
-               DRM_ERROR("PLL %d locked\n", pll->info->id);
-
-       /* DVFS post sequence would be here. See the comment above. */
-
-       val = I915_READ(enable_reg);
-       val &= ~PLL_POWER_ENABLE;
-       I915_WRITE(enable_reg, val);
-
-       /*
-        * The spec says we need to "wait" but it also says it should be
-        * immediate.
-        */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   enable_reg, PLL_POWER_STATE, 0, 1))
-               DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
-}
-
-static void combo_pll_disable(struct drm_i915_private *dev_priv,
-                             struct intel_shared_dpll *pll)
-{
-       icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
-}
-
-static void tbt_pll_disable(struct drm_i915_private *dev_priv,
-                           struct intel_shared_dpll *pll)
-{
-       icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
-}
-
-static void mg_pll_disable(struct drm_i915_private *dev_priv,
-                          struct intel_shared_dpll *pll)
-{
-       i915_reg_t enable_reg =
-               MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
-
-       icl_pll_disable(dev_priv, pll, enable_reg);
-}
-
-static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
-                     "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
-                     "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
-                     "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
-                     "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
-                     "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
-                     hw_state->cfgcr0, hw_state->cfgcr1,
-                     hw_state->mg_refclkin_ctl,
-                     hw_state->mg_clktop2_coreclkctl1,
-                     hw_state->mg_clktop2_hsclkctl,
-                     hw_state->mg_pll_div0,
-                     hw_state->mg_pll_div1,
-                     hw_state->mg_pll_lf,
-                     hw_state->mg_pll_frac_lock,
-                     hw_state->mg_pll_ssc,
-                     hw_state->mg_pll_bias,
-                     hw_state->mg_pll_tdc_coldst_bias);
-}
-
-static const struct intel_shared_dpll_funcs combo_pll_funcs = {
-       .enable = combo_pll_enable,
-       .disable = combo_pll_disable,
-       .get_hw_state = combo_pll_get_hw_state,
-};
-
-static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
-       .enable = tbt_pll_enable,
-       .disable = tbt_pll_disable,
-       .get_hw_state = tbt_pll_get_hw_state,
-};
-
-static const struct intel_shared_dpll_funcs mg_pll_funcs = {
-       .enable = mg_pll_enable,
-       .disable = mg_pll_disable,
-       .get_hw_state = mg_pll_get_hw_state,
-};
-
-static const struct dpll_info icl_plls[] = {
-       { "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
-       { "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
-       { "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
-       { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
-       { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
-       { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
-       { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr icl_pll_mgr = {
-       .dpll_info = icl_plls,
-       .get_dpll = icl_get_dpll,
-       .dump_hw_state = icl_dump_hw_state,
-};
-
-static const struct dpll_info ehl_plls[] = {
-       { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
-       { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr ehl_pll_mgr = {
-       .dpll_info = ehl_plls,
-       .get_dpll = icl_get_dpll,
-       .dump_hw_state = icl_dump_hw_state,
-};
-
-/**
- * intel_shared_dpll_init - Initialize shared DPLLs
- * @dev: drm device
- *
- * Initialize shared DPLLs for @dev.
- */
-void intel_shared_dpll_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       const struct intel_dpll_mgr *dpll_mgr = NULL;
-       const struct dpll_info *dpll_info;
-       int i;
-
-       if (IS_ELKHARTLAKE(dev_priv))
-               dpll_mgr = &ehl_pll_mgr;
-       else if (INTEL_GEN(dev_priv) >= 11)
-               dpll_mgr = &icl_pll_mgr;
-       else if (IS_CANNONLAKE(dev_priv))
-               dpll_mgr = &cnl_pll_mgr;
-       else if (IS_GEN9_BC(dev_priv))
-               dpll_mgr = &skl_pll_mgr;
-       else if (IS_GEN9_LP(dev_priv))
-               dpll_mgr = &bxt_pll_mgr;
-       else if (HAS_DDI(dev_priv))
-               dpll_mgr = &hsw_pll_mgr;
-       else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
-               dpll_mgr = &pch_pll_mgr;
-
-       if (!dpll_mgr) {
-               dev_priv->num_shared_dpll = 0;
-               return;
-       }
-
-       dpll_info = dpll_mgr->dpll_info;
-
-       for (i = 0; dpll_info[i].name; i++) {
-               WARN_ON(i != dpll_info[i].id);
-               dev_priv->shared_dplls[i].info = &dpll_info[i];
-       }
-
-       dev_priv->dpll_mgr = dpll_mgr;
-       dev_priv->num_shared_dpll = i;
-       mutex_init(&dev_priv->dpll_lock);
-
-       BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-}
-
-/**
- * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc_state: atomic state for the crtc
- * @encoder: encoder
- *
- * Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc_state to the returned pll is registered in the
- * atomic state. That configuration is made effective by calling
- * intel_shared_dpll_swap_state(). The reference should be released by calling
- * intel_release_shared_dpll().
- *
- * Returns:
- * A shared DPLL to be used by @crtc_state and @encoder.
- */
-struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
-                     struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-       const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
-
-       if (WARN_ON(!dpll_mgr))
-               return NULL;
-
-       return dpll_mgr->get_dpll(crtc_state, encoder);
-}
-
-/**
- * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
- * @dpll: dpll in use by @crtc
- * @crtc: crtc
- * @state: atomic state
- *
- * This function releases the reference from @crtc to @dpll from the
- * atomic @state. The new configuration is made effective by calling
- * intel_shared_dpll_swap_state().
- */
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
-                              struct intel_crtc *crtc,
-                              struct drm_atomic_state *state)
-{
-       struct intel_shared_dpll_state *shared_dpll_state;
-
-       shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
-       shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
-}
-
-/**
- * intel_dpll_dump_hw_state - write hw_state to dmesg
- * @dev_priv: i915 drm device
- * @hw_state: hw state to be written to the log
- *
- * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
- */
-void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       if (dev_priv->dpll_mgr) {
-               dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
-       } else {
-               /*
-                * fallback for platforms that don't use the shared dpll
-                * infrastructure
-                */
-               DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
-                             "fp0: 0x%x, fp1: 0x%x\n",
-                             hw_state->dpll,
-                             hw_state->dpll_md,
-                             hw_state->fp0,
-                             hw_state->fp1);
-       }
-}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
deleted file mode 100644 (file)
index d057041..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright © 2012-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_DPLL_MGR_H_
-#define _INTEL_DPLL_MGR_H_
-
-#include <linux/types.h>
-
-#include "intel_display.h"
-
-/* FIXME: Move this to a more appropriate place. */
-#define abs_diff(a, b) ({                      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       (void) (&__a == &__b);                  \
-       __a > __b ? (__a - __b) : (__b - __a); })
-
-struct drm_atomic_state;
-struct drm_device;
-struct drm_i915_private;
-struct intel_crtc;
-struct intel_crtc_state;
-struct intel_encoder;
-struct intel_shared_dpll;
-
-/**
- * enum intel_dpll_id - possible DPLL ids
- *
- * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0.
- */
-enum intel_dpll_id {
-       /**
-        * @DPLL_ID_PRIVATE: non-shared dpll in use
-        */
-       DPLL_ID_PRIVATE = -1,
-
-       /**
-        * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB
-        */
-       DPLL_ID_PCH_PLL_A = 0,
-       /**
-        * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB
-        */
-       DPLL_ID_PCH_PLL_B = 1,
-
-
-       /**
-        * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1
-        */
-       DPLL_ID_WRPLL1 = 0,
-       /**
-        * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2
-        */
-       DPLL_ID_WRPLL2 = 1,
-       /**
-        * @DPLL_ID_SPLL: HSW and BDW SPLL
-        */
-       DPLL_ID_SPLL = 2,
-       /**
-        * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL
-        */
-       DPLL_ID_LCPLL_810 = 3,
-       /**
-        * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL
-        */
-       DPLL_ID_LCPLL_1350 = 4,
-       /**
-        * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL
-        */
-       DPLL_ID_LCPLL_2700 = 5,
-
-
-       /**
-        * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0
-        */
-       DPLL_ID_SKL_DPLL0 = 0,
-       /**
-        * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1
-        */
-       DPLL_ID_SKL_DPLL1 = 1,
-       /**
-        * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2
-        */
-       DPLL_ID_SKL_DPLL2 = 2,
-       /**
-        * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
-        */
-       DPLL_ID_SKL_DPLL3 = 3,
-
-
-       /**
-        * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
-        */
-       DPLL_ID_ICL_DPLL0 = 0,
-       /**
-        * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
-        */
-       DPLL_ID_ICL_DPLL1 = 1,
-       /**
-        * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
-        */
-       DPLL_ID_ICL_TBTPLL = 2,
-       /**
-        * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
-        */
-       DPLL_ID_ICL_MGPLL1 = 3,
-       /**
-        * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
-        */
-       DPLL_ID_ICL_MGPLL2 = 4,
-       /**
-        * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
-        */
-       DPLL_ID_ICL_MGPLL3 = 5,
-       /**
-        * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
-        */
-       DPLL_ID_ICL_MGPLL4 = 6,
-};
-#define I915_NUM_PLLS 7
-
-struct intel_dpll_hw_state {
-       /* i9xx, pch plls */
-       u32 dpll;
-       u32 dpll_md;
-       u32 fp0;
-       u32 fp1;
-
-       /* hsw, bdw */
-       u32 wrpll;
-       u32 spll;
-
-       /* skl */
-       /*
-        * DPLL_CTRL1 has 6 bits for each of these DPLLs. We store those in
-        * the lower part of ctrl1 and they get shifted into position when
-        * writing the register. This allows us to easily compare the state
-        * when deciding whether to share the DPLL.
-        */
-       u32 ctrl1;
-       /* HDMI only, 0 when used for DP */
-       u32 cfgcr1, cfgcr2;
-
-       /* cnl */
-       u32 cfgcr0;
-       /* CNL also uses cfgcr1 */
-
-       /* bxt */
-       u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
-
-       /*
-        * ICL uses the following, already defined:
-        * u32 cfgcr0, cfgcr1;
-        */
-       u32 mg_refclkin_ctl;
-       u32 mg_clktop2_coreclkctl1;
-       u32 mg_clktop2_hsclkctl;
-       u32 mg_pll_div0;
-       u32 mg_pll_div1;
-       u32 mg_pll_lf;
-       u32 mg_pll_frac_lock;
-       u32 mg_pll_ssc;
-       u32 mg_pll_bias;
-       u32 mg_pll_tdc_coldst_bias;
-       u32 mg_pll_bias_mask;
-       u32 mg_pll_tdc_coldst_bias_mask;
-};
-
-/**
- * struct intel_shared_dpll_state - hold the DPLL atomic state
- *
- * This structure holds an atomic state for the DPLL, that can represent
- * either its current state (in struct &intel_shared_dpll) or a desired
- * future state which would be applied by an atomic mode set (stored in
- * a struct &intel_atomic_state).
- *
- * See also intel_get_shared_dpll() and intel_release_shared_dpll().
- */
-struct intel_shared_dpll_state {
-       /**
-        * @crtc_mask: mask of CRTC using this DPLL, active or not
-        */
-       unsigned crtc_mask;
-
-       /**
-        * @hw_state: hardware configuration for the DPLL stored in
-        * struct &intel_dpll_hw_state.
-        */
-       struct intel_dpll_hw_state hw_state;
-};
-
-/**
- * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs
- */
-struct intel_shared_dpll_funcs {
-       /**
-        * @prepare:
-        *
-        * Optional hook to perform operations prior to enabling the PLL.
-        * Called from intel_prepare_shared_dpll() function unless the PLL
-        * is already enabled.
-        */
-       void (*prepare)(struct drm_i915_private *dev_priv,
-                       struct intel_shared_dpll *pll);
-
-       /**
-        * @enable:
-        *
-        * Hook for enabling the pll, called from intel_enable_shared_dpll()
-        * if the pll is not already enabled.
-        */
-       void (*enable)(struct drm_i915_private *dev_priv,
-                      struct intel_shared_dpll *pll);
-
-       /**
-        * @disable:
-        *
-        * Hook for disabling the pll, called from intel_disable_shared_dpll()
-        * only when it is safe to disable the pll, i.e., there are no more
-        * tracked users for it.
-        */
-       void (*disable)(struct drm_i915_private *dev_priv,
-                       struct intel_shared_dpll *pll);
-
-       /**
-        * @get_hw_state:
-        *
-        * Hook for reading the values currently programmed to the DPLL
-        * registers. This is used for initial hw state readout and state
-        * verification after a mode set.
-        */
-       bool (*get_hw_state)(struct drm_i915_private *dev_priv,
-                            struct intel_shared_dpll *pll,
-                            struct intel_dpll_hw_state *hw_state);
-};
-
-/**
- * struct dpll_info - display PLL platform specific info
- */
-struct dpll_info {
-       /**
-        * @name: DPLL name; used for logging
-        */
-       const char *name;
-
-       /**
-        * @funcs: platform specific hooks
-        */
-       const struct intel_shared_dpll_funcs *funcs;
-
-       /**
-        * @id: unique identifier for this DPLL; should match the index in the
-        * dev_priv->shared_dplls array
-        */
-       enum intel_dpll_id id;
-
-#define INTEL_DPLL_ALWAYS_ON   (1 << 0)
-       /**
-        * @flags:
-        *
-        * INTEL_DPLL_ALWAYS_ON
-        *     Inform the state checker that the DPLL is kept enabled even if
-        *     not in use by any CRTC.
-        */
-       u32 flags;
-};
-
-/**
- * struct intel_shared_dpll - display PLL with tracked state and users
- */
-struct intel_shared_dpll {
-       /**
-        * @state:
-        *
-        * Store the state for the pll, including its hw state
-        * and CRTCs using it.
-        */
-       struct intel_shared_dpll_state state;
-
-       /**
-        * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
-        */
-       unsigned active_mask;
-
-       /**
-        * @on: is the PLL actually active? Disabled during modeset
-        */
-       bool on;
-
-       /**
-        * @info: platform specific info
-        */
-       const struct dpll_info *info;
-};
-
-#define SKL_DPLL0 0
-#define SKL_DPLL1 1
-#define SKL_DPLL2 2
-#define SKL_DPLL3 3
-
-/* shared dpll functions */
-struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
-                           enum intel_dpll_id id);
-enum intel_dpll_id
-intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
-                        struct intel_shared_dpll *pll);
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
-                       struct intel_shared_dpll *pll,
-                       bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
-                                               struct intel_encoder *encoder);
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
-                              struct intel_crtc *crtc,
-                              struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
-void intel_shared_dpll_init(struct drm_device *dev);
-
-void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
-enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
-bool intel_dpll_is_combophy(enum intel_dpll_id id);
-
-#endif /* _INTEL_DPLL_MGR_H_ */
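For orientation, a minimal sketch of how a platform table could tie the pieces
declared above together; example_pll_funcs, example_pll_enable and friends are
illustrative names only, not from the i915 sources (the real tables live in
intel_dpll_mgr.c):

static void example_pll_enable(struct drm_i915_private *dev_priv,
                               struct intel_shared_dpll *pll);
static void example_pll_disable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll);
static bool example_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                     struct intel_shared_dpll *pll,
                                     struct intel_dpll_hw_state *hw_state);

static const struct intel_shared_dpll_funcs example_pll_funcs = {
        .enable = example_pll_enable,           /* hypothetical platform hooks */
        .disable = example_pll_disable,
        .get_hw_state = example_pll_get_hw_state,
};

static const struct dpll_info example_plls[] = {
        /* name,    funcs,              id,                flags */
        { "DPLL 0", &example_pll_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
        { "DPLL 1", &example_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
        { },
};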
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
deleted file mode 100644 (file)
index d36cada..0000000
+++ /dev/null
@@ -1,1345 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * DOC: Frame Buffer Compression (FBC)
- *
- * FBC tries to save memory bandwidth (and so power consumption) by
- * compressing the amount of memory used by the display. It is totally
- * transparent to user space and completely handled in the kernel.
- *
- * The benefits of FBC are mostly visible with solid backgrounds and
- * variation-less patterns. They come from keeping the memory footprint small
- * and having fewer memory pages opened and accessed for refreshing the display.
- *
- * i915 is responsible for reserving stolen memory for FBC and for
- * configuring its offset in the proper registers. The hardware takes care of
- * all the compression and decompression. However, there are many known cases
- * where we have to forcibly disable it to allow proper screen updates.
- */
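As a rough orientation for the functions below, the enable/activate split in
the atomic commit path works out to roughly this call order (a sketch derived
from the functions in this file, not an exhaustive list):

        intel_fbc_choose_crtc(dev_priv, state);              /* mark one compatible CRTC */
        intel_fbc_enable(crtc, crtc_state, plane_state);     /* reserve the CFB, track the CRTC */
        intel_fbc_pre_update(crtc, crtc_state, plane_state); /* deactivate around the flip */
        intel_fbc_post_update(crtc);                         /* re-check and activate the hardware */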
-
-#include <drm/drm_fourcc.h>
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_fbc.h"
-#include "intel_frontbuffer.h"
-
-static inline bool fbc_supported(struct drm_i915_private *dev_priv)
-{
-       return HAS_FBC(dev_priv);
-}
-
-static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
-{
-       return INTEL_GEN(dev_priv) <= 3;
-}
-
-/*
- * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
- * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
- * origin so the x and y offsets can actually fit the registers. As a
- * consequence, the fence doesn't really start exactly at the display plane
- * address we program because it starts at the real start of the buffer, so we
- * have to take this into consideration here.
- */
-static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
-{
-       return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
-}
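Worked example with illustrative numbers: with state_cache.plane.y = 128 and plane.adjusted_y = 8, the fence Y offset programmed later is 128 - 8 = 120 lines.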
-
-/*
- * For SKL+, the plane source size used by the hardware is based on the value we
- * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
- * we wrote to PIPESRC.
- */
-static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
-                                           int *width, int *height)
-{
-       if (width)
-               *width = cache->plane.src_w;
-       if (height)
-               *height = cache->plane.src_h;
-}
-
-static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
-                                       struct intel_fbc_state_cache *cache)
-{
-       int lines;
-
-       intel_fbc_get_plane_source_size(cache, NULL, &lines);
-       if (IS_GEN(dev_priv, 7))
-               lines = min(lines, 2048);
-       else if (INTEL_GEN(dev_priv) >= 8)
-               lines = min(lines, 2560);
-
-       /* Hardware needs the full buffer stride, not just the active area. */
-       return lines * cache->fb.stride;
-}
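Worked example with illustrative numbers: a 3840x2160 plane with a 15360-byte stride on gen9 keeps all 2160 lines (below the 2560 clamp), giving a CFB size of 2160 * 15360 = 33177600 bytes; on gen7 the same plane would first be clamped to 2048 lines.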
-
-static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
-{
-       u32 fbc_ctl;
-
-       /* Disable compression */
-       fbc_ctl = I915_READ(FBC_CONTROL);
-       if ((fbc_ctl & FBC_CTL_EN) == 0)
-               return;
-
-       fbc_ctl &= ~FBC_CTL_EN;
-       I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-       /* Wait for compressing bit to clear */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   FBC_STATUS, FBC_STAT_COMPRESSING, 0,
-                                   10)) {
-               DRM_DEBUG_KMS("FBC idle timed out\n");
-               return;
-       }
-}
-
-static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
-       int cfb_pitch;
-       int i;
-       u32 fbc_ctl;
-
-       /* Note: fbc.threshold == 1 for i8xx */
-       cfb_pitch = params->cfb_size / FBC_LL_SIZE;
-       if (params->fb.stride < cfb_pitch)
-               cfb_pitch = params->fb.stride;
-
-       /* FBC_CTL wants 32B or 64B units */
-       if (IS_GEN(dev_priv, 2))
-               cfb_pitch = (cfb_pitch / 32) - 1;
-       else
-               cfb_pitch = (cfb_pitch / 64) - 1;
-
-       /* Clear old tags */
-       for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
-               I915_WRITE(FBC_TAG(i), 0);
-
-       if (IS_GEN(dev_priv, 4)) {
-               u32 fbc_ctl2;
-
-               /* Set it up... */
-               fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-               fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
-               I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-               I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
-       }
-
-       /* enable it... */
-       fbc_ctl = I915_READ(FBC_CONTROL);
-       fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
-       fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-       if (IS_I945GM(dev_priv))
-               fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
-       fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= params->vma->fence->id;
-       I915_WRITE(FBC_CONTROL, fbc_ctl);
-}
-
-static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
-{
-       return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
-       u32 dpfc_ctl;
-
-       dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
-       if (params->fb.format->cpp[0] == 2)
-               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-       else
-               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-
-       if (params->flags & PLANE_HAS_FENCE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
-               I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       } else {
-               I915_WRITE(DPFC_FENCE_YOFF, 0);
-       }
-
-       /* enable it... */
-       I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-}
-
-static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
-{
-       u32 dpfc_ctl;
-
-       /* Disable compression */
-       dpfc_ctl = I915_READ(DPFC_CONTROL);
-       if (dpfc_ctl & DPFC_CTL_EN) {
-               dpfc_ctl &= ~DPFC_CTL_EN;
-               I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-       }
-}
-
-static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
-{
-       return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-/* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
-       POSTING_READ(MSG_FBC_REND_STATE);
-}
-
-static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
-       u32 dpfc_ctl;
-       int threshold = dev_priv->fbc.threshold;
-
-       dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
-       if (params->fb.format->cpp[0] == 2)
-               threshold++;
-
-       switch (threshold) {
-       case 4:
-       case 3:
-               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
-               break;
-       case 2:
-               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-               break;
-       case 1:
-               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-               break;
-       }
-
-       if (params->flags & PLANE_HAS_FENCE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN;
-               if (IS_GEN(dev_priv, 5))
-                       dpfc_ctl |= params->vma->fence->id;
-               if (IS_GEN(dev_priv, 6)) {
-                       I915_WRITE(SNB_DPFC_CTL_SA,
-                                  SNB_CPU_FENCE_ENABLE |
-                                  params->vma->fence->id);
-                       I915_WRITE(DPFC_CPU_FENCE_OFFSET,
-                                  params->crtc.fence_y_offset);
-               }
-       } else {
-               if (IS_GEN(dev_priv, 6)) {
-                       I915_WRITE(SNB_DPFC_CTL_SA, 0);
-                       I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
-               }
-       }
-
-       I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE,
-                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
-       /* enable it... */
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-       intel_fbc_recompress(dev_priv);
-}
-
-static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
-{
-       u32 dpfc_ctl;
-
-       /* Disable compression */
-       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-       if (dpfc_ctl & DPFC_CTL_EN) {
-               dpfc_ctl &= ~DPFC_CTL_EN;
-               I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-       }
-}
-
-static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
-{
-       return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
-       u32 dpfc_ctl;
-       int threshold = dev_priv->fbc.threshold;
-
-       /* Display WA #0529: skl, kbl, bxt. */
-       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
-               u32 val = I915_READ(CHICKEN_MISC_4);
-
-               val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
-
-               if (i915_gem_object_get_tiling(params->vma->obj) !=
-                   I915_TILING_X)
-                       val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
-
-               I915_WRITE(CHICKEN_MISC_4, val);
-       }
-
-       dpfc_ctl = 0;
-       if (IS_IVYBRIDGE(dev_priv))
-               dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
-
-       if (params->fb.format->cpp[0] == 2)
-               threshold++;
-
-       switch (threshold) {
-       case 4:
-       case 3:
-               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
-               break;
-       case 2:
-               dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-               break;
-       case 1:
-               dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-               break;
-       }
-
-       if (params->flags & PLANE_HAS_FENCE) {
-               dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-               I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE |
-                          params->vma->fence->id);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-       } else {
-               I915_WRITE(SNB_DPFC_CTL_SA, 0);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
-       }
-
-       if (dev_priv->fbc.false_color)
-               dpfc_ctl |= FBC_CTL_FALSE_COLOR;
-
-       if (IS_IVYBRIDGE(dev_priv)) {
-               /* WaFbcAsynchFlipDisableFbcQueue:ivb */
-               I915_WRITE(ILK_DISPLAY_CHICKEN1,
-                          I915_READ(ILK_DISPLAY_CHICKEN1) |
-                          ILK_FBCQ_DIS);
-       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-               I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
-                          I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
-                          HSW_FBCQ_DIS);
-       }
-
-       if (IS_GEN(dev_priv, 11))
-               /* Wa_1409120013:icl,ehl */
-               I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-       intel_fbc_recompress(dev_priv);
-}
-
-static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_GEN(dev_priv) >= 5)
-               return ilk_fbc_is_active(dev_priv);
-       else if (IS_GM45(dev_priv))
-               return g4x_fbc_is_active(dev_priv);
-       else
-               return i8xx_fbc_is_active(dev_priv);
-}
-
-static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       fbc->active = true;
-
-       if (INTEL_GEN(dev_priv) >= 7)
-               gen7_fbc_activate(dev_priv);
-       else if (INTEL_GEN(dev_priv) >= 5)
-               ilk_fbc_activate(dev_priv);
-       else if (IS_GM45(dev_priv))
-               g4x_fbc_activate(dev_priv);
-       else
-               i8xx_fbc_activate(dev_priv);
-}
-
-static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       fbc->active = false;
-
-       if (INTEL_GEN(dev_priv) >= 5)
-               ilk_fbc_deactivate(dev_priv);
-       else if (IS_GM45(dev_priv))
-               g4x_fbc_deactivate(dev_priv);
-       else
-               i8xx_fbc_deactivate(dev_priv);
-}
-
-/**
- * intel_fbc_is_active - Is FBC active?
- * @dev_priv: i915 device instance
- *
- * This function is used to verify the current state of FBC.
- *
- * FIXME: This should be tracked in the plane config eventually
- * instead of queried at runtime for most callers.
- */
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
-{
-       return dev_priv->fbc.active;
-}
-
-static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
-                                const char *reason)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       WARN_ON(!mutex_is_locked(&fbc->lock));
-
-       if (fbc->active)
-               intel_fbc_hw_deactivate(dev_priv);
-
-       fbc->no_fbc_reason = reason;
-}
-
-static bool multiple_pipes_ok(struct intel_crtc *crtc,
-                             struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       enum pipe pipe = crtc->pipe;
-
-       /* Don't even bother tracking anything we don't need. */
-       if (!no_fbc_on_multiple_pipes(dev_priv))
-               return true;
-
-       if (plane_state->base.visible)
-               fbc->visible_pipes_mask |= (1 << pipe);
-       else
-               fbc->visible_pipes_mask &= ~(1 << pipe);
-
-       return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
-}
-
-static int find_compression_threshold(struct drm_i915_private *dev_priv,
-                                     struct drm_mm_node *node,
-                                     int size,
-                                     int fb_cpp)
-{
-       int compression_threshold = 1;
-       int ret;
-       u64 end;
-
-       /* The FBC hardware for BDW/SKL doesn't have access to the stolen
-        * reserved range size, so it always assumes the maximum (8mb) is used.
-        * If we enable FBC using a CFB on that memory range we'll get FIFO
-        * underruns, even if that range is not reserved by the BIOS. */
-       if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
-               end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
-       else
-               end = U64_MAX;
-
-       /* HACK: This code depends on what we will do in *_enable_fbc. If that
-        * code changes, this code needs to change as well.
-        *
-        * The enable_fbc code will attempt to use one of our 2 compression
-        * thresholds, so in that case we only have one fallback left.
-        */
-
-       /* Try to over-allocate to reduce reallocations and fragmentation. */
-       ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
-                                                  4096, 0, end);
-       if (ret == 0)
-               return compression_threshold;
-
-again:
-       /* HW's ability to limit the CFB is 1:4 */
-       if (compression_threshold > 4 ||
-           (fb_cpp == 2 && compression_threshold == 2))
-               return 0;
-
-       ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
-                                                  4096, 0, end);
-       if (ret && INTEL_GEN(dev_priv) <= 4) {
-               return 0;
-       } else if (ret) {
-               compression_threshold <<= 1;
-               goto again;
-       } else {
-               return compression_threshold;
-       }
-}
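Illustrative walk-through of the retry loop above: for a requested CFB size S it first tries 2*S (the over-allocation), then S at threshold 1, then S/2 with the threshold doubled to 2, then S/4 at threshold 4; a further halving would exceed the hardware's 1:4 limit, so the function then returns 0. On gen4 and older any retry failure bails out immediately, and with a 16bpp framebuffer it gives up as soon as the threshold would reach 2.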
-
-static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct drm_mm_node *uninitialized_var(compressed_llb);
-       int size, fb_cpp, ret;
-
-       WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
-
-       size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
-       fb_cpp = fbc->state_cache.fb.format->cpp[0];
-
-       ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
-                                        size, fb_cpp);
-       if (!ret)
-               goto err_llb;
-       else if (ret > 1) {
-               DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than with the full size. Try to increase stolen memory size if available in BIOS.\n");
-       }
-
-       fbc->threshold = ret;
-
-       if (INTEL_GEN(dev_priv) >= 5)
-               I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
-       else if (IS_GM45(dev_priv)) {
-               I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
-       } else {
-               compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
-               if (!compressed_llb)
-                       goto err_fb;
-
-               ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
-                                                 4096, 4096);
-               if (ret)
-                       goto err_fb;
-
-               fbc->compressed_llb = compressed_llb;
-
-               GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
-                                            fbc->compressed_fb.start,
-                                            U32_MAX));
-               GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
-                                            fbc->compressed_llb->start,
-                                            U32_MAX));
-               I915_WRITE(FBC_CFB_BASE,
-                          dev_priv->dsm.start + fbc->compressed_fb.start);
-               I915_WRITE(FBC_LL_BASE,
-                          dev_priv->dsm.start + compressed_llb->start);
-       }
-
-       DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
-                     fbc->compressed_fb.size, fbc->threshold);
-
-       return 0;
-
-err_fb:
-       kfree(compressed_llb);
-       i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
-err_llb:
-       if (drm_mm_initialized(&dev_priv->mm.stolen))
-               pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
-       return -ENOSPC;
-}
-
-static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (drm_mm_node_allocated(&fbc->compressed_fb))
-               i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
-
-       if (fbc->compressed_llb) {
-               i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
-               kfree(fbc->compressed_llb);
-       }
-}
-
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-       __intel_fbc_cleanup_cfb(dev_priv);
-       mutex_unlock(&fbc->lock);
-}
-
-static bool stride_is_valid(struct drm_i915_private *dev_priv,
-                           unsigned int stride)
-{
-       /* This should have been caught earlier. */
-       if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
-               return false;
-
-       /* Below are the additional FBC restrictions. */
-       if (stride < 512)
-               return false;
-
-       if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
-               return stride == 4096 || stride == 8192;
-
-       if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
-               return false;
-
-       if (stride > 16384)
-               return false;
-
-       return true;
-}
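Illustrative outcomes of the rules above: a 4096-byte stride passes on every platform; 1024 is rejected on gen2/3 (only 4096 or 8192 are allowed there) and on gen4 non-G4X (below the 2048 minimum); anything above 16384 is rejected everywhere.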
-
-static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
-                                 u32 pixel_format)
-{
-       switch (pixel_format) {
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-               return true;
-       case DRM_FORMAT_XRGB1555:
-       case DRM_FORMAT_RGB565:
-               /* 16bpp not supported on gen2 */
-               if (IS_GEN(dev_priv, 2))
-                       return false;
-               /* WaFbcOnly1to1Ratio:ctg */
-               if (IS_G4X(dev_priv))
-                       return false;
-               return true;
-       default:
-               return false;
-       }
-}
-
-/*
- * For some reason, the hardware tracking starts looking at whatever we
- * programmed into the display plane base address register. It does not look at
- * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
- * variables instead of just looking at the pipe/plane size.
- */
-static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       unsigned int effective_w, effective_h, max_w, max_h;
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
-               max_w = 5120;
-               max_h = 4096;
-       } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
-               max_w = 4096;
-               max_h = 4096;
-       } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
-               max_w = 4096;
-               max_h = 2048;
-       } else {
-               max_w = 2048;
-               max_h = 1536;
-       }
-
-       intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
-                                       &effective_h);
-       effective_w += fbc->state_cache.plane.adjusted_x;
-       effective_h += fbc->state_cache.plane.adjusted_y;
-
-       return effective_w <= max_w && effective_h <= max_h;
-}
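Illustrative example: a 4096x2160 plane with adjusted_x = adjusted_y = 0 fits the 4096x4096 budget on HSW/BDW and newer, but exceeds the 2048x1536 budget on the oldest platforms; non-zero adjusted offsets are added to the effective size, so they eat into the same budget.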
-
-static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
-                                        struct intel_crtc_state *crtc_state,
-                                        struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_fbc_state_cache *cache = &fbc->state_cache;
-       struct drm_framebuffer *fb = plane_state->base.fb;
-
-       cache->vma = NULL;
-       cache->flags = 0;
-
-       cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
-
-       cache->plane.rotation = plane_state->base.rotation;
-       /*
-        * Src coordinates are already rotated by 270 degrees for
-        * the 90/270 degree plane rotation cases (to match the
-        * GTT mapping), hence no need to account for rotation here.
-        */
-       cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
-       cache->plane.visible = plane_state->base.visible;
-       cache->plane.adjusted_x = plane_state->color_plane[0].x;
-       cache->plane.adjusted_y = plane_state->color_plane[0].y;
-       cache->plane.y = plane_state->base.src.y1 >> 16;
-
-       cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
-
-       if (!cache->plane.visible)
-               return;
-
-       cache->fb.format = fb->format;
-       cache->fb.stride = fb->pitches[0];
-
-       cache->vma = plane_state->vma;
-       cache->flags = plane_state->flags;
-       if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
-               cache->flags &= ~PLANE_HAS_FENCE;
-}
-
-static bool intel_fbc_can_activate(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_fbc_state_cache *cache = &fbc->state_cache;
-
-       /* We don't need to use a state cache here since this information is
-        * global for all CRTCs.
-        */
-       if (fbc->underrun_detected) {
-               fbc->no_fbc_reason = "underrun detected";
-               return false;
-       }
-
-       if (!cache->vma) {
-               fbc->no_fbc_reason = "primary plane not visible";
-               return false;
-       }
-
-       if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
-               fbc->no_fbc_reason = "incompatible mode";
-               return false;
-       }
-
-       if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
-               fbc->no_fbc_reason = "mode too large for compression";
-               return false;
-       }
-
-       /* The use of a CPU fence is mandatory in order to detect writes
-        * by the CPU to the scanout and trigger updates to the FBC.
-        *
-        * Note that it is possible for a tiled surface to be unmappable (and
-        * so have no fence associated with it) due to aperture constraints
-        * at the time of pinning.
-        *
-        * FIXME with 90/270 degree rotation we should use the fence on
-        * the normal GTT view (the rotated view doesn't even have a
-        * fence). Would need changes to the FBC fence Y offset as well.
-        * For now this will effectively disable FBC with 90/270 degree
-        * rotation.
-        */
-       if (!(cache->flags & PLANE_HAS_FENCE)) {
-               fbc->no_fbc_reason = "framebuffer not tiled or fenced";
-               return false;
-       }
-       if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
-           cache->plane.rotation != DRM_MODE_ROTATE_0) {
-               fbc->no_fbc_reason = "rotation unsupported";
-               return false;
-       }
-
-       if (!stride_is_valid(dev_priv, cache->fb.stride)) {
-               fbc->no_fbc_reason = "framebuffer stride not supported";
-               return false;
-       }
-
-       if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
-               fbc->no_fbc_reason = "pixel format is invalid";
-               return false;
-       }
-
-       if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
-           cache->fb.format->has_alpha) {
-               fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
-               return false;
-       }
-
-       /* WaFbcExceedCdClockThreshold:hsw,bdw */
-       if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
-           cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
-               fbc->no_fbc_reason = "pixel rate is too big";
-               return false;
-       }
-
-       /* It is possible for the required CFB size to change without a
-        * crtc->disable + crtc->enable since it is possible to change the
-        * stride without triggering a full modeset. Since we try to
-        * over-allocate the CFB, there's a chance we may keep FBC enabled even
-        * if this happens, but if we exceed the current CFB size we'll have to
-        * disable FBC. Notice that it would be possible to disable FBC, wait
-        * for a frame, free the stolen node, then try to reenable FBC in case
-        * we didn't get any invalidate/deactivate calls, but this would require
-        * a lot of tracking just for a specific case. If we conclude it's an
-        * important case, we can implement it later. */
-       if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
-           fbc->compressed_fb.size * fbc->threshold) {
-               fbc->no_fbc_reason = "CFB requirements changed";
-               return false;
-       }
-
-       /*
-        * Work around a problem on GEN9+ HW, where enabling FBC on a plane
-        * having a Y offset that isn't divisible by 4 causes FIFO underrun
-        * and screen flicker.
-        */
-       if (IS_GEN_RANGE(dev_priv, 9, 10) &&
-           (fbc->state_cache.plane.adjusted_y & 3)) {
-               fbc->no_fbc_reason = "plane Y offset is misaligned";
-               return false;
-       }
-
-       return true;
-}
-
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (intel_vgpu_active(dev_priv)) {
-               fbc->no_fbc_reason = "VGPU is active";
-               return false;
-       }
-
-       if (!i915_modparams.enable_fbc) {
-               fbc->no_fbc_reason = "disabled per module param or by default";
-               return false;
-       }
-
-       if (fbc->underrun_detected) {
-               fbc->no_fbc_reason = "underrun detected";
-               return false;
-       }
-
-       return true;
-}
-
-static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
-                                    struct intel_fbc_reg_params *params)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_fbc_state_cache *cache = &fbc->state_cache;
-
-       /* Since all our fields are integer types, use memset here so the
-        * comparison function can rely on memcmp because the padding will be
-        * zero. */
-       memset(params, 0, sizeof(*params));
-
-       params->vma = cache->vma;
-       params->flags = cache->flags;
-
-       params->crtc.pipe = crtc->pipe;
-       params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
-       params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
-
-       params->fb.format = cache->fb.format;
-       params->fb.stride = cache->fb.stride;
-
-       params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
-               params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
-                                               32 * fbc->threshold) * 8;
-}
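Worked example for the Display WA #0529 value computed above (illustrative numbers): with a 3840-pixel-wide source and threshold 1, gen9_wa_cfb_stride = DIV_ROUND_UP(3840, 32) * 8 = 960.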
-
-void intel_fbc_pre_update(struct intel_crtc *crtc,
-                         struct intel_crtc_state *crtc_state,
-                         struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       const char *reason = "update pending";
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-
-       if (!multiple_pipes_ok(crtc, plane_state)) {
-               reason = "more than one pipe active";
-               goto deactivate;
-       }
-
-       if (!fbc->enabled || fbc->crtc != crtc)
-               goto unlock;
-
-       intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
-       fbc->flip_pending = true;
-
-deactivate:
-       intel_fbc_deactivate(dev_priv, reason);
-unlock:
-       mutex_unlock(&fbc->lock);
-}
-
-/**
- * __intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This is the low level function that actually disables FBC. Callers should
- * grab the FBC lock.
- */
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_crtc *crtc = fbc->crtc;
-
-       WARN_ON(!mutex_is_locked(&fbc->lock));
-       WARN_ON(!fbc->enabled);
-       WARN_ON(fbc->active);
-
-       DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
-
-       __intel_fbc_cleanup_cfb(dev_priv);
-
-       fbc->enabled = false;
-       fbc->crtc = NULL;
-}
-
-static void __intel_fbc_post_update(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       WARN_ON(!mutex_is_locked(&fbc->lock));
-
-       if (!fbc->enabled || fbc->crtc != crtc)
-               return;
-
-       fbc->flip_pending = false;
-       WARN_ON(fbc->active);
-
-       if (!i915_modparams.enable_fbc) {
-               intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
-               __intel_fbc_disable(dev_priv);
-
-               return;
-       }
-
-       intel_fbc_get_reg_params(crtc, &fbc->params);
-
-       if (!intel_fbc_can_activate(crtc))
-               return;
-
-       if (!fbc->busy_bits) {
-               intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
-               intel_fbc_hw_activate(dev_priv);
-       } else
-               intel_fbc_deactivate(dev_priv, "frontbuffer write");
-}
-
-void intel_fbc_post_update(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-       __intel_fbc_post_update(crtc);
-       mutex_unlock(&fbc->lock);
-}
-
-static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
-{
-       if (fbc->enabled)
-               return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
-       else
-               return fbc->possible_framebuffer_bits;
-}
-
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
-                         unsigned int frontbuffer_bits,
-                         enum fb_op_origin origin)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
-               return;
-
-       mutex_lock(&fbc->lock);
-
-       fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
-
-       if (fbc->enabled && fbc->busy_bits)
-               intel_fbc_deactivate(dev_priv, "frontbuffer write");
-
-       mutex_unlock(&fbc->lock);
-}
-
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
-                    unsigned int frontbuffer_bits, enum fb_op_origin origin)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-
-       fbc->busy_bits &= ~frontbuffer_bits;
-
-       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
-               goto out;
-
-       if (!fbc->busy_bits && fbc->enabled &&
-           (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
-               if (fbc->active)
-                       intel_fbc_recompress(dev_priv);
-               else if (!fbc->flip_pending)
-                       __intel_fbc_post_update(fbc->crtc);
-       }
-
-out:
-       mutex_unlock(&fbc->lock);
-}
-
-/**
- * intel_fbc_choose_crtc - select a CRTC to enable FBC on
- * @dev_priv: i915 device instance
- * @state: the atomic state structure
- *
- * This function looks at the proposed state for CRTCs and planes, then chooses
- * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
- * true.
- *
- * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
- * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
- */
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
-                          struct intel_atomic_state *state)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_plane *plane;
-       struct intel_plane_state *plane_state;
-       bool crtc_chosen = false;
-       int i;
-
-       mutex_lock(&fbc->lock);
-
-       /* Does this atomic commit involve the CRTC currently tied to FBC? */
-       if (fbc->crtc &&
-           !intel_atomic_get_new_crtc_state(state, fbc->crtc))
-               goto out;
-
-       if (!intel_fbc_can_enable(dev_priv))
-               goto out;
-
-       /* Simply choose the first CRTC that is compatible and has a visible
-        * plane. We could go for fancier schemes such as checking the plane
-        * size, but this would just affect the few platforms that don't tie FBC
-        * to pipe or plane A. */
-       for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
-               struct intel_crtc_state *crtc_state;
-               struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
-
-               if (!plane->has_fbc)
-                       continue;
-
-               if (!plane_state->base.visible)
-                       continue;
-
-               crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
-
-               crtc_state->enable_fbc = true;
-               crtc_chosen = true;
-               break;
-       }
-
-       if (!crtc_chosen)
-               fbc->no_fbc_reason = "no suitable CRTC for FBC";
-
-out:
-       mutex_unlock(&fbc->lock);
-}
-
-/**
- * intel_fbc_enable - tries to enable FBC on the CRTC
- * @crtc: the CRTC
- * @crtc_state: corresponding &drm_crtc_state for @crtc
- * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
- *
- * This function checks if the given CRTC was chosen for FBC, then enables it if
- * possible. Notice that it doesn't activate FBC. It is valid to call
- * intel_fbc_enable multiple times for the same pipe without an
- * intel_fbc_disable in the middle, as long as it is deactivated.
- */
-void intel_fbc_enable(struct intel_crtc *crtc,
-                     struct intel_crtc_state *crtc_state,
-                     struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-
-       if (fbc->enabled) {
-               WARN_ON(fbc->crtc == NULL);
-               if (fbc->crtc == crtc) {
-                       WARN_ON(!crtc_state->enable_fbc);
-                       WARN_ON(fbc->active);
-               }
-               goto out;
-       }
-
-       if (!crtc_state->enable_fbc)
-               goto out;
-
-       WARN_ON(fbc->active);
-       WARN_ON(fbc->crtc != NULL);
-
-       intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
-       if (intel_fbc_alloc_cfb(crtc)) {
-               fbc->no_fbc_reason = "not enough stolen memory";
-               goto out;
-       }
-
-       DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
-       fbc->no_fbc_reason = "FBC enabled but not active yet";
-
-       fbc->enabled = true;
-       fbc->crtc = crtc;
-out:
-       mutex_unlock(&fbc->lock);
-}
-
-/**
- * intel_fbc_disable - disable FBC if it's associated with crtc
- * @crtc: the CRTC
- *
- * This function disables FBC if it's associated with the provided CRTC.
- */
-void intel_fbc_disable(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-       if (fbc->crtc == crtc)
-               __intel_fbc_disable(dev_priv);
-       mutex_unlock(&fbc->lock);
-}
-
-/**
- * intel_fbc_global_disable - globally disable FBC
- * @dev_priv: i915 device instance
- *
- * This function disables FBC regardless of which CRTC is associated with it.
- */
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&fbc->lock);
-       if (fbc->enabled) {
-               WARN_ON(fbc->crtc->active);
-               __intel_fbc_disable(dev_priv);
-       }
-       mutex_unlock(&fbc->lock);
-}
-
-static void intel_fbc_underrun_work_fn(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, fbc.underrun_work);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       mutex_lock(&fbc->lock);
-
-       /* Maybe we were scheduled twice. */
-       if (fbc->underrun_detected || !fbc->enabled)
-               goto out;
-
-       DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
-       fbc->underrun_detected = true;
-
-       intel_fbc_deactivate(dev_priv, "FIFO underrun");
-out:
-       mutex_unlock(&fbc->lock);
-}
-
-/**
- * intel_fbc_reset_underrun - reset FBC FIFO underrun status
- * @dev_priv: i915 device instance
- *
- * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
- * want to re-enable FBC after an underrun to increase test coverage.
- */
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       cancel_work_sync(&dev_priv->fbc.underrun_work);
-
-       ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
-       if (ret)
-               return ret;
-
-       if (dev_priv->fbc.underrun_detected) {
-               DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
-               dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
-       }
-
-       dev_priv->fbc.underrun_detected = false;
-       mutex_unlock(&dev_priv->fbc.lock);
-
-       return 0;
-}
-
-/**
- * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
- * @dev_priv: i915 device instance
- *
- * Without FBC, most underruns are harmless and don't really cause too many
- * problems, except for an annoying message on dmesg. With FBC, underruns can
- * become black screens or even worse, especially when paired with bad
- * watermarks. So in order for us to be on the safe side, completely disable FBC
- * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
- * already suggests that watermarks may be bad, so try to be as safe as
- * possible.
- *
- * This function is called from the IRQ handler.
- */
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       /* There's no guarantee that underrun_detected won't be set to true
-        * right after this check and before the work is scheduled, but that's
-        * not a problem since we'll check it again under the work function
-        * while FBC is locked. This check here is just to prevent us from
-        * unnecessarily scheduling the work, and it relies on the fact that we
-        * never switch underrun_detected back to false after it's true. */
-       if (READ_ONCE(fbc->underrun_detected))
-               return;
-
-       schedule_work(&fbc->underrun_work);
-}
-
-/**
- * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
- * @dev_priv: i915 device instance
- *
- * The FBC code needs to track CRTC visibility since the older platforms can't
- * have FBC enabled while multiple pipes are used. This function does the
- * initial setup at driver load to make sure FBC is matching the real hardware.
- */
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
-{
-       struct intel_crtc *crtc;
-
-       /* Don't even bother tracking anything if we don't need to. */
-       if (!no_fbc_on_multiple_pipes(dev_priv))
-               return;
-
-       for_each_intel_crtc(&dev_priv->drm, crtc)
-               if (intel_crtc_active(crtc) &&
-                   crtc->base.primary->state->visible)
-                       dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
-}
-
-/*
- * The DDX driver changes its behavior depending on the value it reads from
- * i915.enable_fbc, so sanitize it by translating the default value into either
- * 0 or 1 in order to allow it to know what's going on.
- *
- * Notice that this is done at driver initialization and we still allow user
- * space to change the value during runtime without sanitizing it again. IGT
- * relies on being able to change i915.enable_fbc at runtime.
- */
-static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
-{
-       if (i915_modparams.enable_fbc >= 0)
-               return !!i915_modparams.enable_fbc;
-
-       if (!HAS_FBC(dev_priv))
-               return 0;
-
-       /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
-       if (IS_GEMINILAKE(dev_priv))
-               return 0;
-
-       if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
-               return 1;
-
-       return 0;
-}
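Illustrative outcomes of the sanitizing above: the default i915.enable_fbc=-1 becomes 1 on Broadwell and gen9+ (except Geminilake, per the referenced bug, and only when the platform has FBC at all) and 0 everywhere else; an explicit non-negative value is simply normalized to 0 or 1.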
-
-static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
-{
-       /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
-       if (intel_vtd_active() &&
-           (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
-               DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
-               return true;
-       }
-
-       return false;
-}
-
-/**
- * intel_fbc_init - Initialize FBC
- * @dev_priv: the i915 device
- *
- * This function might be called during PM init process.
- */
-void intel_fbc_init(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbc *fbc = &dev_priv->fbc;
-
-       INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
-       mutex_init(&fbc->lock);
-       fbc->enabled = false;
-       fbc->active = false;
-
-       if (need_fbc_vtd_wa(dev_priv))
-               mkwrite_device_info(dev_priv)->display.has_fbc = false;
-
-       i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
-       DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
-                     i915_modparams.enable_fbc);
-
-       if (!HAS_FBC(dev_priv)) {
-               fbc->no_fbc_reason = "unsupported by this chipset";
-               return;
-       }
-
-       /* This value was pulled out of someone's hat */
-       if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
-               I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-
-       /* We still don't have any sort of hardware state readout for FBC, so
-        * deactivate it in case the BIOS activated it to make sure software
-        * matches the hardware state. */
-       if (intel_fbc_hw_is_active(dev_priv))
-               intel_fbc_hw_deactivate(dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/intel_fbc.h
deleted file mode 100644 (file)
index 50272ed..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_FBC_H__
-#define __INTEL_FBC_H__
-
-#include <linux/types.h>
-
-#include "intel_frontbuffer.h"
-
-struct drm_i915_private;
-struct intel_atomic_state;
-struct intel_crtc;
-struct intel_crtc_state;
-struct intel_plane_state;
-
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
-                          struct intel_atomic_state *state);
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
-                         struct intel_crtc_state *crtc_state,
-                         struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
-                     struct intel_crtc_state *crtc_state,
-                     struct intel_plane_state *plane_state);
-void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
-                         unsigned int frontbuffer_bits,
-                         enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
-                    unsigned int frontbuffer_bits, enum fb_op_origin origin);
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
deleted file mode 100644 (file)
index 1edd44e..0000000
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     David Airlie
- */
-
-#include <linux/async.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-#include <linux/vga_switcheroo.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_fbdev.h"
-#include "intel_frontbuffer.h"
-
-static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
-       unsigned int origin =
-               ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
-
-       intel_fb_obj_invalidate(obj, origin);
-}
-
-static int intel_fbdev_set_par(struct fb_info *info)
-{
-       struct drm_fb_helper *fb_helper = info->par;
-       struct intel_fbdev *ifbdev =
-               container_of(fb_helper, struct intel_fbdev, helper);
-       int ret;
-
-       ret = drm_fb_helper_set_par(info);
-       if (ret == 0)
-               intel_fbdev_invalidate(ifbdev);
-
-       return ret;
-}
-
-static int intel_fbdev_blank(int blank, struct fb_info *info)
-{
-       struct drm_fb_helper *fb_helper = info->par;
-       struct intel_fbdev *ifbdev =
-               container_of(fb_helper, struct intel_fbdev, helper);
-       int ret;
-
-       ret = drm_fb_helper_blank(blank, info);
-       if (ret == 0)
-               intel_fbdev_invalidate(ifbdev);
-
-       return ret;
-}
-
-static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
-                                  struct fb_info *info)
-{
-       struct drm_fb_helper *fb_helper = info->par;
-       struct intel_fbdev *ifbdev =
-               container_of(fb_helper, struct intel_fbdev, helper);
-       int ret;
-
-       ret = drm_fb_helper_pan_display(var, info);
-       if (ret == 0)
-               intel_fbdev_invalidate(ifbdev);
-
-       return ret;
-}
-
-static struct fb_ops intelfb_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_set_par = intel_fbdev_set_par,
-       .fb_fillrect = drm_fb_helper_cfb_fillrect,
-       .fb_copyarea = drm_fb_helper_cfb_copyarea,
-       .fb_imageblit = drm_fb_helper_cfb_imageblit,
-       .fb_pan_display = intel_fbdev_pan_display,
-       .fb_blank = intel_fbdev_blank,
-};
-
-static int intelfb_alloc(struct drm_fb_helper *helper,
-                        struct drm_fb_helper_surface_size *sizes)
-{
-       struct intel_fbdev *ifbdev =
-               container_of(helper, struct intel_fbdev, helper);
-       struct drm_framebuffer *fb;
-       struct drm_device *dev = helper->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_mode_fb_cmd2 mode_cmd = {};
-       struct drm_i915_gem_object *obj;
-       int size, ret;
-
-       /* we don't do packed 24bpp */
-       if (sizes->surface_bpp == 24)
-               sizes->surface_bpp = 32;
-
-       mode_cmd.width = sizes->surface_width;
-       mode_cmd.height = sizes->surface_height;
-
-       mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
-                                   DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
-       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-                                                         sizes->surface_depth);
-
-       size = mode_cmd.pitches[0] * mode_cmd.height;
-       size = PAGE_ALIGN(size);
-
-       /* If the FB is too big, just don't use it since fbdev is not very
-        * important and we should probably use that space with FBC or other
-        * features. */
-       obj = NULL;
-       if (size * 2 < dev_priv->stolen_usable_size)
-               obj = i915_gem_object_create_stolen(dev_priv, size);
-       if (obj == NULL)
-               obj = i915_gem_object_create_shmem(dev_priv, size);
-       if (IS_ERR(obj)) {
-               DRM_ERROR("failed to allocate framebuffer\n");
-               ret = PTR_ERR(obj);
-               goto err;
-       }
-
-       fb = intel_framebuffer_create(obj, &mode_cmd);
-       if (IS_ERR(fb)) {
-               ret = PTR_ERR(fb);
-               goto err_obj;
-       }
-
-       ifbdev->fb = to_intel_framebuffer(fb);
-
-       return 0;
-
-err_obj:
-       i915_gem_object_put(obj);
-err:
-       return ret;
-}
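
For reference, the sizing above reduces to a 64-byte-aligned stride and a page-aligned object size. Below is a minimal userspace sketch of that arithmetic; align_up() and the 4096-byte page size are assumptions standing in for the kernel's ALIGN() and PAGE_ALIGN(), not driver API.

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
static unsigned int align_up(unsigned int x, unsigned int a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        unsigned int width = 1920, height = 1080, bpp = 32; /* example mode */
        unsigned int cpp = (bpp + 7) / 8;                   /* bytes per pixel */
        unsigned int pitch = align_up(width * cpp, 64);     /* 64-byte aligned stride */
        unsigned int size = align_up(pitch * height, 4096); /* page aligned, assuming 4K pages */

        printf("pitch = %u bytes, size = %u bytes\n", pitch, size);
        return 0;
}

For a 1920x1080 XRGB8888 console this yields a 7680-byte pitch and an 8294400-byte object, which the code above only places in stolen memory if it occupies less than half of the usable stolen space.
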
-
-static int intelfb_create(struct drm_fb_helper *helper,
-                         struct drm_fb_helper_surface_size *sizes)
-{
-       struct intel_fbdev *ifbdev =
-               container_of(helper, struct intel_fbdev, helper);
-       struct intel_framebuffer *intel_fb = ifbdev->fb;
-       struct drm_device *dev = helper->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       const struct i915_ggtt_view view = {
-               .type = I915_GGTT_VIEW_NORMAL,
-       };
-       struct drm_framebuffer *fb;
-       intel_wakeref_t wakeref;
-       struct fb_info *info;
-       struct i915_vma *vma;
-       unsigned long flags = 0;
-       bool prealloc = false;
-       void __iomem *vaddr;
-       int ret;
-
-       if (intel_fb &&
-           (sizes->fb_width > intel_fb->base.width ||
-            sizes->fb_height > intel_fb->base.height)) {
-               DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
-                             " releasing it\n",
-                             intel_fb->base.width, intel_fb->base.height,
-                             sizes->fb_width, sizes->fb_height);
-               drm_framebuffer_put(&intel_fb->base);
-               intel_fb = ifbdev->fb = NULL;
-       }
-       if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
-               DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
-               ret = intelfb_alloc(helper, sizes);
-               if (ret)
-                       return ret;
-               intel_fb = ifbdev->fb;
-       } else {
-               DRM_DEBUG_KMS("re-using BIOS fb\n");
-               prealloc = true;
-               sizes->fb_width = intel_fb->base.width;
-               sizes->fb_height = intel_fb->base.height;
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-       /* Pin the GGTT vma for our access via info->screen_base.
-        * This also validates that any existing fb inherited from the
-        * BIOS is suitable for our own access.
-        */
-       vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
-                                        &view, false, &flags);
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto out_unlock;
-       }
-
-       fb = &ifbdev->fb->base;
-       intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
-
-       info = drm_fb_helper_alloc_fbi(helper);
-       if (IS_ERR(info)) {
-               DRM_ERROR("Failed to allocate fb_info\n");
-               ret = PTR_ERR(info);
-               goto out_unpin;
-       }
-
-       ifbdev->helper.fb = fb;
-
-       info->fbops = &intelfb_ops;
-
-       /* setup aperture base/size for vesafb takeover */
-       info->apertures->ranges[0].base = dev->mode_config.fb_base;
-       info->apertures->ranges[0].size = ggtt->mappable_end;
-
-       info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
-       info->fix.smem_len = vma->node.size;
-
-       vaddr = i915_vma_pin_iomap(vma);
-       if (IS_ERR(vaddr)) {
-               DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
-               ret = PTR_ERR(vaddr);
-               goto out_unpin;
-       }
-       info->screen_base = vaddr;
-       info->screen_size = vma->node.size;
-
-       drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
-
-       /* If the object is shmemfs backed, it will have given us zeroed pages.
-        * If the object is stolen however, it will be full of whatever
-        * garbage was left in there.
-        */
-       if (intel_fb_obj(fb)->stolen && !prealloc)
-               memset_io(info->screen_base, 0, info->screen_size);
-
-       /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
-       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
-                     fb->width, fb->height, i915_ggtt_offset(vma));
-       ifbdev->vma = vma;
-       ifbdev->vma_flags = flags;
-
-       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-       mutex_unlock(&dev->struct_mutex);
-       vga_switcheroo_client_fb_set(pdev, info);
-       return 0;
-
-out_unpin:
-       intel_unpin_fb_vma(vma, flags);
-out_unlock:
-       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
-       .fb_probe = intelfb_create,
-};
-
-static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
-{
-       /* We rely on the object-free to release the VMA pinning for
-        * the info->screen_base mmapping. Leaking the VMA is simpler than
-        * trying to rectify all the possible error paths leading here.
-        */
-
-       drm_fb_helper_fini(&ifbdev->helper);
-
-       if (ifbdev->vma) {
-               mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
-               mutex_unlock(&ifbdev->helper.dev->struct_mutex);
-       }
-
-       if (ifbdev->fb)
-               drm_framebuffer_remove(&ifbdev->fb->base);
-
-       kfree(ifbdev);
-}
-
-/*
- * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
- * The core display code will have read out the current plane configuration,
- * so we use that to figure out if there's an object for us to use as the
- * fb, and if so, we re-use it for the fbdev configuration.
- *
- * Note we only support a single fb shared across pipes for boot (mostly for
- * fbcon), so we just find the biggest and use that.
- */
-static bool intel_fbdev_init_bios(struct drm_device *dev,
-                                struct intel_fbdev *ifbdev)
-{
-       struct intel_framebuffer *fb = NULL;
-       struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
-       unsigned int max_size = 0;
-
-       /* Find the largest fb */
-       for_each_crtc(dev, crtc) {
-               struct drm_i915_gem_object *obj =
-                       intel_fb_obj(crtc->primary->state->fb);
-               intel_crtc = to_intel_crtc(crtc);
-
-               if (!crtc->state->active || !obj) {
-                       DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
-                                     pipe_name(intel_crtc->pipe));
-                       continue;
-               }
-
-               if (obj->base.size > max_size) {
-                       DRM_DEBUG_KMS("found possible fb from plane %c\n",
-                                     pipe_name(intel_crtc->pipe));
-                       fb = to_intel_framebuffer(crtc->primary->state->fb);
-                       max_size = obj->base.size;
-               }
-       }
-
-       if (!fb) {
-               DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
-               goto out;
-       }
-
-       /* Now make sure all the pipes will fit into it */
-       for_each_crtc(dev, crtc) {
-               unsigned int cur_size;
-
-               intel_crtc = to_intel_crtc(crtc);
-
-               if (!crtc->state->active) {
-                       DRM_DEBUG_KMS("pipe %c not active, skipping\n",
-                                     pipe_name(intel_crtc->pipe));
-                       continue;
-               }
-
-               DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
-                             pipe_name(intel_crtc->pipe));
-
-               /*
-                * See if the plane fb we found above will fit on this
-                * pipe.  Note we need to use the selected fb's pitch and bpp
-                * rather than the current pipe's, since they may differ.
-                */
-               cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
-               cur_size = cur_size * fb->base.format->cpp[0];
-               if (fb->base.pitches[0] < cur_size) {
-                       DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
-                                     pipe_name(intel_crtc->pipe),
-                                     cur_size, fb->base.pitches[0]);
-                       fb = NULL;
-                       break;
-               }
-
-               cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
-               cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
-               cur_size *= fb->base.pitches[0];
-               DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
-                             pipe_name(intel_crtc->pipe),
-                             crtc->state->adjusted_mode.crtc_hdisplay,
-                             crtc->state->adjusted_mode.crtc_vdisplay,
-                             fb->base.format->cpp[0] * 8,
-                             cur_size);
-
-               if (cur_size > max_size) {
-                       DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
-                                     pipe_name(intel_crtc->pipe),
-                                     cur_size, max_size);
-                       fb = NULL;
-                       break;
-               }
-
-               DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
-                             pipe_name(intel_crtc->pipe),
-                             max_size, cur_size);
-       }
-
-       if (!fb) {
-               DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
-               goto out;
-       }
-
-       ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
-       ifbdev->fb = fb;
-
-       drm_framebuffer_get(&ifbdev->fb->base);
-
-       /* Final pass to check if any active pipes don't have fbs */
-       for_each_crtc(dev, crtc) {
-               intel_crtc = to_intel_crtc(crtc);
-
-               if (!crtc->state->active)
-                       continue;
-
-               WARN(!crtc->primary->state->fb,
-                    "re-used BIOS config but lost an fb on crtc %d\n",
-                    crtc->base.id);
-       }
-
-
-       DRM_DEBUG_KMS("using BIOS fb for initial console\n");
-       return true;
-
-out:
-
-       return false;
-}
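
The per-pipe fit check above amounts to two comparisons: the selected fb must be at least as wide (in bytes) as the pipe's hdisplay, and the fb object must hold vdisplay rows at the fb's pitch. A standalone model of that check, assuming linear tiling so that intel_fb_align_height() leaves the height unchanged; the function name and sample values are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/*
 * Does a framebuffer with the given pitch and size cover an
 * hdisplay x vdisplay mode?  Assumes linear tiling, i.e. no extra
 * height alignment.
 */
static bool fb_fits_mode(unsigned long long pitch, unsigned long long fb_size,
                         unsigned int hdisplay, unsigned int vdisplay,
                         unsigned int cpp)
{
        if (pitch < (unsigned long long)hdisplay * cpp)     /* not wide enough */
                return false;
        if ((unsigned long long)vdisplay * pitch > fb_size) /* not big enough */
                return false;
        return true;
}

int main(void)
{
        /* hypothetical BIOS fb: 1920x1080 XRGB8888, pitch 7680, 8 MiB object */
        printf("1024x768 fits:  %d\n", fb_fits_mode(7680, 8 << 20, 1024, 768, 4));
        printf("2560x1440 fits: %d\n", fb_fits_mode(7680, 8 << 20, 2560, 1440, 4));
        return 0;
}
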
-
-static void intel_fbdev_suspend_worker(struct work_struct *work)
-{
-       intel_fbdev_set_suspend(&container_of(work,
-                                             struct drm_i915_private,
-                                             fbdev_suspend_work)->drm,
-                               FBINFO_STATE_RUNNING,
-                               true);
-}
-
-int intel_fbdev_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_fbdev *ifbdev;
-       int ret;
-
-       if (WARN_ON(!HAS_DISPLAY(dev_priv)))
-               return -ENODEV;
-
-       ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
-       if (ifbdev == NULL)
-               return -ENOMEM;
-
-       mutex_init(&ifbdev->hpd_lock);
-       drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
-
-       if (!intel_fbdev_init_bios(dev, ifbdev))
-               ifbdev->preferred_bpp = 32;
-
-       ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
-       if (ret) {
-               kfree(ifbdev);
-               return ret;
-       }
-
-       dev_priv->fbdev = ifbdev;
-       INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
-
-       drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-
-       return 0;
-}
-
-static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
-{
-       struct intel_fbdev *ifbdev = data;
-
-       /* Due to peculiar init order wrt hpd handling this is separate. */
-       if (drm_fb_helper_initial_config(&ifbdev->helper,
-                                        ifbdev->preferred_bpp))
-               intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-}
-
-void intel_fbdev_initial_config_async(struct drm_device *dev)
-{
-       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
-
-       if (!ifbdev)
-               return;
-
-       ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
-}
-
-static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
-{
-       if (!ifbdev->cookie)
-               return;
-
-       /* Only serialises with all preceding async calls, hence +1 */
-       async_synchronize_cookie(ifbdev->cookie + 1);
-       ifbdev->cookie = 0;
-}
-
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
-
-       if (!ifbdev)
-               return;
-
-       cancel_work_sync(&dev_priv->fbdev_suspend_work);
-       if (!current_is_async())
-               intel_fbdev_sync(ifbdev);
-
-       drm_fb_helper_unregister_fbi(&ifbdev->helper);
-}
-
-void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
-       struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
-
-       if (!ifbdev)
-               return;
-
-       intel_fbdev_destroy(ifbdev);
-}
-
-/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
- * processing, fbdev will perform a full connector reprobe if a hotplug event
- * was received while HPD was suspended.
- */
-static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
-{
-       bool send_hpd = false;
-
-       mutex_lock(&ifbdev->hpd_lock);
-       ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
-       send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
-       ifbdev->hpd_waiting = false;
-       mutex_unlock(&ifbdev->hpd_lock);
-
-       if (send_hpd) {
-               DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
-               drm_fb_helper_hotplug_event(&ifbdev->helper);
-       }
-}
-
-void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
-       struct fb_info *info;
-
-       if (!ifbdev || !ifbdev->vma)
-               return;
-
-       info = ifbdev->helper.fbdev;
-
-       if (synchronous) {
-               /* Flush any pending work to turn the console on, and then
-                * wait to turn it off. It must be synchronous as we are
-                * about to suspend or unload the driver.
-                *
-                * Note that from within the work-handler, we cannot flush
-                * ourselves, so only flush outstanding work upon suspend!
-                */
-               if (state != FBINFO_STATE_RUNNING)
-                       flush_work(&dev_priv->fbdev_suspend_work);
-
-               console_lock();
-       } else {
-               /*
-                * The console lock can be pretty contended on resume due
-                * to all the printk activity.  Try to keep it out of the hot
-                * path of resume if possible.
-                */
-               WARN_ON(state != FBINFO_STATE_RUNNING);
-               if (!console_trylock()) {
-                       /* Don't block our own workqueue as this can
-                        * be run in parallel with other i915.ko tasks.
-                        */
-                       schedule_work(&dev_priv->fbdev_suspend_work);
-                       return;
-               }
-       }
-
-       /* On resume from hibernation: If the object is shmemfs backed, it has
-        * been restored from swap. If the object is stolen however, it will be
-        * full of whatever garbage was left in there.
-        */
-       if (state == FBINFO_STATE_RUNNING &&
-           intel_fb_obj(&ifbdev->fb->base)->stolen)
-               memset_io(info->screen_base, 0, info->screen_size);
-
-       drm_fb_helper_set_suspend(&ifbdev->helper, state);
-       console_unlock();
-
-       intel_fbdev_hpd_set_suspend(ifbdev, state);
-}
-
-void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
-       bool send_hpd;
-
-       if (!ifbdev)
-               return;
-
-       intel_fbdev_sync(ifbdev);
-
-       mutex_lock(&ifbdev->hpd_lock);
-       send_hpd = !ifbdev->hpd_suspended;
-       ifbdev->hpd_waiting = true;
-       mutex_unlock(&ifbdev->hpd_lock);
-
-       if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
-               drm_fb_helper_hotplug_event(&ifbdev->helper);
-}
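
The hpd_suspended/hpd_waiting handshake used by intel_fbdev_hpd_set_suspend() and intel_fbdev_output_poll_changed() above can be modelled in isolation roughly as follows. Struct and function names are invented for illustration, and the real code additionally holds hpd_lock around the flag updates:

#include <stdbool.h>
#include <stdio.h>

struct hpd_state {
        bool suspended; /* fbdev HPD processing suspended */
        bool waiting;   /* a hotplug arrived while suspended */
};

/* Hotplug event: handle immediately, or remember it for resume. */
static void hotplug(struct hpd_state *s)
{
        if (s->suspended) {
                s->waiting = true;
                return;
        }
        printf("handling hotplug now\n");
}

/* Suspend/resume: on resume, replay a deferred hotplug, if any. */
static void set_suspend(struct hpd_state *s, bool suspend)
{
        bool replay = !suspend && s->waiting;

        s->suspended = suspend;
        s->waiting = false;
        if (replay)
                printf("handling delayed hotplug on resume\n");
}

int main(void)
{
        struct hpd_state s = { 0 };

        set_suspend(&s, true);
        hotplug(&s);            /* deferred */
        set_suspend(&s, false); /* replayed here */
        return 0;
}
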
-
-void intel_fbdev_restore_mode(struct drm_device *dev)
-{
-       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
-
-       if (!ifbdev)
-               return;
-
-       intel_fbdev_sync(ifbdev);
-       if (!ifbdev->vma)
-               return;
-
-       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
-               intel_fbdev_invalidate(ifbdev);
-}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/intel_fbdev.h
deleted file mode 100644 (file)
index de7c842..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_FBDEV_H__
-#define __INTEL_FBDEV_H__
-
-#include <linux/types.h>
-
-struct drm_device;
-struct drm_i915_private;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_device *dev);
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-void intel_fbdev_fini(struct drm_i915_private *dev_priv);
-void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_device *dev);
-#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
-       return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
-}
-
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
-{
-}
-#endif
-
-#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
deleted file mode 100644 (file)
index 8545ad3..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- */
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_fbc.h"
-#include "intel_fifo_underrun.h"
-
-/**
- * DOC: fifo underrun handling
- *
- * The i915 driver checks for display fifo underruns using the interrupt signals
- * provided by the hardware. This is enabled by default and fairly useful to
- * debug display issues, especially watermark settings.
- *
- * If an underrun is detected, it is logged into dmesg. To avoid flooding the
- * logs and occupying the cpu, underrun interrupts are disabled after the first
- * occurrence until the next modeset on a given pipe.
- *
- * Note that underrun detection on gmch platforms is a bit uglier since there
- * is no interrupt (even though the signalling bit is in the PIPESTAT pipe
- * interrupt register). Also, on some other platforms underrun interrupts are
- * shared, which means that if we detect an underrun we need to disable underrun
- * reporting on all pipes.
- *
- * The code also supports underrun detection on the PCH transcoder.
- */
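
As a rough illustration of the "log once, then mute until the next modeset" policy described above; the types and names below are invented for the sketch and are not driver API:

#include <stdbool.h>
#include <stdio.h>

struct pipe_state {
        bool underrun_reporting_enabled;
};

/* Called from the (simulated) underrun interrupt: report once, then
 * disable further reporting to avoid an interrupt storm. */
static void handle_underrun_irq(struct pipe_state *pipe, char name)
{
        if (!pipe->underrun_reporting_enabled)
                return;
        pipe->underrun_reporting_enabled = false;
        printf("pipe %c FIFO underrun\n", name);
}

/* A modeset re-arms reporting for the pipe. */
static void modeset(struct pipe_state *pipe)
{
        pipe->underrun_reporting_enabled = true;
}

int main(void)
{
        struct pipe_state a = { .underrun_reporting_enabled = true };

        handle_underrun_irq(&a, 'A');   /* logged */
        handle_underrun_irq(&a, 'A');   /* muted */
        modeset(&a);
        handle_underrun_irq(&a, 'A');   /* logged again */
        return 0;
}
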
-
-static bool ivb_can_enable_err_int(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc;
-       enum pipe pipe;
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       for_each_pipe(dev_priv, pipe) {
-               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-               if (crtc->cpu_fifo_underrun_disabled)
-                       return false;
-       }
-
-       return true;
-}
-
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum pipe pipe;
-       struct intel_crtc *crtc;
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       for_each_pipe(dev_priv, pipe) {
-               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-               if (crtc->pch_fifo_underrun_disabled)
-                       return false;
-       }
-
-       return true;
-}
-
-static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       i915_reg_t reg = PIPESTAT(crtc->pipe);
-       u32 enable_mask;
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
-               return;
-
-       enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
-       I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
-       POSTING_READ(reg);
-
-       trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
-       DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
-}
-
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
-                                            enum pipe pipe,
-                                            bool enable, bool old)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       i915_reg_t reg = PIPESTAT(pipe);
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       if (enable) {
-               u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
-
-               I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
-               POSTING_READ(reg);
-       } else {
-               if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
-                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
-       }
-}
-
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                enum pipe pipe, bool enable)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 bit = (pipe == PIPE_A) ?
-               DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
-
-       if (enable)
-               ilk_enable_display_irq(dev_priv, bit);
-       else
-               ilk_disable_display_irq(dev_priv, bit);
-}
-
-static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pipe = crtc->pipe;
-       u32 err_int = I915_READ(GEN7_ERR_INT);
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
-               return;
-
-       I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
-       POSTING_READ(GEN7_ERR_INT);
-
-       trace_intel_cpu_fifo_underrun(dev_priv, pipe);
-       DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
-}
-
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                 enum pipe pipe,
-                                                 bool enable, bool old)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       if (enable) {
-               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
-
-               if (!ivb_can_enable_err_int(dev))
-                       return;
-
-               ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-       } else {
-               ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
-               if (old &&
-                   I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
-                       DRM_ERROR("uncleared fifo underrun on pipe %c\n",
-                                 pipe_name(pipe));
-               }
-       }
-}
-
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                 enum pipe pipe, bool enable)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       if (enable)
-               bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
-       else
-               bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
-}
-
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
-                                           enum pipe pch_transcoder,
-                                           bool enable)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       u32 bit = (pch_transcoder == PIPE_A) ?
-               SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
-       if (enable)
-               ibx_enable_display_interrupt(dev_priv, bit);
-       else
-               ibx_disable_display_interrupt(dev_priv, bit);
-}
-
-static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum pipe pch_transcoder = crtc->pipe;
-       u32 serr_int = I915_READ(SERR_INT);
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
-               return;
-
-       I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-       POSTING_READ(SERR_INT);
-
-       trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
-       DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
-                 pipe_name(pch_transcoder));
-}
-
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
-                                           enum pipe pch_transcoder,
-                                           bool enable, bool old)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       if (enable) {
-               I915_WRITE(SERR_INT,
-                          SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
-               if (!cpt_can_enable_serr_int(dev))
-                       return;
-
-               ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-       } else {
-               ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
-               if (old && I915_READ(SERR_INT) &
-                   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
-                       DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
-                                 pipe_name(pch_transcoder));
-               }
-       }
-}
-
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                                   enum pipe pipe, bool enable)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-       bool old;
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       old = !crtc->cpu_fifo_underrun_disabled;
-       crtc->cpu_fifo_underrun_disabled = !enable;
-
-       if (HAS_GMCH(dev_priv))
-               i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (IS_GEN_RANGE(dev_priv, 5, 6))
-               ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-       else if (IS_GEN(dev_priv, 7))
-               ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (INTEL_GEN(dev_priv) >= 8)
-               broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-
-       return old;
-}
-
-/**
- * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
- * @dev_priv: i915 device instance
- * @pipe: (CPU) pipe to set state for
- * @enable: whether underruns should be reported or not
- *
- * This function sets the fifo underrun state for @pipe. It is used in the
- * modeset code to avoid false positives since on many platforms underruns are
- * expected when disabling or enabling the pipe.
- *
- * Notice that on some platforms disabling underrun reports for one pipe
- * disables for all due to shared interrupts. Actual reporting is still per-pipe
- * though.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
-                                          enum pipe pipe, bool enable)
-{
-       unsigned long flags;
-       bool ret;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
-                                                     enable);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return ret;
-}
-
-/**
- * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
- * @dev_priv: i915 device instance
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- * @enable: whether underruns should be reported or not
- *
- * This function disables or enables PCH fifo underruns for a specific PCH
- * transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
- * underrun reporting for one transcoder may also disable all the other PCH
- * error interrupts for the other transcoders, because there's just one
- * interrupt mask/enable bit for all the transcoders.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
-                                          enum pipe pch_transcoder,
-                                          bool enable)
-{
-       struct intel_crtc *crtc =
-               intel_get_crtc_for_pipe(dev_priv, pch_transcoder);
-       unsigned long flags;
-       bool old;
-
-       /*
-        * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
-        * has only one pch transcoder A that all pipes can use. To avoid racy
-        * pch transcoder -> pipe lookups from interrupt code simply store the
-        * underrun statistics in crtc A. Since we never expose this anywhere
-        * nor use it outside of the fifo underrun code here, using the "wrong"
-        * crtc on LPT won't cause issues.
-        */
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
-       old = !crtc->pch_fifo_underrun_disabled;
-       crtc->pch_fifo_underrun_disabled = !enable;
-
-       if (HAS_PCH_IBX(dev_priv))
-               ibx_set_fifo_underrun_reporting(&dev_priv->drm,
-                                               pch_transcoder,
-                                               enable);
-       else
-               cpt_set_fifo_underrun_reporting(&dev_priv->drm,
-                                               pch_transcoder,
-                                               enable, old);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-       return old;
-}
-
-/**
- * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
- * @dev_priv: i915 device instance
- * @pipe: (CPU) pipe to set state for
- *
- * This handles a CPU fifo underrun interrupt, generating an underrun warning
- * into dmesg if underrun reporting is enabled and then disables the underrun
- * interrupt to avoid an irq storm.
- */
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
-                                        enum pipe pipe)
-{
-       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-       /* We may be called too early in init, thanks BIOS! */
-       if (crtc == NULL)
-               return;
-
-       /* GMCH can't disable fifo underruns, filter them. */
-       if (HAS_GMCH(dev_priv) &&
-           crtc->cpu_fifo_underrun_disabled)
-               return;
-
-       if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
-               trace_intel_cpu_fifo_underrun(dev_priv, pipe);
-               DRM_ERROR("CPU pipe %c FIFO underrun\n",
-                         pipe_name(pipe));
-       }
-
-       intel_fbc_handle_fifo_underrun_irq(dev_priv);
-}
-
-/**
- * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
- * @dev_priv: i915 device instance
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- *
- * This handles a PCH fifo underrun interrupt, generating an underrun warning
- * into dmesg if underrun reporting is enabled and then disables the underrun
- * interrupt to avoid an irq storm.
- */
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
-                                        enum pipe pch_transcoder)
-{
-       if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
-                                                 false)) {
-               trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
-               DRM_ERROR("PCH transcoder %c FIFO underrun\n",
-                         pipe_name(pch_transcoder));
-       }
-}
-
-/**
- * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
- * @dev_priv: i915 device instance
- *
- * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
- * error interrupt may have been disabled, and so CPU fifo underruns won't
- * necessarily raise an interrupt, and on GMCH platforms where underruns never
- * raise an interrupt.
- */
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
-{
-       struct intel_crtc *crtc;
-
-       spin_lock_irq(&dev_priv->irq_lock);
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               if (crtc->cpu_fifo_underrun_disabled)
-                       continue;
-
-               if (HAS_GMCH(dev_priv))
-                       i9xx_check_fifo_underruns(crtc);
-               else if (IS_GEN(dev_priv, 7))
-                       ivybridge_check_fifo_underruns(crtc);
-       }
-
-       spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-/**
- * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
- * @dev_priv: i915 device instance
- *
- * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
- * error interrupt may have been disabled, and so PCH fifo underruns won't
- * necessarily raise an interrupt.
- */
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
-{
-       struct intel_crtc *crtc;
-
-       spin_lock_irq(&dev_priv->irq_lock);
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               if (crtc->pch_fifo_underrun_disabled)
-                       continue;
-
-               if (HAS_PCH_CPT(dev_priv))
-                       cpt_check_pch_fifo_underruns(crtc);
-       }
-
-       spin_unlock_irq(&dev_priv->irq_lock);
-}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.h b/drivers/gpu/drm/i915/intel_fifo_underrun.h
deleted file mode 100644 (file)
index e04f22a..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_FIFO_UNDERRUN_H__
-#define __INTEL_FIFO_UNDERRUN_H__
-
-#include <linux/types.h>
-
-#include "intel_display.h"
-
-struct drm_i915_private;
-
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
-                                          enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
-                                          enum pipe pch_transcoder,
-                                          bool enable);
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
-                                        enum pipe pipe);
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
-                                        enum pipe pch_transcoder);
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_FIFO_UNDERRUN_H__ */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
deleted file mode 100644 (file)
index 44273c1..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     Daniel Vetter <daniel.vetter@ffwll.ch>
- */
-
-/**
- * DOC: frontbuffer tracking
- *
- * Many features require us to track changes to the currently active
- * frontbuffer, especially rendering targeted at the frontbuffer.
- *
- * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
- * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
- * then called when the contents of the frontbuffer are invalidated, when
- * frontbuffer rendering has stopped again to flush out all the changes and when
- * the frontbuffer is exchanged with a flip. Subsystems interested in
- * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
- * into the relevant places and filter for the frontbuffer slots that they are
- * interested in.
- *
- * At a high level there are two types of powersaving features. The first type
- * works like a special cache (FBC and PSR) and is interested in when it should
- * stop caching and when to restart caching. This is done by placing callbacks
- * into the invalidate and the flush functions: At invalidate the caching must
- * be stopped and at flush time it can be restarted. And maybe it needs to know
- * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
- * and flush on its own), which can be achieved by placing callbacks into the
- * flip functions.
- *
- * The other type of display power saving feature only cares about busyness
- * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
- * busyness. There is no direct way to detect idleness. Instead, an idle-timer
- * delayed work should be started from the flush and flip functions and
- * cancelled as soon as busyness is detected.
- */
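
A compact standalone model of the busy/flip bitmask bookkeeping described above; the names are invented for illustration, and the real code additionally takes fb_tracking.lock around each update:

#include <stdio.h>

struct fb_tracking {
        unsigned int busy_bits; /* planes with outstanding GPU rendering */
        unsigned int flip_bits; /* planes with a pending async flip */
};

/* CS rendering starts on some frontbuffer planes: mark them busy and
 * cancel any pending flip flush for them. */
static void invalidate_cs(struct fb_tracking *t, unsigned int bits)
{
        t->busy_bits |= bits;
        t->flip_bits &= ~bits;
}

/* Rendering finished: only planes still marked busy get flushed. */
static unsigned int flush_cs(struct fb_tracking *t, unsigned int bits)
{
        bits &= t->busy_bits;
        t->busy_bits &= ~bits;
        return bits;            /* bits that should be flushed now */
}

/* Async flip scheduled: remember it and drop stale busy bits. */
static void flip_prepare(struct fb_tracking *t, unsigned int bits)
{
        t->flip_bits |= bits;
        t->busy_bits &= ~bits;
}

/* Flip latched: flush whatever wasn't cancelled in the meantime. */
static unsigned int flip_complete(struct fb_tracking *t, unsigned int bits)
{
        bits &= t->flip_bits;
        t->flip_bits &= ~bits;
        return bits;
}

int main(void)
{
        struct fb_tracking t = { 0, 0 };

        invalidate_cs(&t, 0x1);
        printf("flush after render: %#x\n", flush_cs(&t, 0x1));
        flip_prepare(&t, 0x2);
        printf("flush after flip:   %#x\n", flip_complete(&t, 0x2));
        return 0;
}

The main() sequence mirrors the two paths described above: a CS invalidate followed by a flush, and an async flip that is flushed at completion.
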
-
-#include "display/intel_dp.h"
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_fbc.h"
-#include "intel_frontbuffer.h"
-#include "intel_psr.h"
-
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                              enum fb_op_origin origin,
-                              unsigned int frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-       if (origin == ORIGIN_CS) {
-               spin_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-               spin_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       might_sleep();
-       intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
-       intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
-       intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some outstanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
-                                   unsigned frontbuffer_bits,
-                                   enum fb_op_origin origin)
-{
-       /* Delay flushing when rings are still busy. */
-       spin_lock(&dev_priv->fb_tracking.lock);
-       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
-
-       if (!frontbuffer_bits)
-               return;
-
-       might_sleep();
-       intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
-       intel_psr_flush(dev_priv, frontbuffer_bits, origin);
-       intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                         enum fb_op_origin origin,
-                         unsigned int frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-       if (origin == ORIGIN_CS) {
-               spin_lock(&dev_priv->fb_tracking.lock);
-               /* Filter out new bits since rendering started. */
-               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               spin_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       if (frontbuffer_bits)
-               intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on the given planes. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
-                                   unsigned frontbuffer_bits)
-{
-       spin_lock(&dev_priv->fb_tracking.lock);
-       dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
-       /* Remove stale busy bits due to the old buffer. */
-       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
-                                    unsigned frontbuffer_bits)
-{
-       spin_lock(&dev_priv->fb_tracking.lock);
-       /* Mask any cancelled flips. */
-       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
-
-       if (frontbuffer_bits)
-               intel_frontbuffer_flush(dev_priv,
-                                       frontbuffer_bits, ORIGIN_FLIP);
-}
-
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on the given planes. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
-                           unsigned frontbuffer_bits)
-{
-       spin_lock(&dev_priv->fb_tracking.lock);
-       /* Remove stale busy bits due to the old buffer. */
-       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
-}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
deleted file mode 100644 (file)
index 5727320..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2014-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __INTEL_FRONTBUFFER_H__
-#define __INTEL_FRONTBUFFER_H__
-
-#include "gem/i915_gem_object.h"
-
-struct drm_i915_private;
-struct drm_i915_gem_object;
-
-enum fb_op_origin {
-       ORIGIN_GTT,
-       ORIGIN_CPU,
-       ORIGIN_CS,
-       ORIGIN_FLIP,
-       ORIGIN_DIRTYFB,
-};
-
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
-                                   unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
-                                    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
-                           unsigned frontbuffer_bits);
-
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                              enum fb_op_origin origin,
-                              unsigned int frontbuffer_bits);
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                         enum fb_op_origin origin,
-                         unsigned int frontbuffer_bits);
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                                          enum fb_op_origin origin)
-{
-       unsigned int frontbuffer_bits;
-
-       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
-       if (!frontbuffer_bits)
-               return false;
-
-       __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
-       return true;
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again.
- */
-static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                                     enum fb_op_origin origin)
-{
-       unsigned int frontbuffer_bits;
-
-       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
-       if (!frontbuffer_bits)
-               return;
-
-       __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
-}
-
-#endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
deleted file mode 100644 (file)
index bc3a94d..0000000
+++ /dev/null
@@ -1,1977 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright (C) 2017 Google, Inc.
- *
- * Authors:
- * Sean Paul <seanpaul@chromium.org>
- */
-
-#include <linux/component.h>
-#include <linux/i2c.h>
-#include <linux/random.h>
-
-#include <drm/drm_hdcp.h>
-#include <drm/i915_component.h>
-
-#include "i915_reg.h"
-#include "intel_drv.h"
-#include "intel_hdcp.h"
-#include "intel_sideband.h"
-
-#define KEY_LOAD_TRIES 5
-#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS       50
-#define HDCP2_LC_RETRY_CNT                     3
-
-static
-bool intel_hdcp_is_ksv_valid(u8 *ksv)
-{
-       int i, ones = 0;
-       /* KSV has 20 1's and 20 0's */
-       for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
-               ones += hweight8(ksv[i]);
-       if (ones != 20)
-               return false;
-
-       return true;
-}
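
The rule enforced above (a KSV is 40 bits containing exactly 20 ones) can be checked in isolation like this; popcount8() is a stand-in for the kernel's hweight8():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KSV_LEN 5       /* 40-bit HDCP KSV */

/* Count set bits in one byte (stand-in for the kernel's hweight8()). */
static unsigned int popcount8(uint8_t v)
{
        unsigned int n = 0;

        while (v) {
                n += v & 1;
                v >>= 1;
        }
        return n;
}

static bool ksv_is_valid(const uint8_t ksv[KSV_LEN])
{
        unsigned int i, ones = 0;

        for (i = 0; i < KSV_LEN; i++)
                ones += popcount8(ksv[i]);
        return ones == 20;      /* spec: 20 ones and 20 zeroes */
}

int main(void)
{
        const uint8_t good[KSV_LEN] = { 0xff, 0xff, 0x0f, 0x00, 0x00 }; /* 20 ones */
        const uint8_t bad[KSV_LEN]  = { 0xff, 0x00, 0x00, 0x00, 0x00 }; /* 8 ones */

        printf("good: %d, bad: %d\n", ksv_is_valid(good), ksv_is_valid(bad));
        return 0;
}
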
-
-static
-int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
-                              const struct intel_hdcp_shim *shim, u8 *bksv)
-{
-       int ret, i, tries = 2;
-
-       /* HDCP spec states that we must retry the bksv if it is invalid */
-       for (i = 0; i < tries; i++) {
-               ret = shim->read_bksv(intel_dig_port, bksv);
-               if (ret)
-                       return ret;
-               if (intel_hdcp_is_ksv_valid(bksv))
-                       break;
-       }
-       if (i == tries) {
-               DRM_DEBUG_KMS("Bksv is invalid\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/* Is HDCP 1.4 supported by both the platform and the sink? */
-bool intel_hdcp_capable(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       const struct intel_hdcp_shim *shim = connector->hdcp.shim;
-       bool capable = false;
-       u8 bksv[5];
-
-       if (!shim)
-               return capable;
-
-       if (shim->hdcp_capable) {
-               shim->hdcp_capable(intel_dig_port, &capable);
-       } else {
-               if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
-                       capable = true;
-       }
-
-       return capable;
-}
-
-/* Is HDCP 2.2 supported by both the platform and the sink? */
-bool intel_hdcp2_capable(struct intel_connector *connector)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       bool capable = false;
-
-       /* I915 support for HDCP2.2 */
-       if (!hdcp->hdcp2_supported)
-               return false;
-
-       /* The MEI interface must be up and the HDCP component bound */
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return false;
-       }
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       /* Sink's capability for HDCP2.2 */
-       hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
-
-       return capable;
-}
-
-static inline bool intel_hdcp_in_use(struct intel_connector *connector)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       enum port port = connector->encoder->port;
-       u32 reg;
-
-       reg = I915_READ(PORT_HDCP_STATUS(port));
-       return reg & HDCP_STATUS_ENC;
-}
-
-static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       enum port port = connector->encoder->port;
-       u32 reg;
-
-       reg = I915_READ(HDCP2_STATUS_DDI(port));
-       return reg & LINK_ENCRYPTION_STATUS;
-}
-
-static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
-                                   const struct intel_hdcp_shim *shim)
-{
-       int ret, read_ret;
-       bool ksv_ready;
-
-       /* Poll for ksv list ready (spec says max time allowed is 5s) */
-       ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
-                                                        &ksv_ready),
-                        read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
-                        100 * 1000);
-       if (ret)
-               return ret;
-       if (read_ret)
-               return read_ret;
-       if (!ksv_ready)
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
-static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-       enum i915_power_well_id id;
-       bool enabled = false;
-
-       /*
-        * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
-        * On all BXT+, SW can load the keys only when the PW#1 is turned on.
-        */
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               id = HSW_DISP_PW_GLOBAL;
-       else
-               id = SKL_DISP_PW_1;
-
-       mutex_lock(&power_domains->lock);
-
-       /* PG1 (power well #1) needs to be enabled */
-       for_each_power_well(dev_priv, power_well) {
-               if (power_well->desc->id == id) {
-                       enabled = power_well->desc->ops->is_enabled(dev_priv,
-                                                                   power_well);
-                       break;
-               }
-       }
-       mutex_unlock(&power_domains->lock);
-
-       /*
-        * Another requirement for HDCP key loadability is that the PLL for
-        * cdclk is enabled. Without an active crtc we won't land here, so we
-        * assume that cdclk is already on.
-        */
-
-       return enabled;
-}
-
-static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
-       I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
-                  HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
-}
-
-static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
-{
-       int ret;
-       u32 val;
-
-       val = I915_READ(HDCP_KEY_STATUS);
-       if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
-               return 0;
-
-       /*
-        * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
-        * out of reset, so if the key is not already loaded, it's an error state.
-        */
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
-                       return -ENXIO;
-
-       /*
-        * Initiate loading the HDCP key from fuses.
-        *
-        * On BXT+ platforms the HDCP key needs to be loaded by SW. Gen 9
-        * platforms other than BXT and GLK (i.e. GEN9_BC) differ from the rest
-        * in the key load trigger process and use the GT Driver Mailbox i/f.
-        */
-       if (IS_GEN9_BC(dev_priv)) {
-               ret = sandybridge_pcode_write(dev_priv,
-                                             SKL_PCODE_LOAD_HDCP_KEYS, 1);
-               if (ret) {
-                       DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
-                                 ret);
-                       return ret;
-               }
-       } else {
-               I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
-       }
-
-       /* Wait for the keys to load (500us) */
-       ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
-                                       HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
-                                       10, 1, &val);
-       if (ret)
-               return ret;
-       else if (!(val & HDCP_KEY_LOAD_STATUS))
-               return -ENXIO;
-
-       /* Send Aksv over to PCH display for use in authentication */
-       I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
-
-       return 0;
-}
-
-/* Write 32 bits of SHA-1 text to HW and wait for it to be consumed */
-static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
-{
-       I915_WRITE(HDCP_SHA_TEXT, sha_text);
-       if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
-                                   HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
-               DRM_ERROR("Timed out waiting for SHA1 ready\n");
-               return -ETIMEDOUT;
-       }
-       return 0;
-}
-
-static
-u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
-{
-       enum port port = intel_dig_port->base.port;
-       switch (port) {
-       case PORT_A:
-               return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
-       case PORT_B:
-               return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
-       case PORT_C:
-               return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
-       case PORT_D:
-               return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
-       case PORT_E:
-               return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
-       default:
-               break;
-       }
-       DRM_ERROR("Unknown port %d\n", port);
-       return -EINVAL;
-}
-
-static
-int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
-                               const struct intel_hdcp_shim *shim,
-                               u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
-{
-       struct drm_i915_private *dev_priv;
-       u32 vprime, sha_text, sha_leftovers, rep_ctl;
-       int ret, i, j, sha_idx;
-
-       dev_priv = intel_dig_port->base.base.dev->dev_private;
-
-       /* Process V' values from the receiver */
-       for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
-               ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
-               if (ret)
-                       return ret;
-               I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
-       }
-
-       /*
-        * We need to write the concatenation of all device KSVs, BINFO (DP) ||
-        * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
-        * stream is written via the HDCP_SHA_TEXT register in 32-bit
-        * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
-        * index will keep track of our progress through the 64 bytes as well as
-        * helping us work the 40-bit KSVs through our 32-bit register.
-        *
-        * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
-        */
-       sha_idx = 0;
-       sha_text = 0;
-       sha_leftovers = 0;
-       rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
-       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
-       for (i = 0; i < num_downstream; i++) {
-               unsigned int sha_empty;
-               u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
-
-               /* Fill up the empty slots in sha_text and write it out */
-               sha_empty = sizeof(sha_text) - sha_leftovers;
-               for (j = 0; j < sha_empty; j++)
-                       sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
-
-               ret = intel_write_sha_text(dev_priv, sha_text);
-               if (ret < 0)
-                       return ret;
-
-               /* Programming guide writes this every 64 bytes */
-               sha_idx += sizeof(sha_text);
-               if (!(sha_idx % 64))
-                       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
-
-               /* Store the leftover bytes from the ksv in sha_text */
-               sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
-               sha_text = 0;
-               for (j = 0; j < sha_leftovers; j++)
-                       sha_text |= ksv[sha_empty + j] <<
-                                       ((sizeof(sha_text) - j - 1) * 8);
-
-               /*
-                * If we still have room in sha_text for more data, continue.
-                * Otherwise, write it out immediately.
-                */
-               if (sizeof(sha_text) > sha_leftovers)
-                       continue;
-
-               ret = intel_write_sha_text(dev_priv, sha_text);
-               if (ret < 0)
-                       return ret;
-               sha_leftovers = 0;
-               sha_text = 0;
-               sha_idx += sizeof(sha_text);
-       }
-
-       /*
-        * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
-        * bytes are leftover from the last ksv, we might be able to fit them
-        * all in sha_text (first 2 cases), or we might need to split them up
-        * into 2 writes (last 2 cases).
-        */
-       if (sha_leftovers == 0) {
-               /* Write 16 bits of text, 16 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
-               ret = intel_write_sha_text(dev_priv,
-                                          bstatus[0] << 8 | bstatus[1]);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 32 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 16 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-       } else if (sha_leftovers == 1) {
-               /* Write 24 bits of text, 8 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
-               sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
-               /* Only 24-bits of data, must be in the LSB */
-               sha_text = (sha_text & 0xffffff00) >> 8;
-               ret = intel_write_sha_text(dev_priv, sha_text);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 32 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 24 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-       } else if (sha_leftovers == 2) {
-               /* Write 32 bits of text */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
-               sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
-               ret = intel_write_sha_text(dev_priv, sha_text);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 64 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-               for (i = 0; i < 2; i++) {
-                       ret = intel_write_sha_text(dev_priv, 0);
-                       if (ret < 0)
-                               return ret;
-                       sha_idx += sizeof(sha_text);
-               }
-       } else if (sha_leftovers == 3) {
-               /* Write 32 bits of text */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
-               sha_text |= bstatus[0] << 24;
-               ret = intel_write_sha_text(dev_priv, sha_text);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 8 bits of text, 24 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
-               ret = intel_write_sha_text(dev_priv, bstatus[1]);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 32 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-
-               /* Write 8 bits of M0 */
-               I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-       } else {
-               DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
-                             sha_leftovers);
-               return -EINVAL;
-       }
-
-       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
-       /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
-       while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
-               ret = intel_write_sha_text(dev_priv, 0);
-               if (ret < 0)
-                       return ret;
-               sha_idx += sizeof(sha_text);
-       }
-
-       /*
-        * Last write gets the length of the concatenation in bits. That is:
-        *  - 5 bytes per device
-        *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
-        */
-       sha_text = (num_downstream * 5 + 10) * 8;
-       ret = intel_write_sha_text(dev_priv, sha_text);
-       if (ret < 0)
-               return ret;
-
-       /* Tell the HW we're done with the hash and wait for it to ACK */
-       I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
-       if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
-                                   HDCP_SHA1_COMPLETE,
-                                   HDCP_SHA1_COMPLETE, 1)) {
-               DRM_ERROR("Timed out waiting for SHA1 complete\n");
-               return -ETIMEDOUT;
-       }
-       if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
-               DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
-               return -ENXIO;
-       }
-
-       return 0;
-}
-
-/* Implements Part 2 of the HDCP authorization procedure */
-static
-int intel_hdcp_auth_downstream(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       const struct intel_hdcp_shim *shim = connector->hdcp.shim;
-       struct drm_device *dev = connector->base.dev;
-       u8 bstatus[2], num_downstream, *ksv_fifo;
-       int ret, i, tries = 3;
-
-       ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
-       if (ret) {
-               DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
-               return ret;
-       }
-
-       ret = shim->read_bstatus(intel_dig_port, bstatus);
-       if (ret)
-               return ret;
-
-       if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
-           DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
-               DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
-               return -EPERM;
-       }
-
-       /*
-        * When the repeater reports a device count of 0, the HDCP1.4 spec
-        * allows disabling HDCP encryption, which implies the repeater can't
-        * have its own display. As no encrypted content is consumed by a
-        * repeater with 0 downstream devices, fail the authentication.
-        */
-       num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
-       if (num_downstream == 0)
-               return -EINVAL;
-
-       ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
-       if (!ksv_fifo)
-               return -ENOMEM;
-
-       ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
-       if (ret)
-               goto err;
-
-       if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
-               DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
-               /* Free the KSV fifo on this error path as well */
-               ret = -EPERM;
-               goto err;
-       }
-
-       /*
-        * When V' mismatches, the DP spec mandates re-reading
-        * V' at least twice.
-        */
-       for (i = 0; i < tries; i++) {
-               ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
-                                                 ksv_fifo, num_downstream,
-                                                 bstatus);
-               if (!ret)
-                       break;
-       }
-
-       if (i == tries) {
-               DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
-               goto err;
-       }
-
-       DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
-                     num_downstream);
-       ret = 0;
-err:
-       kfree(ksv_fifo);
-       return ret;
-}
-
-/* Implements Part 1 of the HDCP authorization procedure */
-static int intel_hdcp_auth(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_device *dev = connector->base.dev;
-       const struct intel_hdcp_shim *shim = hdcp->shim;
-       struct drm_i915_private *dev_priv;
-       enum port port;
-       unsigned long r0_prime_gen_start;
-       int ret, i, tries = 2;
-       union {
-               u32 reg[2];
-               u8 shim[DRM_HDCP_AN_LEN];
-       } an;
-       union {
-               u32 reg[2];
-               u8 shim[DRM_HDCP_KSV_LEN];
-       } bksv;
-       union {
-               u32 reg;
-               u8 shim[DRM_HDCP_RI_LEN];
-       } ri;
-       bool repeater_present, hdcp_capable;
-
-       dev_priv = intel_dig_port->base.base.dev->dev_private;
-
-       port = intel_dig_port->base.port;
-
-       /*
-        * Detects whether the display is HDCP capable. Although we check for
-        * valid Bksv below, the HDCP over DP spec requires that we check
-        * whether the display supports HDCP before we write An. For HDMI
-        * displays, this is not necessary.
-        */
-       if (shim->hdcp_capable) {
-               ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
-               if (ret)
-                       return ret;
-               if (!hdcp_capable) {
-                       DRM_DEBUG_KMS("Panel is not HDCP capable\n");
-                       return -EINVAL;
-               }
-       }
-
-       /* Initialize An with 2 random values and acquire it */
-       for (i = 0; i < 2; i++)
-               I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
-       I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
-
-       /* Wait for An to be acquired */
-       if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
-                                   HDCP_STATUS_AN_READY,
-                                   HDCP_STATUS_AN_READY, 1)) {
-               DRM_ERROR("Timed out waiting for An\n");
-               return -ETIMEDOUT;
-       }
-
-       an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
-       an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
-       ret = shim->write_an_aksv(intel_dig_port, an.shim);
-       if (ret)
-               return ret;
-
-       r0_prime_gen_start = jiffies;
-
-       memset(&bksv, 0, sizeof(bksv));
-
-       ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
-       if (ret < 0)
-               return ret;
-
-       if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
-               DRM_ERROR("BKSV is revoked\n");
-               return -EPERM;
-       }
-
-       I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
-       I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
-
-       ret = shim->repeater_present(intel_dig_port, &repeater_present);
-       if (ret)
-               return ret;
-       if (repeater_present)
-               I915_WRITE(HDCP_REP_CTL,
-                          intel_hdcp_get_repeater_ctl(intel_dig_port));
-
-       ret = shim->toggle_signalling(intel_dig_port, true);
-       if (ret)
-               return ret;
-
-       I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
-
-       /* Wait for R0 ready */
-       if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
-                    (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
-               DRM_ERROR("Timed out waiting for R0 ready\n");
-               return -ETIMEDOUT;
-       }
-
-       /*
-        * Wait for R0' to become available. The spec says 100ms from Aksv, but
-        * some monitors can take longer than this. We'll set the timeout at
-        * 300ms just to be sure.
-        *
-        * On DP, there's an R0_READY bit available but no such bit
-        * exists on HDMI. Since the upper-bound is the same, we'll just do
-        * the stupid thing instead of polling on one and not the other.
-        */
-       wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
-
-       tries = 3;
-
-       /*
-        * The DP HDCP spec mandates two more attempts to re-read R0' in
-        * case of an R0 mismatch.
-        */
-       for (i = 0; i < tries; i++) {
-               ri.reg = 0;
-               ret = shim->read_ri_prime(intel_dig_port, ri.shim);
-               if (ret)
-                       return ret;
-               I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
-
-               /* Wait for Ri prime match */
-               if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
-                   (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
-                       break;
-       }
-
-       if (i == tries) {
-               DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
-                             I915_READ(PORT_HDCP_STATUS(port)));
-               return -ETIMEDOUT;
-       }
-
-       /* Wait for encryption confirmation */
-       if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
-                                   HDCP_STATUS_ENC, HDCP_STATUS_ENC,
-                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
-               DRM_ERROR("Timed out waiting for encryption\n");
-               return -ETIMEDOUT;
-       }
-
-       /*
-        * XXX: If we have MST-connected devices, we need to enable encryption
-        * on those as well.
-        */
-
-       if (repeater_present)
-               return intel_hdcp_auth_downstream(connector);
-
-       DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
-       return 0;
-}
-
-static int _intel_hdcp_disable(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       enum port port = intel_dig_port->base.port;
-       int ret;
-
-       DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
-                     connector->base.name, connector->base.base.id);
-
-       hdcp->hdcp_encrypted = false;
-       I915_WRITE(PORT_HDCP_CONF(port), 0);
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   PORT_HDCP_STATUS(port), ~0, 0,
-                                   ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
-               DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
-               return -ETIMEDOUT;
-       }
-
-       ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
-       if (ret) {
-               DRM_ERROR("Failed to disable HDCP signalling\n");
-               return ret;
-       }
-
-       DRM_DEBUG_KMS("HDCP is disabled\n");
-       return 0;
-}
-
-static int _intel_hdcp_enable(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
-       int i, ret, tries = 3;
-
-       DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
-                     connector->base.name, connector->base.base.id);
-
-       if (!hdcp_key_loadable(dev_priv)) {
-               DRM_ERROR("HDCP key Load is not possible\n");
-               return -ENXIO;
-       }
-
-       for (i = 0; i < KEY_LOAD_TRIES; i++) {
-               ret = intel_hdcp_load_keys(dev_priv);
-               if (!ret)
-                       break;
-               intel_hdcp_clear_keys(dev_priv);
-       }
-       if (ret) {
-               DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
-               return ret;
-       }
-
-       /* In case of authentication failure, the HDCP spec expects reauth. */
-       for (i = 0; i < tries; i++) {
-               ret = intel_hdcp_auth(connector);
-               if (!ret) {
-                       hdcp->hdcp_encrypted = true;
-                       return 0;
-               }
-
-               DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
-
-               /* Ensure HDCP encryption and signalling are stopped. */
-               _intel_hdcp_disable(connector);
-       }
-
-       DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
-       return ret;
-}
-
-static inline
-struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
-{
-       return container_of(hdcp, struct intel_connector, hdcp);
-}
-
-/* Implements Part 3 of the HDCP authorization procedure */
-static int intel_hdcp_check_link(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       enum port port = intel_dig_port->base.port;
-       int ret = 0;
-
-       mutex_lock(&hdcp->mutex);
-
-       /* Check_link valid only when HDCP1.4 is enabled */
-       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
-           !hdcp->hdcp_encrypted) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (WARN_ON(!intel_hdcp_in_use(connector))) {
-               DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
-                         connector->base.name, connector->base.base.id,
-                         I915_READ(PORT_HDCP_STATUS(port)));
-               ret = -ENXIO;
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
-               goto out;
-       }
-
-       if (hdcp->shim->check_link(intel_dig_port)) {
-               if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&hdcp->prop_work);
-               }
-               goto out;
-       }
-
-       DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
-                     connector->base.name, connector->base.base.id);
-
-       ret = _intel_hdcp_disable(connector);
-       if (ret) {
-               DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
-               goto out;
-       }
-
-       ret = _intel_hdcp_enable(connector);
-       if (ret) {
-               DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
-               goto out;
-       }
-
-out:
-       mutex_unlock(&hdcp->mutex);
-       return ret;
-}
-
-static void intel_hdcp_prop_work(struct work_struct *work)
-{
-       struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
-                                              prop_work);
-       struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
-       struct drm_device *dev = connector->base.dev;
-       struct drm_connector_state *state;
-
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-       mutex_lock(&hdcp->mutex);
-
-       /*
-        * This worker is only used to flip between ENABLED/DESIRED. Either of
-        * those to UNDESIRED is handled by core. If value == UNDESIRED,
-        * we're running just after hdcp has been disabled, so just exit
-        */
-       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               state = connector->base.state;
-               state->content_protection = hdcp->value;
-       }
-
-       mutex_unlock(&hdcp->mutex);
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-}
-
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
-{
-       /* PORT E doesn't have HDCP, and PORT F is disabled */
-       return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
-}
-
-static int
-hdcp2_prepare_ake_init(struct intel_connector *connector,
-                      struct hdcp2_ake_init *ake_data)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
-       if (ret)
-               DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int
-hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
-                               struct hdcp2_ake_send_cert *rx_cert,
-                               bool *paired,
-                               struct hdcp2_ake_no_stored_km *ek_pub_km,
-                               size_t *msg_sz)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
-                                                        rx_cert, paired,
-                                                        ek_pub_km, msg_sz);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int hdcp2_verify_hprime(struct intel_connector *connector,
-                              struct hdcp2_ake_send_hprime *rx_hprime)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int
-hdcp2_store_pairing_info(struct intel_connector *connector,
-                        struct hdcp2_ake_send_pairing_info *pairing_info)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int
-hdcp2_prepare_lc_init(struct intel_connector *connector,
-                     struct hdcp2_lc_init *lc_init)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int
-hdcp2_verify_lprime(struct intel_connector *connector,
-                   struct hdcp2_lc_send_lprime *rx_lprime)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int hdcp2_prepare_skey(struct intel_connector *connector,
-                             struct hdcp2_ske_send_eks *ske_data)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int
-hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
-                                     struct hdcp2_rep_send_receiverid_list
-                                                               *rep_topology,
-                                     struct hdcp2_rep_send_ack *rep_send_ack)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
-                                                        rep_topology,
-                                                        rep_send_ack);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int
-hdcp2_verify_mprime(struct intel_connector *connector,
-                   struct hdcp2_rep_stream_ready *stream_ready)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int hdcp2_authenticate_port(struct intel_connector *connector)
-{
-       struct hdcp_port_data *data = &connector->hdcp.port_data;
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
-       if (ret < 0)
-               DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int hdcp2_close_mei_session(struct intel_connector *connector)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct i915_hdcp_comp_master *comp;
-       int ret;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       comp = dev_priv->hdcp_master;
-
-       if (!comp || !comp->ops) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return -EINVAL;
-       }
-
-       ret = comp->ops->close_hdcp_session(comp->mei_dev,
-                                            &connector->hdcp.port_data);
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return ret;
-}
-
-static int hdcp2_deauthenticate_port(struct intel_connector *connector)
-{
-       return hdcp2_close_mei_session(connector);
-}
-
-/* Authentication flow starts from here */
-static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_device *dev = connector->base.dev;
-       union {
-               struct hdcp2_ake_init ake_init;
-               struct hdcp2_ake_send_cert send_cert;
-               struct hdcp2_ake_no_stored_km no_stored_km;
-               struct hdcp2_ake_send_hprime send_hprime;
-               struct hdcp2_ake_send_pairing_info pairing_info;
-       } msgs;
-       const struct intel_hdcp_shim *shim = hdcp->shim;
-       size_t size;
-       int ret;
-
-       /* Init for seq_num */
-       hdcp->seq_num_v = 0;
-       hdcp->seq_num_m = 0;
-
-       ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
-       if (ret < 0)
-               return ret;
-
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
-                                 sizeof(msgs.ake_init));
-       if (ret < 0)
-               return ret;
-
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
-                                &msgs.send_cert, sizeof(msgs.send_cert));
-       if (ret < 0)
-               return ret;
-
-       if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
-               return -EINVAL;
-
-       hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
-
-       if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
-                                       1)) {
-               DRM_ERROR("Receiver ID is revoked\n");
-               return -EPERM;
-       }
-
-       /*
-        * Here msgs.no_stored_km will also hold the message corresponding
-        * to a stored km.
-        */
-       ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
-                                             &hdcp->is_paired,
-                                             &msgs.no_stored_km, &size);
-       if (ret < 0)
-               return ret;
-
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
-       if (ret < 0)
-               return ret;
-
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
-                                &msgs.send_hprime, sizeof(msgs.send_hprime));
-       if (ret < 0)
-               return ret;
-
-       ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
-       if (ret < 0)
-               return ret;
-
-       if (!hdcp->is_paired) {
-               /* Pairing is required */
-               ret = shim->read_2_2_msg(intel_dig_port,
-                                        HDCP_2_2_AKE_SEND_PAIRING_INFO,
-                                        &msgs.pairing_info,
-                                        sizeof(msgs.pairing_info));
-               if (ret < 0)
-                       return ret;
-
-               ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
-               if (ret < 0)
-                       return ret;
-               hdcp->is_paired = true;
-       }
-
-       return 0;
-}
-
-static int hdcp2_locality_check(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       union {
-               struct hdcp2_lc_init lc_init;
-               struct hdcp2_lc_send_lprime send_lprime;
-       } msgs;
-       const struct intel_hdcp_shim *shim = hdcp->shim;
-       int tries = HDCP2_LC_RETRY_CNT, ret, i;
-
-       for (i = 0; i < tries; i++) {
-               ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
-               if (ret < 0)
-                       continue;
-
-               ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
-                                     sizeof(msgs.lc_init));
-               if (ret < 0)
-                       continue;
-
-               ret = shim->read_2_2_msg(intel_dig_port,
-                                        HDCP_2_2_LC_SEND_LPRIME,
-                                        &msgs.send_lprime,
-                                        sizeof(msgs.send_lprime));
-               if (ret < 0)
-                       continue;
-
-               ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
-               if (!ret)
-                       break;
-       }
-
-       return ret;
-}
-
-static int hdcp2_session_key_exchange(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct hdcp2_ske_send_eks send_eks;
-       int ret;
-
-       ret = hdcp2_prepare_skey(connector, &send_eks);
-       if (ret < 0)
-               return ret;
-
-       ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
-                                       sizeof(send_eks));
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static
-int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       union {
-               struct hdcp2_rep_stream_manage stream_manage;
-               struct hdcp2_rep_stream_ready stream_ready;
-       } msgs;
-       const struct intel_hdcp_shim *shim = hdcp->shim;
-       int ret;
-
-       /* Prepare RepeaterAuth_Stream_Manage msg */
-       msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
-       drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
-
-       /* K, the number of streams, is fixed at 1 and stored as big-endian. */
-       msgs.stream_manage.k = cpu_to_be16(1);
-
-       /* For HDMI this is forced to 0x0; for DP SST it is also 0x0. */
-       msgs.stream_manage.streams[0].stream_id = 0;
-       msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
-
-       /* Send it to Repeater */
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
-                                 sizeof(msgs.stream_manage));
-       if (ret < 0)
-               return ret;
-
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
-                                &msgs.stream_ready, sizeof(msgs.stream_ready));
-       if (ret < 0)
-               return ret;
-
-       hdcp->port_data.seq_num_m = hdcp->seq_num_m;
-       hdcp->port_data.streams[0].stream_type = hdcp->content_type;
-
-       ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
-       if (ret < 0)
-               return ret;
-
-       hdcp->seq_num_m++;
-
-       if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
-               DRM_DEBUG_KMS("seq_num_m roll over.\n");
-               return -1;
-       }
-
-       return 0;
-}
-
-static
-int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_device *dev = connector->base.dev;
-       union {
-               struct hdcp2_rep_send_receiverid_list recvid_list;
-               struct hdcp2_rep_send_ack rep_ack;
-       } msgs;
-       const struct intel_hdcp_shim *shim = hdcp->shim;
-       u32 seq_num_v, device_cnt;
-       u8 *rx_info;
-       int ret;
-
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
-                                &msgs.recvid_list, sizeof(msgs.recvid_list));
-       if (ret < 0)
-               return ret;
-
-       rx_info = msgs.recvid_list.rx_info;
-
-       if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
-           HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
-               DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
-               return -EINVAL;
-       }
-
-       /* Convert and store seq_num_v in a local variable as a DWORD */
-       seq_num_v =
-               drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
-
-       if (seq_num_v < hdcp->seq_num_v) {
-               /* Roll over of the seq_num_v from repeater. Reauthenticate. */
-               DRM_DEBUG_KMS("Seq_num_v roll over.\n");
-               return -EINVAL;
-       }
-
-       device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
-                     HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
-       if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
-                                       device_cnt)) {
-               DRM_ERROR("Revoked receiver ID(s) is in list\n");
-               return -EPERM;
-       }
-
-       ret = hdcp2_verify_rep_topology_prepare_ack(connector,
-                                                   &msgs.recvid_list,
-                                                   &msgs.rep_ack);
-       if (ret < 0)
-               return ret;
-
-       hdcp->seq_num_v = seq_num_v;
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
-                                 sizeof(msgs.rep_ack));
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static int hdcp2_authenticate_repeater(struct intel_connector *connector)
-{
-       int ret;
-
-       ret = hdcp2_authenticate_repeater_topology(connector);
-       if (ret < 0)
-               return ret;
-
-       return hdcp2_propagate_stream_management_info(connector);
-}
-
-static int hdcp2_authenticate_sink(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       const struct intel_hdcp_shim *shim = hdcp->shim;
-       int ret;
-
-       ret = hdcp2_authentication_key_exchange(connector);
-       if (ret < 0) {
-               DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
-               return ret;
-       }
-
-       ret = hdcp2_locality_check(connector);
-       if (ret < 0) {
-               DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
-               return ret;
-       }
-
-       ret = hdcp2_session_key_exchange(connector);
-       if (ret < 0) {
-               DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
-               return ret;
-       }
-
-       if (shim->config_stream_type) {
-               ret = shim->config_stream_type(intel_dig_port,
-                                              hdcp->is_repeater,
-                                              hdcp->content_type);
-               if (ret < 0)
-                       return ret;
-       }
-
-       if (hdcp->is_repeater) {
-               ret = hdcp2_authenticate_repeater(connector);
-               if (ret < 0) {
-                       DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
-                       return ret;
-               }
-       }
-
-       hdcp->port_data.streams[0].stream_type = hdcp->content_type;
-       ret = hdcp2_authenticate_port(connector);
-       if (ret < 0)
-               return ret;
-
-       return ret;
-}
-
-static int hdcp2_enable_encryption(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = connector->encoder->port;
-       int ret;
-
-       WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
-
-       if (hdcp->shim->toggle_signalling) {
-               ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
-               if (ret) {
-                       DRM_ERROR("Failed to enable HDCP signalling. %d\n",
-                                 ret);
-                       return ret;
-               }
-       }
-
-       if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
-               /* Link is Authenticated. Now set for Encryption */
-               I915_WRITE(HDCP2_CTL_DDI(port),
-                          I915_READ(HDCP2_CTL_DDI(port)) |
-                          CTL_LINK_ENCRYPTION_REQ);
-       }
-
-       ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
-                                     LINK_ENCRYPTION_STATUS,
-                                     LINK_ENCRYPTION_STATUS,
-                                     ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
-
-       return ret;
-}
-
-static int hdcp2_disable_encryption(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = connector->encoder->port;
-       int ret;
-
-       WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
-
-       I915_WRITE(HDCP2_CTL_DDI(port),
-                  I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
-
-       ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
-                                     LINK_ENCRYPTION_STATUS, 0x0,
-                                     ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
-       if (ret == -ETIMEDOUT)
-               DRM_DEBUG_KMS("Disable Encryption Timedout");
-
-       if (hdcp->shim->toggle_signalling) {
-               ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
-               if (ret) {
-                       DRM_ERROR("Failed to disable HDCP signalling. %d\n",
-                                 ret);
-                       return ret;
-               }
-       }
-
-       return ret;
-}
-
-static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
-{
-       int ret, i, tries = 3;
-
-       for (i = 0; i < tries; i++) {
-               ret = hdcp2_authenticate_sink(connector);
-               if (!ret)
-                       break;
-
-               /* Clearing the mei hdcp session */
-               DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
-                             i + 1, tries, ret);
-               if (hdcp2_deauthenticate_port(connector) < 0)
-                       DRM_DEBUG_KMS("Port deauth failed.\n");
-       }
-
-       if (i != tries) {
-               /*
-                * Ensure the required 200ms minimum interval between
-                * Session Key Exchange and enabling encryption.
-                */
-               msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
-               ret = hdcp2_enable_encryption(connector);
-               if (ret < 0) {
-                       DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
-                       if (hdcp2_deauthenticate_port(connector) < 0)
-                               DRM_DEBUG_KMS("Port deauth failed.\n");
-               }
-       }
-
-       return ret;
-}
-
-static int _intel_hdcp2_enable(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       int ret;
-
-       DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
-                     connector->base.name, connector->base.base.id,
-                     hdcp->content_type);
-
-       ret = hdcp2_authenticate_and_encrypt(connector);
-       if (ret) {
-               DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
-                             hdcp->content_type, ret);
-               return ret;
-       }
-
-       DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
-                     connector->base.name, connector->base.base.id,
-                     hdcp->content_type);
-
-       hdcp->hdcp2_encrypted = true;
-       return 0;
-}
-
-static int _intel_hdcp2_disable(struct intel_connector *connector)
-{
-       int ret;
-
-       DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
-                     connector->base.name, connector->base.base.id);
-
-       ret = hdcp2_disable_encryption(connector);
-
-       if (hdcp2_deauthenticate_port(connector) < 0)
-               DRM_DEBUG_KMS("Port deauth failed.\n");
-
-       connector->hdcp.hdcp2_encrypted = false;
-
-       return ret;
-}
-
-/* Implements the Link Integrity Check for HDCP2.2 */
-static int intel_hdcp2_check_link(struct intel_connector *connector)
-{
-       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = connector->encoder->port;
-       int ret = 0;
-
-       mutex_lock(&hdcp->mutex);
-
-       /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
-       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
-           !hdcp->hdcp2_encrypted) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (WARN_ON(!intel_hdcp2_in_use(connector))) {
-               DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
-                         I915_READ(HDCP2_STATUS_DDI(port)));
-               ret = -ENXIO;
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
-               goto out;
-       }
-
-       ret = hdcp->shim->check_2_2_link(intel_dig_port);
-       if (ret == HDCP_LINK_PROTECTED) {
-               if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&hdcp->prop_work);
-               }
-               goto out;
-       }
-
-       if (ret == HDCP_TOPOLOGY_CHANGE) {
-               if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
-                       goto out;
-
-               DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
-               ret = hdcp2_authenticate_repeater_topology(connector);
-               if (!ret) {
-                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&hdcp->prop_work);
-                       goto out;
-               }
-               DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
-                             connector->base.name, connector->base.base.id,
-                             ret);
-       } else {
-               DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
-                             connector->base.name, connector->base.base.id);
-       }
-
-       ret = _intel_hdcp2_disable(connector);
-       if (ret) {
-               DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
-                         connector->base.name, connector->base.base.id, ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
-               goto out;
-       }
-
-       ret = _intel_hdcp2_enable(connector);
-       if (ret) {
-               DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
-                             connector->base.name, connector->base.base.id,
-                             ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
-               goto out;
-       }
-
-out:
-       mutex_unlock(&hdcp->mutex);
-       return ret;
-}
-
-static void intel_hdcp_check_work(struct work_struct *work)
-{
-       struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
-                                              struct intel_hdcp,
-                                              check_work);
-       struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
-
-       if (!intel_hdcp2_check_link(connector))
-               schedule_delayed_work(&hdcp->check_work,
-                                     DRM_HDCP2_CHECK_PERIOD_MS);
-       else if (!intel_hdcp_check_link(connector))
-               schedule_delayed_work(&hdcp->check_work,
-                                     DRM_HDCP_CHECK_PERIOD_MS);
-}
-
-static int i915_hdcp_component_bind(struct device *i915_kdev,
-                                   struct device *mei_kdev, void *data)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
-
-       DRM_DEBUG("I915 HDCP comp bind\n");
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
-       dev_priv->hdcp_master->mei_dev = mei_kdev;
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       return 0;
-}
-
-static void i915_hdcp_component_unbind(struct device *i915_kdev,
-                                      struct device *mei_kdev, void *data)
-{
-       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
-
-       DRM_DEBUG("I915 HDCP comp unbind\n");
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       dev_priv->hdcp_master = NULL;
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-}
-
-static const struct component_ops i915_hdcp_component_ops = {
-       .bind   = i915_hdcp_component_bind,
-       .unbind = i915_hdcp_component_unbind,
-};
-
-static inline int initialize_hdcp_port_data(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct hdcp_port_data *data = &hdcp->port_data;
-
-       data->port = connector->encoder->port;
-       data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
-       data->protocol = (u8)hdcp->shim->protocol;
-
-       data->k = 1;
-       if (!data->streams)
-               data->streams = kcalloc(data->k,
-                                       sizeof(struct hdcp2_streamid_type),
-                                       GFP_KERNEL);
-       if (!data->streams) {
-               DRM_ERROR("Out of Memory\n");
-               return -ENOMEM;
-       }
-
-       data->streams[0].stream_id = 0;
-       data->streams[0].stream_type = hdcp->content_type;
-
-       return 0;
-}
-
-static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
-{
-       if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
-               return false;
-
-       return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
-               IS_KABYLAKE(dev_priv));
-}
-
-void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       if (!is_hdcp2_supported(dev_priv))
-               return;
-
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       WARN_ON(dev_priv->hdcp_comp_added);
-
-       dev_priv->hdcp_comp_added = true;
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-       ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
-                                 I915_COMPONENT_HDCP);
-       if (ret < 0) {
-               DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
-               mutex_lock(&dev_priv->hdcp_comp_mutex);
-               dev_priv->hdcp_comp_added = false;
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return;
-       }
-}
-
-static void intel_hdcp2_init(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       int ret;
-
-       ret = initialize_hdcp_port_data(connector);
-       if (ret) {
-               DRM_DEBUG_KMS("Mei hdcp data init failed\n");
-               return;
-       }
-
-       hdcp->hdcp2_supported = true;
-}
-
-int intel_hdcp_init(struct intel_connector *connector,
-                   const struct intel_hdcp_shim *shim)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       int ret;
-
-       if (!shim)
-               return -EINVAL;
-
-       ret = drm_connector_attach_content_protection_property(&connector->base);
-       if (ret)
-               return ret;
-
-       hdcp->shim = shim;
-       mutex_init(&hdcp->mutex);
-       INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
-       INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-
-       if (is_hdcp2_supported(dev_priv))
-               intel_hdcp2_init(connector);
-       init_waitqueue_head(&hdcp->cp_irq_queue);
-
-       return 0;
-}
-
-int intel_hdcp_enable(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
-       int ret = -EINVAL;
-
-       if (!hdcp->shim)
-               return -ENOENT;
-
-       mutex_lock(&hdcp->mutex);
-       WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
-
-       /*
-        * Considering that HDCP2.2 is more secure than HDCP1.4, if the setup
-        * is capable of HDCP2.2, it is preferred over HDCP1.4.
-        */
-       if (intel_hdcp2_capable(connector)) {
-               ret = _intel_hdcp2_enable(connector);
-               if (!ret)
-                       check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
-       }
-
-       /* When HDCP2.2 fails, HDCP1.4 will be attempted */
-       if (ret && intel_hdcp_capable(connector))
-               ret = _intel_hdcp_enable(connector);
-
-       if (!ret) {
-               schedule_delayed_work(&hdcp->check_work, check_link_interval);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-               schedule_work(&hdcp->prop_work);
-       }
-
-       mutex_unlock(&hdcp->mutex);
-       return ret;
-}
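As a standalone illustration of the policy above — prefer HDCP 2.2 when the sink is capable, fall back to HDCP 1.4 on failure, and pick the matching link-check period — here is a minimal userspace sketch. The helper name, capability flags and period values are hypothetical stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two link-check periods used above. */
#define CHECK_PERIOD_HDCP1_MS 2000
#define CHECK_PERIOD_HDCP2_MS 500

/* Try HDCP 2.2 first when the sink is capable; fall back to HDCP 1.4 on
 * failure.  Returns the link-check period to use, or 0 if neither works. */
static unsigned int enable_hdcp(bool hdcp2_capable, bool hdcp2_enable_ok,
                                bool hdcp1_capable, bool hdcp1_enable_ok)
{
        if (hdcp2_capable && hdcp2_enable_ok)
                return CHECK_PERIOD_HDCP2_MS;
        if (hdcp1_capable && hdcp1_enable_ok)
                return CHECK_PERIOD_HDCP1_MS;
        return 0;
}

int main(void)
{
        /* Sink claims HDCP 2.2 but the enable fails, so HDCP 1.4 is used. */
        printf("check period: %u ms\n", enable_hdcp(true, false, true, true));
        return 0;
}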
-
-int intel_hdcp_disable(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       int ret = 0;
-
-       if (!hdcp->shim)
-               return -ENOENT;
-
-       mutex_lock(&hdcp->mutex);
-
-       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
-               if (hdcp->hdcp2_encrypted)
-                       ret = _intel_hdcp2_disable(connector);
-               else if (hdcp->hdcp_encrypted)
-                       ret = _intel_hdcp_disable(connector);
-       }
-
-       mutex_unlock(&hdcp->mutex);
-       cancel_delayed_work_sync(&hdcp->check_work);
-       return ret;
-}
-
-void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
-{
-       mutex_lock(&dev_priv->hdcp_comp_mutex);
-       if (!dev_priv->hdcp_comp_added) {
-               mutex_unlock(&dev_priv->hdcp_comp_mutex);
-               return;
-       }
-
-       dev_priv->hdcp_comp_added = false;
-       mutex_unlock(&dev_priv->hdcp_comp_mutex);
-
-       component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
-}
-
-void intel_hdcp_cleanup(struct intel_connector *connector)
-{
-       if (!connector->hdcp.shim)
-               return;
-
-       mutex_lock(&connector->hdcp.mutex);
-       kfree(connector->hdcp.port_data.streams);
-       mutex_unlock(&connector->hdcp.mutex);
-}
-
-void intel_hdcp_atomic_check(struct drm_connector *connector,
-                            struct drm_connector_state *old_state,
-                            struct drm_connector_state *new_state)
-{
-       u64 old_cp = old_state->content_protection;
-       u64 new_cp = new_state->content_protection;
-       struct drm_crtc_state *crtc_state;
-
-       if (!new_state->crtc) {
-               /*
-                * If the connector is being disabled with CP enabled, mark it
-                * desired so it's re-enabled when the connector is brought back
-                */
-               if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
-                       new_state->content_protection =
-                               DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               return;
-       }
-
-       /*
-        * Nothing to do if the state didn't change, or HDCP was activated since
-        * the last commit
-        */
-       if (old_cp == new_cp ||
-           (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
-            new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
-               return;
-
-       crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
-                                                  new_state->crtc);
-       crtc_state->mode_changed = true;
-}
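A compact model of the three decisions taken above (connector being disabled with protection enabled, a no-op transition, and a transition that must force a modeset); the enum values below are stand-ins for the DRM content-protection property constants, not the real ones.

#include <stdbool.h>
#include <stdio.h>

enum cp_state { CP_UNDESIRED, CP_DESIRED, CP_ENABLED }; /* illustrative stand-ins */

/* Returns true when the transition should force a modeset, mirroring the
 * checks in the atomic_check above. */
static bool cp_needs_modeset(bool has_crtc, enum cp_state old_cp, enum cp_state *new_cp)
{
        if (!has_crtc) {
                /* Connector going away with CP enabled: downgrade to "desired". */
                if (old_cp == CP_ENABLED)
                        *new_cp = CP_DESIRED;
                return false;
        }

        /* No change, or the kernel just reported DESIRED -> ENABLED: no-op. */
        if (old_cp == *new_cp ||
            (old_cp == CP_DESIRED && *new_cp == CP_ENABLED))
                return false;

        return true;
}

int main(void)
{
        enum cp_state new_cp = CP_DESIRED;

        printf("modeset needed: %d\n",
               cp_needs_modeset(true, CP_UNDESIRED, &new_cp));
        return 0;
}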
-
-/* Handles the CP_IRQ raised from the DP HDCP sink */
-void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
-{
-       struct intel_hdcp *hdcp = &connector->hdcp;
-
-       if (!hdcp->shim)
-               return;
-
-       atomic_inc(&connector->hdcp.cp_irq_count);
-       wake_up_all(&connector->hdcp.cp_irq_queue);
-
-       schedule_delayed_work(&hdcp->check_work, 0);
-}
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/intel_hdcp.h
deleted file mode 100644 (file)
index be8da85..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_HDCP_H__
-#define __INTEL_HDCP_H__
-
-#include <linux/types.h>
-
-#include <drm/i915_drm.h>
-
-struct drm_connector;
-struct drm_connector_state;
-struct drm_i915_private;
-struct intel_connector;
-struct intel_hdcp_shim;
-
-void intel_hdcp_atomic_check(struct drm_connector *connector,
-                            struct drm_connector_state *old_state,
-                            struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector,
-                   const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
-int intel_hdcp_disable(struct intel_connector *connector);
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
-bool intel_hdcp_capable(struct intel_connector *connector);
-bool intel_hdcp2_capable(struct intel_connector *connector);
-void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
-void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
-void intel_hdcp_cleanup(struct intel_connector *connector);
-void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
-
-#endif /* __INTEL_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
deleted file mode 100644 (file)
index ea3de4a..0000000
+++ /dev/null
@@ -1,687 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/kernel.h>
-
-#include <drm/i915_drm.h>
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_hotplug.h"
-
-/**
- * DOC: Hotplug
- *
- * Simply put, hotplug occurs when a display is connected to or disconnected
- * from the system. However, there may be adapters and docking stations and
- * Display Port short pulses and MST devices involved, complicating matters.
- *
- * Hotplug in i915 is handled in many different levels of abstraction.
- *
- * The platform dependent interrupt handling code in i915_irq.c enables,
- * disables, and does preliminary handling of the interrupts. The interrupt
- * handlers gather the hotplug detect (HPD) information from relevant registers
- * into a platform independent mask of hotplug pins that have fired.
- *
- * The platform independent interrupt handler intel_hpd_irq_handler() in
- * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
- * further processing to appropriate bottom halves (Display Port specific and
- * regular hotplug).
- *
- * The Display Port work function i915_digport_work_func() calls into
- * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
- * pulses, with failures and non-MST long pulses triggering regular hotplug
- * processing on the connector.
- *
- * The regular hotplug work function i915_hotplug_work_func() calls connector
- * detect hooks, and, if connector status changes, triggers sending of hotplug
- * uevent to userspace via drm_kms_helper_hotplug_event().
- *
- * Finally, userspace is responsible for triggering a modeset upon receiving
- * the hotplug uevent, disabling or enabling the crtc as needed.
- *
- * The hotplug interrupt storm detection and mitigation code keeps track of the
- * number of interrupts per hotplug pin over a period of time, and if the
- * number of interrupts exceeds a certain threshold, the interrupt is disabled
- * for a while before being re-enabled. The intention is to mitigate issues
- * arising from broken hardware triggering massive amounts of interrupts and
- * grinding the system to a halt.
- *
- * The current implementation expects that a hotplug interrupt storm will not
- * be seen when a DisplayPort sink is connected. Hence, on platforms whose DP
- * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
- * performed (it was never expected to be disabled in the first place). This
- * is specific to DP sinks handled by that routine; any other display such as
- * HDMI or DVI enabled on the same port will have proper logic, since it uses
- * i915_hotplug_work_func() where the re-enable logic is handled.
- */
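The final step above happens outside the kernel: userspace listens for the DRM hotplug uevent and reprobes/modesets as needed. A minimal libudev listener along those lines might look as follows (build with -ludev); it is only a sketch of the receiving side, not part of i915.

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
        struct udev *udev = udev_new();
        struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");

        udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
        udev_monitor_enable_receiving(mon);

        struct pollfd pfd = { .fd = udev_monitor_get_fd(mon), .events = POLLIN };

        for (;;) {
                if (poll(&pfd, 1, -1) <= 0)
                        break;

                struct udev_device *dev = udev_monitor_receive_device(mon);
                if (!dev)
                        continue;

                /* DRM hotplug uevents carry HOTPLUG=1; a real client would now
                 * reprobe connectors and trigger a modeset as needed. */
                if (udev_device_get_property_value(dev, "HOTPLUG"))
                        printf("hotplug event on %s\n",
                               udev_device_get_sysname(dev));

                udev_device_unref(dev);
        }

        udev_monitor_unref(mon);
        udev_unref(udev);
        return 0;
}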
-
-/**
- * intel_hpd_pin_default - return default pin associated with certain port.
- * @dev_priv: private driver data pointer
- * @port: the hpd port to get associated pin
- *
- * It is only valid for and used by digital port encoders.
- *
- * Return the pin that is associated with @port, or HPD_NONE if no pin is
- * hard associated with that @port.
- */
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
-                                  enum port port)
-{
-       switch (port) {
-       case PORT_A:
-               return HPD_PORT_A;
-       case PORT_B:
-               return HPD_PORT_B;
-       case PORT_C:
-               return HPD_PORT_C;
-       case PORT_D:
-               return HPD_PORT_D;
-       case PORT_E:
-               return HPD_PORT_E;
-       case PORT_F:
-               if (IS_CNL_WITH_PORT_F(dev_priv))
-                       return HPD_PORT_E;
-               return HPD_PORT_F;
-       default:
-               MISSING_CASE(port);
-               return HPD_NONE;
-       }
-}
-
-#define HPD_STORM_DETECT_PERIOD                1000
-#define HPD_STORM_REENABLE_DELAY       (2 * 60 * 1000)
-
-/**
- * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
- * @pin: the pin to gather stats on
- * @long_hpd: whether the HPD IRQ was long or short
- *
- * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
- * storms. Only the pin specific stats and state are changed, the caller is
- * responsible for further action.
- *
- * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
- * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
- * short IRQs count as +1. If this threshold is exceeded, it's considered an
- * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
- *
- * By default, most systems will only count long IRQs towards
- * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
- * suffer from short IRQ storms and must also track these. Because short IRQ
- * storms are naturally caused by sideband interactions with DP MST devices,
- * short IRQ detection is only enabled for systems without DP MST support.
- * Systems which are new enough to support DP MST are far less likely to
- * suffer from IRQ storms at all, so this is fine.
- *
- * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
- * and should only be adjusted for automated hotplug testing.
- *
- * Return true if an IRQ storm was detected on @pin.
- */
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
-                                      enum hpd_pin pin, bool long_hpd)
-{
-       struct i915_hotplug *hpd = &dev_priv->hotplug;
-       unsigned long start = hpd->stats[pin].last_jiffies;
-       unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
-       const int increment = long_hpd ? 10 : 1;
-       const int threshold = hpd->hpd_storm_threshold;
-       bool storm = false;
-
-       if (!threshold ||
-           (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
-               return false;
-
-       if (!time_in_range(jiffies, start, end)) {
-               hpd->stats[pin].last_jiffies = jiffies;
-               hpd->stats[pin].count = 0;
-       }
-
-       hpd->stats[pin].count += increment;
-       if (hpd->stats[pin].count > threshold) {
-               hpd->stats[pin].state = HPD_MARK_DISABLED;
-               DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
-               storm = true;
-       } else {
-               DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
-                             hpd->stats[pin].count);
-       }
-
-       return storm;
-}
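For a worked example of the accounting documented above (long pulses add 10, short pulses add 1, and the counter restarts when the detection window expires), here is a simplified standalone model. The window length matches HPD_STORM_DETECT_PERIOD above, but the threshold value is assumed and the timekeeping is reduced to plain millisecond arithmetic.

#include <stdbool.h>
#include <stdio.h>

#define STORM_DETECT_PERIOD_MS 1000 /* matches HPD_STORM_DETECT_PERIOD above */
#define STORM_THRESHOLD        50   /* assumed; the driver's threshold is tunable */

struct pin_stats {
        unsigned long window_start_ms;
        int count;
};

/* Mirrors the accounting above: +10 for a long pulse, +1 for a short one,
 * counter restarted once the detection window has expired. */
static bool storm_detect(struct pin_stats *s, unsigned long now_ms, bool long_hpd)
{
        if (now_ms - s->window_start_ms > STORM_DETECT_PERIOD_MS) {
                s->window_start_ms = now_ms;
                s->count = 0;
        }

        s->count += long_hpd ? 10 : 1;
        return s->count > STORM_THRESHOLD;
}

int main(void)
{
        struct pin_stats s = { 0, 0 };

        /* Six long pulses within one window push the count past the threshold. */
        for (int i = 0; i < 6; i++)
                if (storm_detect(&s, 100 + i, true))
                        printf("storm detected after pulse %d (count %d)\n",
                               i + 1, s.count);
        return 0;
}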
-
-static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_connector *intel_connector;
-       struct intel_encoder *intel_encoder;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       enum hpd_pin pin;
-       bool hpd_disabled = false;
-
-       lockdep_assert_held(&dev_priv->irq_lock);
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               if (connector->polled != DRM_CONNECTOR_POLL_HPD)
-                       continue;
-
-               intel_connector = to_intel_connector(connector);
-               intel_encoder = intel_connector->encoder;
-               if (!intel_encoder)
-                       continue;
-
-               pin = intel_encoder->hpd_pin;
-               if (pin == HPD_NONE ||
-                   dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
-                       continue;
-
-               DRM_INFO("HPD interrupt storm detected on connector %s: "
-                        "switching from hotplug detection to polling\n",
-                        connector->name);
-
-               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
-               connector->polled = DRM_CONNECTOR_POLL_CONNECT
-                       | DRM_CONNECTOR_POLL_DISCONNECT;
-               hpd_disabled = true;
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       /* Enable polling and queue hotplug re-enabling. */
-       if (hpd_disabled) {
-               drm_kms_helper_poll_enable(dev);
-               mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
-                                msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
-       }
-}
-
-static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv),
-                            hotplug.reenable_work.work);
-       struct drm_device *dev = &dev_priv->drm;
-       intel_wakeref_t wakeref;
-       enum hpd_pin pin;
-
-       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       for_each_hpd_pin(pin) {
-               struct drm_connector *connector;
-               struct drm_connector_list_iter conn_iter;
-
-               if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
-                       continue;
-
-               dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-
-               drm_connector_list_iter_begin(dev, &conn_iter);
-               drm_for_each_connector_iter(connector, &conn_iter) {
-                       struct intel_connector *intel_connector = to_intel_connector(connector);
-
-                       /* Don't check MST ports, they don't have pins */
-                       if (!intel_connector->mst_port &&
-                           intel_connector->encoder->hpd_pin == pin) {
-                               if (connector->polled != intel_connector->polled)
-                                       DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-                                                        connector->name);
-                               connector->polled = intel_connector->polled;
-                               if (!connector->polled)
-                                       connector->polled = DRM_CONNECTOR_POLL_HPD;
-                       }
-               }
-               drm_connector_list_iter_end(&conn_iter);
-       }
-       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-}
-
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
-                          struct intel_connector *connector)
-{
-       struct drm_device *dev = connector->base.dev;
-       enum drm_connector_status old_status;
-
-       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-       old_status = connector->base.status;
-
-       connector->base.status =
-               drm_helper_probe_detect(&connector->base, NULL, false);
-
-       if (old_status == connector->base.status)
-               return false;
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-                     connector->base.base.id,
-                     connector->base.name,
-                     drm_get_connector_status_name(old_status),
-                     drm_get_connector_status_name(connector->base.status));
-
-       return true;
-}
-
-static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
-{
-       return intel_encoder_is_dig_port(encoder) &&
-               enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, hotplug.dig_port_work);
-       u32 long_port_mask, short_port_mask;
-       struct intel_encoder *encoder;
-       u32 old_bits = 0;
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       long_port_mask = dev_priv->hotplug.long_port_mask;
-       dev_priv->hotplug.long_port_mask = 0;
-       short_port_mask = dev_priv->hotplug.short_port_mask;
-       dev_priv->hotplug.short_port_mask = 0;
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_digital_port *dig_port;
-               enum port port = encoder->port;
-               bool long_hpd, short_hpd;
-               enum irqreturn ret;
-
-               if (!intel_encoder_has_hpd_pulse(encoder))
-                       continue;
-
-               long_hpd = long_port_mask & BIT(port);
-               short_hpd = short_port_mask & BIT(port);
-
-               if (!long_hpd && !short_hpd)
-                       continue;
-
-               dig_port = enc_to_dig_port(&encoder->base);
-
-               ret = dig_port->hpd_pulse(dig_port, long_hpd);
-               if (ret == IRQ_NONE) {
-                       /* fall back to old school hpd */
-                       old_bits |= BIT(encoder->hpd_pin);
-               }
-       }
-
-       if (old_bits) {
-               spin_lock_irq(&dev_priv->irq_lock);
-               dev_priv->hotplug.event_bits |= old_bits;
-               spin_unlock_irq(&dev_priv->irq_lock);
-               schedule_work(&dev_priv->hotplug.hotplug_work);
-       }
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-static void i915_hotplug_work_func(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, hotplug.hotplug_work);
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_connector *intel_connector;
-       struct intel_encoder *intel_encoder;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       bool changed = false;
-       u32 hpd_event_bits;
-
-       mutex_lock(&dev->mode_config.mutex);
-       DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
-       spin_lock_irq(&dev_priv->irq_lock);
-
-       hpd_event_bits = dev_priv->hotplug.event_bits;
-       dev_priv->hotplug.event_bits = 0;
-
-       /* Enable polling for connectors which had HPD IRQ storms */
-       intel_hpd_irq_storm_switch_to_polling(dev_priv);
-
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               intel_connector = to_intel_connector(connector);
-               if (!intel_connector->encoder)
-                       continue;
-               intel_encoder = intel_connector->encoder;
-               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-                       DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-                                     connector->name, intel_encoder->hpd_pin);
-
-                       changed |= intel_encoder->hotplug(intel_encoder,
-                                                         intel_connector);
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-       mutex_unlock(&dev->mode_config.mutex);
-
-       if (changed)
-               drm_kms_helper_hotplug_event(dev);
-}
-
-
-/**
- * intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
- * @pin_mask: a mask of hpd pins that have triggered the irq
- * @long_mask: a mask of hpd pins that may be long hpd pulses
- *
- * This is the main hotplug irq handler for all platforms. The platform specific
- * irq handlers call the platform specific hotplug irq handlers, which read and
- * decode the appropriate registers into bitmasks about hpd pins that have
- * triggered (@pin_mask), and which of those pins may be long pulses
- * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
- * is not a digital port.
- *
- * Here, we do hotplug irq storm detection and mitigation, and pass further
- * processing to appropriate bottom halves.
- */
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
-                          u32 pin_mask, u32 long_mask)
-{
-       struct intel_encoder *encoder;
-       bool storm_detected = false;
-       bool queue_dig = false, queue_hp = false;
-       u32 long_hpd_pulse_mask = 0;
-       u32 short_hpd_pulse_mask = 0;
-       enum hpd_pin pin;
-
-       if (!pin_mask)
-               return;
-
-       spin_lock(&dev_priv->irq_lock);
-
-       /*
-        * Determine whether ->hpd_pulse() exists for each pin, and
-        * whether we have a short or a long pulse. This is needed
-        * as each pin may have up to two encoders (HDMI and DP) and
-        * only the one of them (DP) will have ->hpd_pulse().
-        */
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
-               enum port port = encoder->port;
-               bool long_hpd;
-
-               pin = encoder->hpd_pin;
-               if (!(BIT(pin) & pin_mask))
-                       continue;
-
-               if (!has_hpd_pulse)
-                       continue;
-
-               long_hpd = long_mask & BIT(pin);
-
-               DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
-                                long_hpd ? "long" : "short");
-               queue_dig = true;
-
-               if (long_hpd) {
-                       long_hpd_pulse_mask |= BIT(pin);
-                       dev_priv->hotplug.long_port_mask |= BIT(port);
-               } else {
-                       short_hpd_pulse_mask |= BIT(pin);
-                       dev_priv->hotplug.short_port_mask |= BIT(port);
-               }
-       }
-
-       /* Now process each pin just once */
-       for_each_hpd_pin(pin) {
-               bool long_hpd;
-
-               if (!(BIT(pin) & pin_mask))
-                       continue;
-
-               if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
-                       /*
-                        * On GMCH platforms the interrupt mask bits only
-                        * prevent irq generation, not the setting of the
-                        * hotplug bits itself. So only WARN about unexpected
-                        * interrupts on saner platforms.
-                        */
-                       WARN_ONCE(!HAS_GMCH(dev_priv),
-                                 "Received HPD interrupt on pin %d although disabled\n", pin);
-                       continue;
-               }
-
-               if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
-                       continue;
-
-               /*
-                * Delegate to ->hpd_pulse() if one of the encoders for this
-                * pin has it, otherwise let the hotplug_work deal with this
-                * pin directly.
-                */
-               if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
-                       long_hpd = long_hpd_pulse_mask & BIT(pin);
-               } else {
-                       dev_priv->hotplug.event_bits |= BIT(pin);
-                       long_hpd = true;
-                       queue_hp = true;
-               }
-
-               if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
-                       dev_priv->hotplug.event_bits &= ~BIT(pin);
-                       storm_detected = true;
-                       queue_hp = true;
-               }
-       }
-
-       /*
-        * Disable any IRQs that storms were detected on. Polling enablement
-        * happens later in our hotplug work.
-        */
-       if (storm_detected && dev_priv->display_irqs_enabled)
-               dev_priv->display.hpd_irq_setup(dev_priv);
-       spin_unlock(&dev_priv->irq_lock);
-
-       /*
-        * Our hotplug handler can grab modeset locks (by calling down into the
-        * fb helpers). Hence it must not be run on our own dev-priv->wq work
-        * queue for otherwise the flush_work in the pageflip code will
-        * deadlock.
-        */
-       if (queue_dig)
-               queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
-       if (queue_hp)
-               schedule_work(&dev_priv->hotplug.hotplug_work);
-}
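As a small aside on the @pin_mask/@long_mask convention described in the kerneldoc above, the per-pin classification boils down to the following bit arithmetic; the pin numbers and masks here are made up purely for illustration.

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
        /* Hypothetical masks: pins 1 and 3 fired, only pin 3 was a long pulse. */
        unsigned int pin_mask = BIT(1) | BIT(3);
        unsigned int long_mask = BIT(3);

        for (unsigned int pin = 0; pin < 8; pin++) {
                if (!(pin_mask & BIT(pin)))
                        continue;
                printf("pin %u: %s pulse\n", pin,
                       (long_mask & BIT(pin)) ? "long" : "short");
        }
        return 0;
}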
-
-/**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- *
- * Also see: intel_hpd_poll_init(), which enables connector polling
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
-       int i;
-
-       for_each_hpd_pin(i) {
-               dev_priv->hotplug.stats[i].count = 0;
-               dev_priv->hotplug.stats[i].state = HPD_ENABLED;
-       }
-
-       WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
-       schedule_work(&dev_priv->hotplug.poll_init_work);
-
-       /*
-        * Interrupt setup is already guaranteed to be single-threaded, this is
-        * just to make the assert_spin_locked checks happy.
-        */
-       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
-               spin_lock_irq(&dev_priv->irq_lock);
-               if (dev_priv->display_irqs_enabled)
-                       dev_priv->display.hpd_irq_setup(dev_priv);
-               spin_unlock_irq(&dev_priv->irq_lock);
-       }
-}
-
-static void i915_hpd_poll_init_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private,
-                            hotplug.poll_init_work);
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       bool enabled;
-
-       mutex_lock(&dev->mode_config.mutex);
-
-       enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               struct intel_connector *intel_connector =
-                       to_intel_connector(connector);
-               connector->polled = intel_connector->polled;
-
-               /* MST has a dynamic intel_connector->encoder and its reprobing
-                * is all handled by the MST helpers. */
-               if (intel_connector->mst_port)
-                       continue;
-
-               if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
-                   intel_connector->encoder->hpd_pin > HPD_NONE) {
-                       connector->polled = enabled ?
-                               DRM_CONNECTOR_POLL_CONNECT |
-                               DRM_CONNECTOR_POLL_DISCONNECT :
-                               DRM_CONNECTOR_POLL_HPD;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       if (enabled)
-               drm_kms_helper_poll_enable(dev);
-
-       mutex_unlock(&dev->mode_config.mutex);
-
-       /*
-        * We might have missed any hotplugs that happened while we were
-        * in the middle of disabling polling
-        */
-       if (!enabled)
-               drm_helper_hpd_irq_event(dev);
-}
-
-/**
- * intel_hpd_poll_init - enables/disables polling for connectors with hpd
- * @dev_priv: i915 device instance
- *
- * This function enables polling for all connectors, regardless of whether or
- * not they support hotplug detection. Under certain conditions HPD may not be
- * functional. On most Intel GPUs, this happens when we enter runtime suspend.
- * On Valleyview and Cherryview systems, this also happens when we shut off all
- * of the powerwells.
- *
- * Since this function can get called in contexts where we're already holding
- * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
- * worker.
- *
- * Also see: intel_hpd_init(), which restores hpd handling.
- */
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
-{
-       WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
-
-       /*
-        * We might already be holding dev->mode_config.mutex, so do this in a
-        * separate worker.
-        * As well, there's no issue if we race here, since we always reschedule
-        * this worker anyway.
-        */
-       schedule_work(&dev_priv->hotplug.poll_init_work);
-}
-
-void intel_hpd_init_work(struct drm_i915_private *dev_priv)
-{
-       INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
-       INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
-       INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
-       INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
-                         intel_hpd_irq_storm_reenable_work);
-}
-
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
-       spin_lock_irq(&dev_priv->irq_lock);
-
-       dev_priv->hotplug.long_port_mask = 0;
-       dev_priv->hotplug.short_port_mask = 0;
-       dev_priv->hotplug.event_bits = 0;
-
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       cancel_work_sync(&dev_priv->hotplug.dig_port_work);
-       cancel_work_sync(&dev_priv->hotplug.hotplug_work);
-       cancel_work_sync(&dev_priv->hotplug.poll_init_work);
-       cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
-}
-
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
-{
-       bool ret = false;
-
-       if (pin == HPD_NONE)
-               return false;
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
-               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
-               ret = true;
-       }
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       return ret;
-}
-
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
-{
-       if (pin == HPD_NONE)
-               return;
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-       spin_unlock_irq(&dev_priv->irq_lock);
-}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.h b/drivers/gpu/drm/i915/intel_hotplug.h
deleted file mode 100644 (file)
index 805f897..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_HOTPLUG_H__
-#define __INTEL_HOTPLUG_H__
-
-#include <linux/types.h>
-
-#include <drm/i915_drm.h>
-
-struct drm_i915_private;
-struct intel_connector;
-struct intel_encoder;
-
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
-                          struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
-                          u32 pin_mask, u32 long_mask);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_work(struct drm_i915_private *dev_priv);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
-                                  enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-
-#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
deleted file mode 100644 (file)
index b19800b..0000000
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- *    Jerome Anand <jerome.anand@intel.com>
- *    based on VED patches
- *
- */
-
-/**
- * DOC: LPE Audio integration for HDMI or DP playback
- *
- * Motivation:
- * Atom platforms (e.g. Valleyview and Cherrytrail) integrate a DMA-based
- * interface as an alternative to the traditional HDaudio path. While this
- * mode is unrelated to the LPE aka SST audio engine, the documentation refers
- * to this mode as LPE so we keep this notation for the sake of consistency.
- *
- * The interface is handled by a separate standalone driver maintained in the
- * ALSA subsystem for simplicity. To minimize the interaction between the two
- * subsystems, a bridge is setup between the hdmi-lpe-audio and i915:
- * 1. Create a platform device to share MMIO/IRQ resources
- * 2. Make the platform device child of i915 device for runtime PM.
- * 3. Create IRQ chip to forward the LPE audio irqs.
- * The hdmi-lpe-audio driver probes the LPE audio device and creates a new
- * sound card.
- *
- * Threats:
- * Due to a restriction in the Linux platform device model, users need to
- * manually uninstall the hdmi-lpe-audio driver before uninstalling the i915
- * module; otherwise we might run into use-after-free issues after i915
- * removes the platform device: even though the hdmi-lpe-audio driver is
- * released, the module is still in "installed" status.
- *
- * Implementation:
- * The MMIO/REG platform resources are created according to the registers
- * specification.
- * When forwarding LPE audio irqs, the flow control handler selection depends
- * on the platform; for example, on Valleyview handle_simple_irq() is enough.
- *
- */
-
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/intel_lpe_audio.h>
-
-#include "i915_drv.h"
-#include "intel_lpe_audio.h"
-
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
-
-static struct platform_device *
-lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct platform_device_info pinfo = {};
-       struct resource *rsc;
-       struct platform_device *platdev;
-       struct intel_hdmi_lpe_audio_pdata *pdata;
-
-       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return ERR_PTR(-ENOMEM);
-
-       rsc = kcalloc(2, sizeof(*rsc), GFP_KERNEL);
-       if (!rsc) {
-               kfree(pdata);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       rsc[0].start    = rsc[0].end = dev_priv->lpe_audio.irq;
-       rsc[0].flags    = IORESOURCE_IRQ;
-       rsc[0].name     = "hdmi-lpe-audio-irq";
-
-       rsc[1].start    = pci_resource_start(dev->pdev, 0) +
-               I915_HDMI_LPE_AUDIO_BASE;
-       rsc[1].end      = pci_resource_start(dev->pdev, 0) +
-               I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
-       rsc[1].flags    = IORESOURCE_MEM;
-       rsc[1].name     = "hdmi-lpe-audio-mmio";
-
-       pinfo.parent = dev->dev;
-       pinfo.name = "hdmi-lpe-audio";
-       pinfo.id = -1;
-       pinfo.res = rsc;
-       pinfo.num_res = 2;
-       pinfo.data = pdata;
-       pinfo.size_data = sizeof(*pdata);
-       pinfo.dma_mask = DMA_BIT_MASK(32);
-
-       pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
-       pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
-       pdata->port[0].pipe = -1;
-       pdata->port[1].pipe = -1;
-       pdata->port[2].pipe = -1;
-       spin_lock_init(&pdata->lpe_audio_slock);
-
-       platdev = platform_device_register_full(&pinfo);
-       kfree(rsc);
-       kfree(pdata);
-
-       if (IS_ERR(platdev)) {
-               DRM_ERROR("Failed to allocate LPE audio platform device\n");
-               return platdev;
-       }
-
-       pm_runtime_no_callbacks(&platdev->dev);
-
-       return platdev;
-}
-
-static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
-{
-       /* XXX Note that platform_device_register_full() allocates a dma_mask
-        * and never frees it. We can't free it here as we cannot guarantee
-        * this is the last reference (i.e. that the dma_mask will not be
-        * used after our unregister). So we choose to leak the sizeof(u64)
-        * allocation here - it should be fixed in the platform_device code
-        * rather than by us fiddling with its internals.
-        */
-
-       platform_device_unregister(dev_priv->lpe_audio.platdev);
-}
-
-static void lpe_audio_irq_unmask(struct irq_data *d)
-{
-}
-
-static void lpe_audio_irq_mask(struct irq_data *d)
-{
-}
-
-static struct irq_chip lpe_audio_irqchip = {
-       .name = "hdmi_lpe_audio_irqchip",
-       .irq_mask = lpe_audio_irq_mask,
-       .irq_unmask = lpe_audio_irq_unmask,
-};
-
-static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
-{
-       int irq = dev_priv->lpe_audio.irq;
-
-       WARN_ON(!intel_irqs_enabled(dev_priv));
-       irq_set_chip_and_handler_name(irq,
-                               &lpe_audio_irqchip,
-                               handle_simple_irq,
-                               "hdmi_lpe_audio_irq_handler");
-
-       return irq_set_chip_data(irq, dev_priv);
-}
-
-static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
-{
-       int lpe_present = false;
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               static const struct pci_device_id atom_hdaudio_ids[] = {
-                       /* Baytrail */
-                       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
-                       /* Braswell */
-                       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2284)},
-                       {}
-               };
-
-               if (!pci_dev_present(atom_hdaudio_ids)) {
-                       DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
-                       lpe_present = true;
-               }
-       }
-       return lpe_present;
-}
-
-static int lpe_audio_setup(struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       dev_priv->lpe_audio.irq = irq_alloc_desc(0);
-       if (dev_priv->lpe_audio.irq < 0) {
-               DRM_ERROR("Failed to allocate IRQ desc: %d\n",
-                       dev_priv->lpe_audio.irq);
-               ret = dev_priv->lpe_audio.irq;
-               goto err;
-       }
-
-       DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
-
-       ret = lpe_audio_irq_init(dev_priv);
-
-       if (ret) {
-               DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
-                       ret);
-               goto err_free_irq;
-       }
-
-       dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv);
-
-       if (IS_ERR(dev_priv->lpe_audio.platdev)) {
-               ret = PTR_ERR(dev_priv->lpe_audio.platdev);
-               DRM_ERROR("Failed to create lpe audio platform device: %d\n",
-                       ret);
-               goto err_free_irq;
-       }
-
-       /* enable chicken bit; at least this is required for Dell Wyse 3040
-        * with DP outputs (but only sometimes, for some reason!)
-        */
-       I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE);
-
-       return 0;
-err_free_irq:
-       irq_free_desc(dev_priv->lpe_audio.irq);
-err:
-       dev_priv->lpe_audio.irq = -1;
-       dev_priv->lpe_audio.platdev = NULL;
-       return ret;
-}
-
-/**
- * intel_lpe_audio_irq_handler() - forwards the LPE audio irq
- * @dev_priv: the i915 drm device private data
- *
- * The LPE Audio irq is forwarded to the irq handler registered by the LPE
- * audio driver.
- */
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
-{
-       int ret;
-
-       if (!HAS_LPE_AUDIO(dev_priv))
-               return;
-
-       ret = generic_handle_irq(dev_priv->lpe_audio.irq);
-       if (ret)
-               DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
-                               ret);
-}
-
-/**
- * intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio
- * driver and i915
- * @dev_priv: the i915 drm device private data
- *
- * Return: 0 if successful, non-zero if detection or
- * allocation/initialization fails.
- */
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
-{
-       int ret = -ENODEV;
-
-       if (lpe_audio_detect(dev_priv)) {
-               ret = lpe_audio_setup(dev_priv);
-               if (ret < 0)
-                       DRM_ERROR("failed to setup LPE Audio bridge\n");
-       }
-       return ret;
-}
-
-/**
- * intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE
- * audio driver and i915
- * @dev_priv: the i915 drm device private data
- *
- * Release all the resources for the LPE audio <-> i915 bridge.
- */
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
-{
-       struct irq_desc *desc;
-
-       if (!HAS_LPE_AUDIO(dev_priv))
-               return;
-
-       desc = irq_to_desc(dev_priv->lpe_audio.irq);
-
-       lpe_audio_platdev_destroy(dev_priv);
-
-       irq_free_desc(dev_priv->lpe_audio.irq);
-
-       dev_priv->lpe_audio.irq = -1;
-       dev_priv->lpe_audio.platdev = NULL;
-}
-
-/**
- * intel_lpe_audio_notify() - notify LPE audio of an event
- * @dev_priv: the i915 drm device private data
- * @pipe: pipe
- * @port: port
- * @eld: ELD data
- * @ls_clock: Link symbol clock in kHz
- * @dp_output: Driving a DP output?
- *
- * Notify the LPE audio driver of an ELD change.
- */
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
-                           enum pipe pipe, enum port port,
-                           const void *eld, int ls_clock, bool dp_output)
-{
-       unsigned long irqflags;
-       struct intel_hdmi_lpe_audio_pdata *pdata;
-       struct intel_hdmi_lpe_audio_port_pdata *ppdata;
-       u32 audio_enable;
-
-       if (!HAS_LPE_AUDIO(dev_priv))
-               return;
-
-       pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev);
-       ppdata = &pdata->port[port - PORT_B];
-
-       spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
-
-       audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
-
-       if (eld != NULL) {
-               memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
-               ppdata->pipe = pipe;
-               ppdata->ls_clock = ls_clock;
-               ppdata->dp_output = dp_output;
-
-               /* Unmute the amp for both DP and HDMI */
-               I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
-                          audio_enable & ~VLV_AMP_MUTE);
-       } else {
-               memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
-               ppdata->pipe = -1;
-               ppdata->ls_clock = 0;
-               ppdata->dp_output = false;
-
-               /* Mute the amp for both DP and HDMI */
-               I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
-                          audio_enable | VLV_AMP_MUTE);
-       }
-
-       if (pdata->notify_audio_lpe)
-               pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B);
-
-       spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
-}
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.h b/drivers/gpu/drm/i915/intel_lpe_audio.h
deleted file mode 100644 (file)
index f848c50..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_LPE_AUDIO_H__
-#define __INTEL_LPE_AUDIO_H__
-
-#include <linux/types.h>
-
-enum pipe;
-enum port;
-struct drm_i915_private;
-
-int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
-                           enum pipe pipe, enum port port,
-                           const void *eld, int ls_clock, bool dp_output);
-
-#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
deleted file mode 100644 (file)
index 8248812..0000000
+++ /dev/null
@@ -1,1176 +0,0 @@
-/*
- * Copyright 2008 Intel Corporation <hong.liu@intel.com>
- * Copyright 2008 Red Hat <mjg@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/firmware.h>
-#include <acpi/video.h>
-
-#include <drm/i915_drm.h>
-
-#include "display/intel_panel.h"
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_opregion.h"
-
-#define OPREGION_HEADER_OFFSET 0
-#define OPREGION_ACPI_OFFSET   0x100
-#define   ACPI_CLID 0x01ac /* current lid state indicator */
-#define   ACPI_CDCK 0x01b0 /* current docking state indicator */
-#define OPREGION_SWSCI_OFFSET  0x200
-#define OPREGION_ASLE_OFFSET   0x300
-#define OPREGION_VBT_OFFSET    0x400
-#define OPREGION_ASLE_EXT_OFFSET       0x1C00
-
-#define OPREGION_SIGNATURE "IntelGraphicsMem"
-#define MBOX_ACPI      (1<<0)
-#define MBOX_SWSCI     (1<<1)
-#define MBOX_ASLE      (1<<2)
-#define MBOX_ASLE_EXT  (1<<4)
-
-struct opregion_header {
-       u8 signature[16];
-       u32 size;
-       struct {
-               u8 rsvd;
-               u8 revision;
-               u8 minor;
-               u8 major;
-       }  __packed over;
-       u8 bios_ver[32];
-       u8 vbios_ver[16];
-       u8 driver_ver[16];
-       u32 mboxes;
-       u32 driver_model;
-       u32 pcon;
-       u8 dver[32];
-       u8 rsvd[124];
-} __packed;
-
-/* OpRegion mailbox #1: public ACPI methods */
-struct opregion_acpi {
-       u32 drdy;       /* driver readiness */
-       u32 csts;       /* notification status */
-       u32 cevt;       /* current event */
-       u8 rsvd1[20];
-       u32 didl[8];    /* supported display devices ID list */
-       u32 cpdl[8];    /* currently presented display list */
-       u32 cadl[8];    /* currently active display list */
-       u32 nadl[8];    /* next active devices list */
-       u32 aslp;       /* ASL sleep time-out */
-       u32 tidx;       /* toggle table index */
-       u32 chpd;       /* current hotplug enable indicator */
-       u32 clid;       /* current lid state*/
-       u32 cdck;       /* current docking state */
-       u32 sxsw;       /* Sx state resume */
-       u32 evts;       /* ASL supported events */
-       u32 cnot;       /* current OS notification */
-       u32 nrdy;       /* driver status */
-       u32 did2[7];    /* extended supported display devices ID list */
-       u32 cpd2[7];    /* extended attached display devices list */
-       u8 rsvd2[4];
-} __packed;
-
-/* OpRegion mailbox #2: SWSCI */
-struct opregion_swsci {
-       u32 scic;       /* SWSCI command|status|data */
-       u32 parm;       /* command parameters */
-       u32 dslp;       /* driver sleep time-out */
-       u8 rsvd[244];
-} __packed;
-
-/* OpRegion mailbox #3: ASLE */
-struct opregion_asle {
-       u32 ardy;       /* driver readiness */
-       u32 aslc;       /* ASLE interrupt command */
-       u32 tche;       /* technology enabled indicator */
-       u32 alsi;       /* current ALS illuminance reading */
-       u32 bclp;       /* backlight brightness to set */
-       u32 pfit;       /* panel fitting state */
-       u32 cblv;       /* current brightness level */
-       u16 bclm[20];   /* backlight level duty cycle mapping table */
-       u32 cpfm;       /* current panel fitting mode */
-       u32 epfm;       /* enabled panel fitting modes */
-       u8 plut[74];    /* panel LUT and identifier */
-       u32 pfmb;       /* PWM freq and min brightness */
-       u32 cddv;       /* color correction default values */
-       u32 pcft;       /* power conservation features */
-       u32 srot;       /* supported rotation angles */
-       u32 iuer;       /* IUER events */
-       u64 fdss;
-       u32 fdsp;
-       u32 stat;
-       u64 rvda;       /* Physical (2.0) or relative from opregion (2.1+)
-                        * address of raw VBT data. */
-       u32 rvds;       /* Size of raw vbt data */
-       u8 rsvd[58];
-} __packed;
-
-/* OpRegion mailbox #5: ASLE ext */
-struct opregion_asle_ext {
-       u32 phed;       /* Panel Header */
-       u8 bddc[256];   /* Panel EDID */
-       u8 rsvd[764];
-} __packed;
-
-/* Driver readiness indicator */
-#define ASLE_ARDY_READY                (1 << 0)
-#define ASLE_ARDY_NOT_READY    (0 << 0)
-
-/* ASLE Interrupt Command (ASLC) bits */
-#define ASLC_SET_ALS_ILLUM             (1 << 0)
-#define ASLC_SET_BACKLIGHT             (1 << 1)
-#define ASLC_SET_PFIT                  (1 << 2)
-#define ASLC_SET_PWM_FREQ              (1 << 3)
-#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
-#define ASLC_BUTTON_ARRAY              (1 << 5)
-#define ASLC_CONVERTIBLE_INDICATOR     (1 << 6)
-#define ASLC_DOCKING_INDICATOR         (1 << 7)
-#define ASLC_ISCT_STATE_CHANGE         (1 << 8)
-#define ASLC_REQ_MSK                   0x1ff
-/* response bits */
-#define ASLC_ALS_ILLUM_FAILED          (1 << 10)
-#define ASLC_BACKLIGHT_FAILED          (1 << 12)
-#define ASLC_PFIT_FAILED               (1 << 14)
-#define ASLC_PWM_FREQ_FAILED           (1 << 16)
-#define ASLC_ROTATION_ANGLES_FAILED    (1 << 18)
-#define ASLC_BUTTON_ARRAY_FAILED       (1 << 20)
-#define ASLC_CONVERTIBLE_FAILED                (1 << 22)
-#define ASLC_DOCKING_FAILED            (1 << 24)
-#define ASLC_ISCT_STATE_FAILED         (1 << 26)
-
-/* Technology enabled indicator */
-#define ASLE_TCHE_ALS_EN       (1 << 0)
-#define ASLE_TCHE_BLC_EN       (1 << 1)
-#define ASLE_TCHE_PFIT_EN      (1 << 2)
-#define ASLE_TCHE_PFMB_EN      (1 << 3)
-
-/* ASLE backlight brightness to set */
-#define ASLE_BCLP_VALID                (1<<31)
-#define ASLE_BCLP_MSK          (~(1<<31))
-
-/* ASLE panel fitting request */
-#define ASLE_PFIT_VALID         (1<<31)
-#define ASLE_PFIT_CENTER (1<<0)
-#define ASLE_PFIT_STRETCH_TEXT (1<<1)
-#define ASLE_PFIT_STRETCH_GFX (1<<2)
-
-/* PWM frequency and minimum brightness */
-#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
-#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
-#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
-#define ASLE_PFMB_PWM_VALID (1<<31)
-
-#define ASLE_CBLV_VALID         (1<<31)
-
-/* IUER */
-#define ASLE_IUER_DOCKING              (1 << 7)
-#define ASLE_IUER_CONVERTIBLE          (1 << 6)
-#define ASLE_IUER_ROTATION_LOCK_BTN    (1 << 4)
-#define ASLE_IUER_VOLUME_DOWN_BTN      (1 << 3)
-#define ASLE_IUER_VOLUME_UP_BTN                (1 << 2)
-#define ASLE_IUER_WINDOWS_BTN          (1 << 1)
-#define ASLE_IUER_POWER_BTN            (1 << 0)
-
-/* Software System Control Interrupt (SWSCI) */
-#define SWSCI_SCIC_INDICATOR           (1 << 0)
-#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
-#define SWSCI_SCIC_MAIN_FUNCTION_MASK  (0xf << 1)
-#define SWSCI_SCIC_SUB_FUNCTION_SHIFT  8
-#define SWSCI_SCIC_SUB_FUNCTION_MASK   (0xff << 8)
-#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT        8
-#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
-#define SWSCI_SCIC_EXIT_STATUS_SHIFT   5
-#define SWSCI_SCIC_EXIT_STATUS_MASK    (7 << 5)
-#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
-
-#define SWSCI_FUNCTION_CODE(main, sub) \
-       ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
-        (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
-
-/* SWSCI: Get BIOS Data (GBDA) */
-#define SWSCI_GBDA                     4
-#define SWSCI_GBDA_SUPPORTED_CALLS     SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
-#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
-#define SWSCI_GBDA_BOOT_DISPLAY_PREF   SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
-#define SWSCI_GBDA_PANEL_DETAILS       SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
-#define SWSCI_GBDA_TV_STANDARD         SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
-#define SWSCI_GBDA_INTERNAL_GRAPHICS   SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
-#define SWSCI_GBDA_SPREAD_SPECTRUM     SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
-
-/* SWSCI: System BIOS Callbacks (SBCB) */
-#define SWSCI_SBCB                     6
-#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
-#define SWSCI_SBCB_INIT_COMPLETION     SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
-#define SWSCI_SBCB_PRE_HIRES_SET_MODE  SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
-#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
-#define SWSCI_SBCB_DISPLAY_SWITCH      SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
-#define SWSCI_SBCB_SET_TV_FORMAT       SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
-#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
-#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
-#define SWSCI_SBCB_SET_BOOT_DISPLAY    SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
-#define SWSCI_SBCB_SET_PANEL_DETAILS   SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
-#define SWSCI_SBCB_SET_INTERNAL_GFX    SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
-#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
-#define SWSCI_SBCB_SUSPEND_RESUME      SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
-#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
-#define SWSCI_SBCB_POST_VBE_PM         SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
-#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
-
-/*
- * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
- * Attached to the Display Adapter).
- */
-#define ACPI_DISPLAY_INDEX_SHIFT               0
-#define ACPI_DISPLAY_INDEX_MASK                        (0xf << 0)
-#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT     4
-#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK      (0xf << 4)
-#define ACPI_DISPLAY_TYPE_SHIFT                        8
-#define ACPI_DISPLAY_TYPE_MASK                 (0xf << 8)
-#define ACPI_DISPLAY_TYPE_OTHER                        (0 << 8)
-#define ACPI_DISPLAY_TYPE_VGA                  (1 << 8)
-#define ACPI_DISPLAY_TYPE_TV                   (2 << 8)
-#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL     (3 << 8)
-#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL     (4 << 8)
-#define ACPI_VENDOR_SPECIFIC_SHIFT             12
-#define ACPI_VENDOR_SPECIFIC_MASK              (0xf << 12)
-#define ACPI_BIOS_CAN_DETECT                   (1 << 16)
-#define ACPI_DEPENDS_ON_VGA                    (1 << 17)
-#define ACPI_PIPE_ID_SHIFT                     18
-#define ACPI_PIPE_ID_MASK                      (7 << 18)
-#define ACPI_DEVICE_ID_SCHEME                  (1 << 31)
-
-#define MAX_DSLP       1500
-
-static int swsci(struct drm_i915_private *dev_priv,
-                u32 function, u32 parm, u32 *parm_out)
-{
-       struct opregion_swsci *swsci = dev_priv->opregion.swsci;
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u32 main_function, sub_function, scic;
-       u16 swsci_val;
-       u32 dslp;
-
-       if (!swsci)
-               return -ENODEV;
-
-       main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
-               SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
-       sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
-               SWSCI_SCIC_SUB_FUNCTION_SHIFT;
-
-       /* Check if we can call the function. See swsci_setup for details. */
-       if (main_function == SWSCI_SBCB) {
-               if ((dev_priv->opregion.swsci_sbcb_sub_functions &
-                    (1 << sub_function)) == 0)
-                       return -EINVAL;
-       } else if (main_function == SWSCI_GBDA) {
-               if ((dev_priv->opregion.swsci_gbda_sub_functions &
-                    (1 << sub_function)) == 0)
-                       return -EINVAL;
-       }
-
-       /* Driver sleep timeout in ms. */
-       dslp = swsci->dslp;
-       if (!dslp) {
-               /* The spec says 2ms should be the default, but it's too small
-                * for some machines. */
-               dslp = 50;
-       } else if (dslp > MAX_DSLP) {
-               /* Hey bios, trust must be earned. */
-               DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
-                             "using %u ms instead\n", dslp, MAX_DSLP);
-               dslp = MAX_DSLP;
-       }
-
-       /* The spec tells us to do this, but we are the only user... */
-       scic = swsci->scic;
-       if (scic & SWSCI_SCIC_INDICATOR) {
-               DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
-               return -EBUSY;
-       }
-
-       scic = function | SWSCI_SCIC_INDICATOR;
-
-       swsci->parm = parm;
-       swsci->scic = scic;
-
-       /* Ensure SCI event is selected and event trigger is cleared. */
-       pci_read_config_word(pdev, SWSCI, &swsci_val);
-       if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
-               swsci_val |= SWSCI_SCISEL;
-               swsci_val &= ~SWSCI_GSSCIE;
-               pci_write_config_word(pdev, SWSCI, swsci_val);
-       }
-
-       /* Use event trigger to tell bios to check the mail. */
-       swsci_val |= SWSCI_GSSCIE;
-       pci_write_config_word(pdev, SWSCI, swsci_val);
-
-       /* Poll for the result. */
-#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
-       if (wait_for(C, dslp)) {
-               DRM_DEBUG_DRIVER("SWSCI request timed out\n");
-               return -ETIMEDOUT;
-       }
-
-       scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
-               SWSCI_SCIC_EXIT_STATUS_SHIFT;
-
-       /* Note: scic == 0 is an error! */
-       if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
-               DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
-               return -EIO;
-       }
-
-       if (parm_out)
-               *parm_out = swsci->parm;
-
-       return 0;
-
-#undef C
-}
-
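The SCIC word used by swsci() packs the main and sub-function numbers, and later the BIOS exit status, into one 32-bit value. A small standalone sketch of that packing and decoding, with the shift and mask values copied from the defines above:

#include <stdio.h>
#include <stdint.h>

/* Shift/mask values copied from the SWSCI defines above. */
#define SCIC_MAIN_FUNCTION_SHIFT	1
#define SCIC_SUB_FUNCTION_SHIFT		8
#define SCIC_EXIT_STATUS_SHIFT		5
#define SCIC_EXIT_STATUS_MASK		(7 << 5)

static uint32_t function_code(uint32_t main_fn, uint32_t sub_fn)
{
	return (main_fn << SCIC_MAIN_FUNCTION_SHIFT) |
	       (sub_fn << SCIC_SUB_FUNCTION_SHIFT);
}

int main(void)
{
	/* SBCB is main function 6, "Display Power State" is sub-function 8,
	 * matching SWSCI_SBCB_DISPLAY_POWER_STATE above. */
	uint32_t code = function_code(6, 8);

	/* After completion the BIOS reports status in bits 5-7; 1 is success,
	 * and 0 is also treated as an error by swsci(). */
	uint32_t scic = code | (1 << SCIC_EXIT_STATUS_SHIFT);
	uint32_t status = (scic & SCIC_EXIT_STATUS_MASK) >> SCIC_EXIT_STATUS_SHIFT;

	printf("function code 0x%03x, exit status %u\n", code, status);
	return 0;
}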
-#define DISPLAY_TYPE_CRT                       0
-#define DISPLAY_TYPE_TV                                1
-#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL       2
-#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL       3
-
-int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
-                                 bool enable)
-{
-       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
-       u32 parm = 0;
-       u32 type = 0;
-       u32 port;
-
-       /* don't care about old stuff for now */
-       if (!HAS_DDI(dev_priv))
-               return 0;
-
-       if (intel_encoder->type == INTEL_OUTPUT_DSI)
-               port = 0;
-       else
-               port = intel_encoder->port;
-
-       if (port == PORT_E)  {
-               port = 0;
-       } else {
-               parm |= 1 << port;
-               port++;
-       }
-
-       if (!enable)
-               parm |= 4 << 8;
-
-       switch (intel_encoder->type) {
-       case INTEL_OUTPUT_ANALOG:
-               type = DISPLAY_TYPE_CRT;
-               break;
-       case INTEL_OUTPUT_DDI:
-       case INTEL_OUTPUT_DP:
-       case INTEL_OUTPUT_HDMI:
-       case INTEL_OUTPUT_DP_MST:
-               type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
-               break;
-       case INTEL_OUTPUT_EDP:
-       case INTEL_OUTPUT_DSI:
-               type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
-               break;
-       default:
-               WARN_ONCE(1, "unsupported intel_encoder type %d\n",
-                         intel_encoder->type);
-               return -EINVAL;
-       }
-
-       parm |= type << (16 + port * 3);
-
-       return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
-}
-
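A worked example of the parameter word built in intel_opregion_notify_encoder() above: enabling an HDMI encoder on PORT_B sets bit 1 in the port bitmask and places display type 2 at bit 16 + 2 * 3. The values simply follow the arithmetic above; the snippet is illustration only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t parm = 0;
	uint32_t port = 1;	/* PORT_B */
	uint32_t type = 2;	/* DISPLAY_TYPE_EXTERNAL_FLAT_PANEL */

	parm |= 1 << port;			/* port bitmask: 0x2 */
	port++;					/* index used for the type field */
	/* enable == true, so the (4 << 8) power-off marker stays clear */
	parm |= type << (16 + port * 3);	/* type field: 0x800000 */

	printf("SWSCI parm = 0x%08x\n", parm);	/* prints 0x00800002 */
	return 0;
}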
-static const struct {
-       pci_power_t pci_power_state;
-       u32 parm;
-} power_state_map[] = {
-       { PCI_D0,       0x00 },
-       { PCI_D1,       0x01 },
-       { PCI_D2,       0x02 },
-       { PCI_D3hot,    0x04 },
-       { PCI_D3cold,   0x04 },
-};
-
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
-                                 pci_power_t state)
-{
-       int i;
-
-       if (!HAS_DDI(dev_priv))
-               return 0;
-
-       for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
-               if (state == power_state_map[i].pci_power_state)
-                       return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
-                                    power_state_map[i].parm, NULL);
-       }
-
-       return -EINVAL;
-}
-
-static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
-{
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-       struct drm_device *dev = &dev_priv->drm;
-
-       DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
-
-       if (acpi_video_get_backlight_type() == acpi_backlight_native) {
-               DRM_DEBUG_KMS("opregion backlight request ignored\n");
-               return 0;
-       }
-
-       if (!(bclp & ASLE_BCLP_VALID))
-               return ASLC_BACKLIGHT_FAILED;
-
-       bclp &= ASLE_BCLP_MSK;
-       if (bclp > 255)
-               return ASLC_BACKLIGHT_FAILED;
-
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
-       /*
-        * Update backlight on all connectors that support backlight (usually
-        * only one).
-        */
-       DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter)
-               intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
-       drm_connector_list_iter_end(&conn_iter);
-       asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
-
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-
-       return 0;
-}
-
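asle_set_backlight() reports the accepted level back to the BIOS through cblv as a rounded-up percentage with the valid bit set. A minimal sketch of that mapping:

#include <stdio.h>
#include <stdint.h>

#define ASLE_CBLV_VALID	(1u << 31)

/* Mirrors the cblv computation in asle_set_backlight() above: a 0..255
 * request is reported back as a rounded-up percentage plus the valid bit. */
static uint32_t bclp_to_cblv(uint32_t bclp)
{
	return ((bclp * 100 + 254) / 255) | ASLE_CBLV_VALID;
}

int main(void)
{
	printf("bclp 128 -> cblv 0x%08x\n", bclp_to_cblv(128)); /* 0x80000033 */
	return 0;
}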
-static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
-{
-       /* alsi is the current ALS reading in lux. 0 indicates below sensor
-          range, 0xffff indicates above sensor range. 1-0xfffe are valid */
-       DRM_DEBUG_DRIVER("Illum is not supported\n");
-       return ASLC_ALS_ILLUM_FAILED;
-}
-
-static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
-{
-       DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-       return ASLC_PWM_FREQ_FAILED;
-}
-
-static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
-{
-       /* Panel fitting is currently controlled by the X code, so this is a
-          noop until modesetting support works fully */
-       DRM_DEBUG_DRIVER("Pfit is not supported\n");
-       return ASLC_PFIT_FAILED;
-}
-
-static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
-{
-       DRM_DEBUG_DRIVER("SROT is not supported\n");
-       return ASLC_ROTATION_ANGLES_FAILED;
-}
-
-static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
-{
-       if (!iuer)
-               DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
-       if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
-               DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
-       if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
-               DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
-       if (iuer & ASLE_IUER_VOLUME_UP_BTN)
-               DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
-       if (iuer & ASLE_IUER_WINDOWS_BTN)
-               DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
-       if (iuer & ASLE_IUER_POWER_BTN)
-               DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
-
-       return ASLC_BUTTON_ARRAY_FAILED;
-}
-
-static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
-{
-       if (iuer & ASLE_IUER_CONVERTIBLE)
-               DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
-       else
-               DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
-
-       return ASLC_CONVERTIBLE_FAILED;
-}
-
-static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
-{
-       if (iuer & ASLE_IUER_DOCKING)
-               DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
-       else
-               DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
-
-       return ASLC_DOCKING_FAILED;
-}
-
-static u32 asle_isct_state(struct drm_i915_private *dev_priv)
-{
-       DRM_DEBUG_DRIVER("ISCT is not supported\n");
-       return ASLC_ISCT_STATE_FAILED;
-}
-
-static void asle_work(struct work_struct *work)
-{
-       struct intel_opregion *opregion =
-               container_of(work, struct intel_opregion, asle_work);
-       struct drm_i915_private *dev_priv =
-               container_of(opregion, struct drm_i915_private, opregion);
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-       u32 aslc_stat = 0;
-       u32 aslc_req;
-
-       if (!asle)
-               return;
-
-       aslc_req = asle->aslc;
-
-       if (!(aslc_req & ASLC_REQ_MSK)) {
-               DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
-                                aslc_req);
-               return;
-       }
-
-       if (aslc_req & ASLC_SET_ALS_ILLUM)
-               aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
-
-       if (aslc_req & ASLC_SET_BACKLIGHT)
-               aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
-
-       if (aslc_req & ASLC_SET_PFIT)
-               aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
-
-       if (aslc_req & ASLC_SET_PWM_FREQ)
-               aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
-
-       if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
-               aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
-                                                       asle->srot);
-
-       if (aslc_req & ASLC_BUTTON_ARRAY)
-               aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
-
-       if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
-               aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
-
-       if (aslc_req & ASLC_DOCKING_INDICATOR)
-               aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
-
-       if (aslc_req & ASLC_ISCT_STATE_CHANGE)
-               aslc_stat |= asle_isct_state(dev_priv);
-
-       asle->aslc = aslc_stat;
-}
-
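asle_work() accumulates a response word in which each request bit n has a matching failure bit at position 10 + 2n, as the ASLC defines above suggest. A small illustration of the status when one handler fails:

#include <stdio.h>
#include <stdint.h>

/* Request and response bits copied from the ASLC defines above. */
#define ASLC_SET_BACKLIGHT	(1 << 1)
#define ASLC_SET_PFIT		(1 << 2)
#define ASLC_PFIT_FAILED	(1 << 14)

int main(void)
{
	uint32_t aslc_req = ASLC_SET_BACKLIGHT | ASLC_SET_PFIT;
	uint32_t aslc_stat = 0;

	/* The backlight handler succeeds and contributes nothing; pfit is
	 * unsupported and reports its failure bit, as asle_set_pfit() does. */
	aslc_stat |= 0;
	aslc_stat |= ASLC_PFIT_FAILED;

	printf("aslc req 0x%03x -> stat 0x%04x\n", aslc_req, aslc_stat);
	return 0;
}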
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
-{
-       if (dev_priv->opregion.asle)
-               schedule_work(&dev_priv->opregion.asle_work);
-}
-
-#define ACPI_EV_DISPLAY_SWITCH (1<<0)
-#define ACPI_EV_LID            (1<<1)
-#define ACPI_EV_DOCK           (1<<2)
-
-/*
- * The only video events relevant to opregion are 0x80. These indicate either a
- * docking event, lid switch or display switch request. In Linux, these are
- * handled by the dock, button and video drivers.
- */
-static int intel_opregion_video_event(struct notifier_block *nb,
-                                     unsigned long val, void *data)
-{
-       struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
-                                                      acpi_notifier);
-       struct acpi_bus_event *event = data;
-       struct opregion_acpi *acpi;
-       int ret = NOTIFY_OK;
-
-       if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
-               return NOTIFY_DONE;
-
-       acpi = opregion->acpi;
-
-       if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
-               ret = NOTIFY_BAD;
-
-       acpi->csts = 0;
-
-       return ret;
-}
-
-/*
- * Initialise the DIDL field in opregion. This passes a list of devices to
- * the firmware. Values are defined by section B.4.2 of the ACPI specification
- * (version 3)
- */
-
-static void set_did(struct intel_opregion *opregion, int i, u32 val)
-{
-       if (i < ARRAY_SIZE(opregion->acpi->didl)) {
-               opregion->acpi->didl[i] = val;
-       } else {
-               i -= ARRAY_SIZE(opregion->acpi->didl);
-
-               if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
-                       return;
-
-               opregion->acpi->did2[i] = val;
-       }
-}
-
-static u32 acpi_display_type(struct intel_connector *connector)
-{
-       u32 display_type;
-
-       switch (connector->base.connector_type) {
-       case DRM_MODE_CONNECTOR_VGA:
-       case DRM_MODE_CONNECTOR_DVIA:
-               display_type = ACPI_DISPLAY_TYPE_VGA;
-               break;
-       case DRM_MODE_CONNECTOR_Composite:
-       case DRM_MODE_CONNECTOR_SVIDEO:
-       case DRM_MODE_CONNECTOR_Component:
-       case DRM_MODE_CONNECTOR_9PinDIN:
-       case DRM_MODE_CONNECTOR_TV:
-               display_type = ACPI_DISPLAY_TYPE_TV;
-               break;
-       case DRM_MODE_CONNECTOR_DVII:
-       case DRM_MODE_CONNECTOR_DVID:
-       case DRM_MODE_CONNECTOR_DisplayPort:
-       case DRM_MODE_CONNECTOR_HDMIA:
-       case DRM_MODE_CONNECTOR_HDMIB:
-               display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
-               break;
-       case DRM_MODE_CONNECTOR_LVDS:
-       case DRM_MODE_CONNECTOR_eDP:
-       case DRM_MODE_CONNECTOR_DSI:
-               display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
-               break;
-       case DRM_MODE_CONNECTOR_Unknown:
-       case DRM_MODE_CONNECTOR_VIRTUAL:
-               display_type = ACPI_DISPLAY_TYPE_OTHER;
-               break;
-       default:
-               MISSING_CASE(connector->base.connector_type);
-               display_type = ACPI_DISPLAY_TYPE_OTHER;
-               break;
-       }
-
-       return display_type;
-}
-
-static void intel_didl_outputs(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       int i = 0, max_outputs;
-       int display_index[16] = {};
-
-       /*
-        * In theory, did2, the extended didl, gets added at opregion version
-        * 3.0. In practice, however, we're supposed to set it for earlier
-        * versions as well, since a BIOS that doesn't understand did2 should
-        * not look at it anyway. Use a variable so we can tweak this if a need
-        * arises later.
-        */
-       max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
-               ARRAY_SIZE(opregion->acpi->did2);
-
-       drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter) {
-               u32 device_id, type;
-
-               device_id = acpi_display_type(connector);
-
-               /* Use display type specific display index. */
-               type = (device_id & ACPI_DISPLAY_TYPE_MASK)
-                       >> ACPI_DISPLAY_TYPE_SHIFT;
-               device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
-
-               connector->acpi_device_id = device_id;
-               if (i < max_outputs)
-                       set_did(opregion, i, device_id);
-               i++;
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       DRM_DEBUG_KMS("%d outputs detected\n", i);
-
-       if (i > max_outputs)
-               DRM_ERROR("More than %d outputs in connector list\n",
-                         max_outputs);
-
-       /* If fewer than max outputs, the list must be null terminated */
-       if (i < max_outputs)
-               set_did(opregion, i, 0);
-}
-
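intel_didl_outputs() composes each DIDL/DID2 entry from the _DOD display type plus a per-type running index. A minimal sketch of that composition, using an invented connector mix:

#include <stdio.h>
#include <stdint.h>

/* _DOD field layout copied from the defines above. */
#define ACPI_DISPLAY_INDEX_SHIFT		0
#define ACPI_DISPLAY_TYPE_SHIFT			8
#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL	(3 << 8)
#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4 << 8)

static uint32_t make_did(uint32_t type, int index[])
{
	/* Same pattern as the loop above: type plus a per-type counter. */
	return type | (index[type >> ACPI_DISPLAY_TYPE_SHIFT]++ <<
		       ACPI_DISPLAY_INDEX_SHIFT);
}

int main(void)
{
	int index[16] = { 0 };	/* per-type counters, as in intel_didl_outputs() */
	uint32_t edp, hdmi0, hdmi1;

	/* Invented connector mix: one eDP panel and two HDMI outputs. */
	edp   = make_did(ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL, index);
	hdmi0 = make_did(ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL, index);
	hdmi1 = make_did(ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL, index);

	printf("eDP 0x%03x, HDMI 0x%03x and 0x%03x\n", edp, hdmi0, hdmi1);
	/* prints: eDP 0x400, HDMI 0x300 and 0x301 */
	return 0;
}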
-static void intel_setup_cadls(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       int i = 0;
-
-       /*
-        * Initialize the CADL field from the connector device ids. This is
-        * essentially the same as copying from the DIDL. Technically, this is
-        * not always correct, as display outputs may exist but not be active. This
-        * initialization is necessary for some Clevo laptops that check this
-        * field before processing the brightness and display switching hotkeys.
-        *
-        * Note that internal panels should be at the front of the connector
-        * list already, ensuring they're not left out.
-        */
-       drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter) {
-               if (i >= ARRAY_SIZE(opregion->acpi->cadl))
-                       break;
-               opregion->acpi->cadl[i++] = connector->acpi_device_id;
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       /* If fewer than 8 active devices, the list must be null terminated */
-       if (i < ARRAY_SIZE(opregion->acpi->cadl))
-               opregion->acpi->cadl[i] = 0;
-}
-
-static void swsci_setup(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       bool requested_callbacks = false;
-       u32 tmp;
-
-       /* Sub-function code 0 is okay, let's allow them. */
-       opregion->swsci_gbda_sub_functions = 1;
-       opregion->swsci_sbcb_sub_functions = 1;
-
-       /* We use GBDA to ask for supported GBDA calls. */
-       if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
-               /* make the bits match the sub-function codes */
-               tmp <<= 1;
-               opregion->swsci_gbda_sub_functions |= tmp;
-       }
-
-       /*
-        * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
-        * must not call interfaces that are not specifically requested by the
-        * bios.
-        */
-       if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
-               /* here, the bits already match sub-function codes */
-               opregion->swsci_sbcb_sub_functions |= tmp;
-               requested_callbacks = true;
-       }
-
-       /*
-        * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
-        * the callback is _requested_. But we still can't call interfaces that
-        * are not requested.
-        */
-       if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
-               /* make the bits match the sub-function codes */
-               u32 low = tmp & 0x7ff;
-               u32 high = tmp & ~0xfff; /* bit 11 is reserved */
-               tmp = (high << 4) | (low << 1) | 1;
-
-               /* best guess what to do with supported wrt requested */
-               if (requested_callbacks) {
-                       u32 req = opregion->swsci_sbcb_sub_functions;
-                       if ((req & tmp) != req)
-                               DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
-                       /* XXX: for now, trust the requested callbacks */
-                       /* opregion->swsci_sbcb_sub_functions &= tmp; */
-               } else {
-                       opregion->swsci_sbcb_sub_functions |= tmp;
-               }
-       }
-
-       DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
-                        opregion->swsci_gbda_sub_functions,
-                        opregion->swsci_sbcb_sub_functions);
-}
-
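swsci_setup() remaps the SBCB "supported callbacks" reply into the same one-bit-per-sub-function layout used for the GBDA word, skipping reserved bit 11. A worked example under that reading, with an invented reply value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented SBCB reply: bits 0-10 stand for sub-functions 1-11,
	 * bit 11 is reserved, bit 12 onwards stands for sub-function 16
	 * onwards (my reading of the remap in swsci_setup() above). */
	uint32_t tmp = 0x00001003;	/* sub-functions 1, 2 and 16 */

	uint32_t low  = tmp & 0x7ff;
	uint32_t high = tmp & ~0xfff;	/* drop reserved bit 11 */
	uint32_t mask = (high << 4) | (low << 1) | 1;

	printf("SBCB sub-function mask = 0x%08x\n", mask);	/* 0x00010007 */
	return 0;
}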
-static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
-{
-       DRM_DEBUG_KMS("Falling back to manually reading VBT from "
-                     "VBIOS ROM for %s\n", id->ident);
-       return 1;
-}
-
-static const struct dmi_system_id intel_no_opregion_vbt[] = {
-       {
-               .callback = intel_no_opregion_vbt_callback,
-               .ident = "ThinkCentre A57",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
-               },
-       },
-       { }
-};
-
-static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       const struct firmware *fw = NULL;
-       const char *name = i915_modparams.vbt_firmware;
-       int ret;
-
-       if (!name || !*name)
-               return -ENOENT;
-
-       ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
-       if (ret) {
-               DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
-                         name, ret);
-               return ret;
-       }
-
-       if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
-               opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
-               if (opregion->vbt_firmware) {
-                       DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
-                       opregion->vbt = opregion->vbt_firmware;
-                       opregion->vbt_size = fw->size;
-                       ret = 0;
-               } else {
-                       ret = -ENOMEM;
-               }
-       } else {
-               DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
-               ret = -EINVAL;
-       }
-
-       release_firmware(fw);
-
-       return ret;
-}
-
-int intel_opregion_setup(struct drm_i915_private *dev_priv)
-{
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u32 asls, mboxes;
-       char buf[sizeof(OPREGION_SIGNATURE)];
-       int err = 0;
-       void *base;
-       const void *vbt;
-       u32 vbt_size;
-
-       BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
-       BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
-       BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
-       BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
-       BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
-
-       pci_read_config_dword(pdev, ASLS, &asls);
-       DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
-       if (asls == 0) {
-               DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
-               return -ENOTSUPP;
-       }
-
-       INIT_WORK(&opregion->asle_work, asle_work);
-
-       base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
-       if (!base)
-               return -ENOMEM;
-
-       memcpy(buf, base, sizeof(buf));
-
-       if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
-               DRM_DEBUG_DRIVER("opregion signature mismatch\n");
-               err = -EINVAL;
-               goto err_out;
-       }
-       opregion->header = base;
-       opregion->lid_state = base + ACPI_CLID;
-
-       DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
-                        opregion->header->over.major,
-                        opregion->header->over.minor,
-                        opregion->header->over.revision);
-
-       mboxes = opregion->header->mboxes;
-       if (mboxes & MBOX_ACPI) {
-               DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
-               opregion->acpi = base + OPREGION_ACPI_OFFSET;
-       }
-
-       if (mboxes & MBOX_SWSCI) {
-               DRM_DEBUG_DRIVER("SWSCI supported\n");
-               opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-               swsci_setup(dev_priv);
-       }
-
-       if (mboxes & MBOX_ASLE) {
-               DRM_DEBUG_DRIVER("ASLE supported\n");
-               opregion->asle = base + OPREGION_ASLE_OFFSET;
-
-               opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-       }
-
-       if (mboxes & MBOX_ASLE_EXT)
-               DRM_DEBUG_DRIVER("ASLE extension supported\n");
-
-       if (intel_load_vbt_firmware(dev_priv) == 0)
-               goto out;
-
-       if (dmi_check_system(intel_no_opregion_vbt))
-               goto out;
-
-       if (opregion->header->over.major >= 2 && opregion->asle &&
-           opregion->asle->rvda && opregion->asle->rvds) {
-               resource_size_t rvda = opregion->asle->rvda;
-
-               /*
-                * opregion 2.0: rvda is the physical VBT address.
-                *
-                * opregion 2.1+: rvda is unsigned, relative offset from
-                * opregion base, and should never point within opregion.
-                */
-               if (opregion->header->over.major > 2 ||
-                   opregion->header->over.minor >= 1) {
-                       WARN_ON(rvda < OPREGION_SIZE);
-
-                       rvda += asls;
-               }
-
-               opregion->rvda = memremap(rvda, opregion->asle->rvds,
-                                         MEMREMAP_WB);
-
-               vbt = opregion->rvda;
-               vbt_size = opregion->asle->rvds;
-               if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
-                       DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
-                       opregion->vbt = vbt;
-                       opregion->vbt_size = vbt_size;
-                       goto out;
-               } else {
-                       DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
-                       memunmap(opregion->rvda);
-                       opregion->rvda = NULL;
-               }
-       }
-
-       vbt = base + OPREGION_VBT_OFFSET;
-       /*
-        * The VBT specification says that if the ASLE ext mailbox is not used
-        * its area is reserved, but on some CHT boards the VBT extends into the
-        * ASLE ext area. Allow this even though it is against the spec, so we
-        * do not end up rejecting the VBT on those boards (and end up not
-        * finding the LCD panel because of this).
-        */
-       vbt_size = (mboxes & MBOX_ASLE_EXT) ?
-               OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
-       vbt_size -= OPREGION_VBT_OFFSET;
-       if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
-               DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
-               opregion->vbt = vbt;
-               opregion->vbt_size = vbt_size;
-       } else {
-               DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
-       }
-
-out:
-       return 0;
-
-err_out:
-       memunmap(base);
-       return err;
-}
-
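For the RVDA handling above: OpRegion 2.0 treats rvda as a physical address, while 2.1+ treats it as an offset that has to be rebased onto the OpRegion base (asls) and must not point inside the OpRegion itself. A small sketch of that distinction, with invented addresses:

#include <stdio.h>
#include <stdint.h>

#define OPREGION_SIZE (8 * 1024)

/* Sketch of the rvda interpretation in intel_opregion_setup() above.
 * The addresses below are invented for illustration only. */
static uint64_t vbt_phys_addr(uint64_t asls, uint64_t rvda, int major, int minor)
{
	if (major > 2 || minor >= 1) {
		/* 2.1+: relative to the OpRegion base */
		if (rvda < OPREGION_SIZE)
			fprintf(stderr, "suspicious rvda 0x%llx\n",
				(unsigned long long)rvda);
		return asls + rvda;
	}
	return rvda;	/* 2.0: already a physical address */
}

int main(void)
{
	printf("2.0: 0x%llx\n",
	       (unsigned long long)vbt_phys_addr(0x8f000000, 0x8f100000, 2, 0));
	printf("2.1: 0x%llx\n",
	       (unsigned long long)vbt_phys_addr(0x8f000000, 0x2000, 2, 1));
	return 0;
}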
-static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
-{
-       DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
-       return 1;
-}
-
-static const struct dmi_system_id intel_use_opregion_panel_type[] = {
-       {
-               .callback = intel_use_opregion_panel_type_callback,
-               .ident = "Conrac GmbH IX45GM2",
-               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
-                           DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
-               },
-       },
-       { }
-};
-
-int
-intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
-{
-       u32 panel_details;
-       int ret;
-
-       ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
-       if (ret) {
-               DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
-                             ret);
-               return ret;
-       }
-
-       ret = (panel_details >> 8) & 0xff;
-       if (ret > 0x10) {
-               DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
-               return -EINVAL;
-       }
-
-       /* fall back to VBT panel type? */
-       if (ret == 0x0) {
-               DRM_DEBUG_KMS("No panel type in OpRegion\n");
-               return -ENODEV;
-       }
-
-       /*
-        * So far we know that some machines must use it, others must not use it.
-        * There doesn't seem to be any way to determine which way to go, except
-        * via a quirk list :(
-        */
-       if (!dmi_check_system(intel_use_opregion_panel_type)) {
-               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
-               return -ENODEV;
-       }
-
-       return ret - 1;
-}
-
-void intel_opregion_register(struct drm_i915_private *i915)
-{
-       struct intel_opregion *opregion = &i915->opregion;
-
-       if (!opregion->header)
-               return;
-
-       if (opregion->acpi) {
-               opregion->acpi_notifier.notifier_call =
-                       intel_opregion_video_event;
-               register_acpi_notifier(&opregion->acpi_notifier);
-       }
-
-       intel_opregion_resume(i915);
-}
-
-void intel_opregion_resume(struct drm_i915_private *i915)
-{
-       struct intel_opregion *opregion = &i915->opregion;
-
-       if (!opregion->header)
-               return;
-
-       if (opregion->acpi) {
-               intel_didl_outputs(i915);
-               intel_setup_cadls(i915);
-
-               /*
-                * Notify BIOS we are ready to handle ACPI video ext notifs.
-                * Right now, all the events are handled by the ACPI video
-                * module. We don't actually need to do anything with them.
-                */
-               opregion->acpi->csts = 0;
-               opregion->acpi->drdy = 1;
-       }
-
-       if (opregion->asle) {
-               opregion->asle->tche = ASLE_TCHE_BLC_EN;
-               opregion->asle->ardy = ASLE_ARDY_READY;
-       }
-
-       intel_opregion_notify_adapter(i915, PCI_D0);
-}
-
-void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
-{
-       struct intel_opregion *opregion = &i915->opregion;
-
-       if (!opregion->header)
-               return;
-
-       intel_opregion_notify_adapter(i915, state);
-
-       if (opregion->asle)
-               opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-
-       cancel_work_sync(&i915->opregion.asle_work);
-
-       if (opregion->acpi)
-               opregion->acpi->drdy = 0;
-}
-
-void intel_opregion_unregister(struct drm_i915_private *i915)
-{
-       struct intel_opregion *opregion = &i915->opregion;
-
-       intel_opregion_suspend(i915, PCI_D1);
-
-       if (!opregion->header)
-               return;
-
-       if (opregion->acpi_notifier.notifier_call) {
-               unregister_acpi_notifier(&opregion->acpi_notifier);
-               opregion->acpi_notifier.notifier_call = NULL;
-       }
-
-       /* just clear all opregion memory pointers now */
-       memunmap(opregion->header);
-       if (opregion->rvda) {
-               memunmap(opregion->rvda);
-               opregion->rvda = NULL;
-       }
-       if (opregion->vbt_firmware) {
-               kfree(opregion->vbt_firmware);
-               opregion->vbt_firmware = NULL;
-       }
-       opregion->header = NULL;
-       opregion->acpi = NULL;
-       opregion->swsci = NULL;
-       opregion->asle = NULL;
-       opregion->vbt = NULL;
-       opregion->lid_state = NULL;
-}
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
deleted file mode 100644 (file)
index 4aa68ff..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright © 2008-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_OPREGION_H_
-#define _INTEL_OPREGION_H_
-
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-
-struct drm_i915_private;
-struct intel_encoder;
-
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
-       struct opregion_header *header;
-       struct opregion_acpi *acpi;
-       struct opregion_swsci *swsci;
-       u32 swsci_gbda_sub_functions;
-       u32 swsci_sbcb_sub_functions;
-       struct opregion_asle *asle;
-       void *rvda;
-       void *vbt_firmware;
-       const void *vbt;
-       u32 vbt_size;
-       u32 *lid_state;
-       struct work_struct asle_work;
-       struct notifier_block acpi_notifier;
-};
-
-#define OPREGION_SIZE            (8 * 1024)
-
-#ifdef CONFIG_ACPI
-
-int intel_opregion_setup(struct drm_i915_private *dev_priv);
-
-void intel_opregion_register(struct drm_i915_private *dev_priv);
-void intel_opregion_unregister(struct drm_i915_private *dev_priv);
-
-void intel_opregion_resume(struct drm_i915_private *dev_priv);
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
-                           pci_power_t state);
-
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
-                                 bool enable);
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
-                                 pci_power_t state);
-int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-
-#else /* CONFIG_ACPI*/
-
-static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
-{
-       return 0;
-}
-
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
-                                         pci_power_t state)
-{
-}
-
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
-{
-       return 0;
-}
-
-static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
-{
-       return 0;
-}
-
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
-{
-       return -ENODEV;
-}
-
-#endif /* CONFIG_ACPI */
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
deleted file mode 100644 (file)
index 21339b7..0000000
+++ /dev/null
@@ -1,1497 +0,0 @@
-/*
- * Copyright © 2009
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- *    Daniel Vetter <daniel@ffwll.ch>
- *
- * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
- */
-
-#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
-
-#include "gem/i915_gem_pm.h"
-
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-#include "intel_overlay.h"
-
-/* Limits for overlay size. According to intel doc, the real limits are:
- * Y width: 4095, UV width (planar): 2047, Y height: 2047,
- * UV height (planar): 1023. But the xorg thinks 2048 for height and width. Use
- * the minimum of both.  */
-#define IMAGE_MAX_WIDTH                2048
-#define IMAGE_MAX_HEIGHT       2046 /* 2 * 1023 */
-/* on 830 and 845 these large limits result in the card hanging */
-#define IMAGE_MAX_WIDTH_LEGACY 1024
-#define IMAGE_MAX_HEIGHT_LEGACY        1088
-
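A quick sketch of checking a requested overlay size against the limits above; the helper and its name are hypothetical, with the legacy values being the 830/845 limits defined above:

#include <stdbool.h>
#include <stdio.h>

#define IMAGE_MAX_WIDTH			2048
#define IMAGE_MAX_HEIGHT		2046
#define IMAGE_MAX_WIDTH_LEGACY		1024
#define IMAGE_MAX_HEIGHT_LEGACY		1088

/* Hypothetical helper: reject sizes the overlay cannot handle. */
static bool overlay_size_ok(int width, int height, bool legacy)
{
	int max_w = legacy ? IMAGE_MAX_WIDTH_LEGACY : IMAGE_MAX_WIDTH;
	int max_h = legacy ? IMAGE_MAX_HEIGHT_LEGACY : IMAGE_MAX_HEIGHT;

	return width > 0 && height > 0 && width <= max_w && height <= max_h;
}

int main(void)
{
	printf("1920x1080, current: %d\n", overlay_size_ok(1920, 1080, false));
	printf("1920x1080, legacy:  %d\n", overlay_size_ok(1920, 1080, true));
	return 0;
}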
-/* overlay register definitions */
-/* OCMD register */
-#define OCMD_TILED_SURFACE     (0x1<<19)
-#define OCMD_MIRROR_MASK       (0x3<<17)
-#define OCMD_MIRROR_MODE       (0x3<<17)
-#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
-#define OCMD_MIRROR_VERTICAL   (0x2<<17)
-#define OCMD_MIRROR_BOTH       (0x3<<17)
-#define OCMD_BYTEORDER_MASK    (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
-#define OCMD_UV_SWAP           (0x1<<14) /* YVYU */
-#define OCMD_Y_SWAP            (0x2<<14) /* UYVY or FOURCC UYVY */
-#define OCMD_Y_AND_UV_SWAP     (0x3<<14) /* VYUY */
-#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
-#define OCMD_RGB_888           (0x1<<10) /* not in i965 Intel docs */
-#define OCMD_RGB_555           (0x2<<10) /* not in i965 Intel docs */
-#define OCMD_RGB_565           (0x3<<10) /* not in i965 Intel docs */
-#define OCMD_YUV_422_PACKED    (0x8<<10)
-#define OCMD_YUV_411_PACKED    (0x9<<10) /* not in i965 Intel docs */
-#define OCMD_YUV_420_PLANAR    (0xc<<10)
-#define OCMD_YUV_422_PLANAR    (0xd<<10)
-#define OCMD_YUV_410_PLANAR    (0xe<<10) /* also 411 */
-#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
-#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
-#define OCMD_BUF_TYPE_MASK     (0x1<<5)
-#define OCMD_BUF_TYPE_FRAME    (0x0<<5)
-#define OCMD_BUF_TYPE_FIELD    (0x1<<5)
-#define OCMD_TEST_MODE         (0x1<<4)
-#define OCMD_BUFFER_SELECT     (0x3<<2)
-#define OCMD_BUFFER0           (0x0<<2)
-#define OCMD_BUFFER1           (0x1<<2)
-#define OCMD_FIELD_SELECT      (0x1<<2)
-#define OCMD_FIELD0            (0x0<<1)
-#define OCMD_FIELD1            (0x1<<1)
-#define OCMD_ENABLE            (0x1<<0)
-
-/* OCONFIG register */
-#define OCONF_PIPE_MASK                (0x1<<18)
-#define OCONF_PIPE_A           (0x0<<18)
-#define OCONF_PIPE_B           (0x1<<18)
-#define OCONF_GAMMA2_ENABLE    (0x1<<16)
-#define OCONF_CSC_MODE_BT601   (0x0<<5)
-#define OCONF_CSC_MODE_BT709   (0x1<<5)
-#define OCONF_CSC_BYPASS       (0x1<<4)
-#define OCONF_CC_OUT_8BIT      (0x1<<3)
-#define OCONF_TEST_MODE                (0x1<<2)
-#define OCONF_THREE_LINE_BUFFER        (0x1<<0)
-#define OCONF_TWO_LINE_BUFFER  (0x0<<0)
-
-/* DCLRKM (dst-key) register */
-#define DST_KEY_ENABLE         (0x1<<31)
-#define CLK_RGB24_MASK         0x0
-#define CLK_RGB16_MASK         0x070307
-#define CLK_RGB15_MASK         0x070707
-#define CLK_RGB8I_MASK         0xffffff
-
-#define RGB16_TO_COLORKEY(c) \
-       (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
-#define RGB15_TO_COLORKEY(c) \
-       (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
-
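RGB16_TO_COLORKEY() and RGB15_TO_COLORKEY() expand a 16/15-bit pixel into an 8-bit-per-channel key by shifting each color field into the top bits of its byte. A worked example for an RGB565 value:

#include <stdio.h>
#include <stdint.h>

#define RGB16_TO_COLORKEY(c) \
	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))

int main(void)
{
	/* RGB565 0xF81F is full red plus full blue (magenta). */
	uint32_t key = RGB16_TO_COLORKEY(0xF81F);

	printf("colorkey = 0x%06x\n", key);	/* prints 0xf800f8 */
	return 0;
}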
-/* overlay flip addr flag */
-#define OFC_UPDATE             0x1
-
-/* polyphase filter coefficients */
-#define N_HORIZ_Y_TAPS          5
-#define N_VERT_Y_TAPS           3
-#define N_HORIZ_UV_TAPS         3
-#define N_VERT_UV_TAPS          3
-#define N_PHASES                17
-#define MAX_TAPS                5
-
-/* memory buffered overlay registers */
-struct overlay_registers {
-       u32 OBUF_0Y;
-       u32 OBUF_1Y;
-       u32 OBUF_0U;
-       u32 OBUF_0V;
-       u32 OBUF_1U;
-       u32 OBUF_1V;
-       u32 OSTRIDE;
-       u32 YRGB_VPH;
-       u32 UV_VPH;
-       u32 HORZ_PH;
-       u32 INIT_PHS;
-       u32 DWINPOS;
-       u32 DWINSZ;
-       u32 SWIDTH;
-       u32 SWIDTHSW;
-       u32 SHEIGHT;
-       u32 YRGBSCALE;
-       u32 UVSCALE;
-       u32 OCLRC0;
-       u32 OCLRC1;
-       u32 DCLRKV;
-       u32 DCLRKM;
-       u32 SCLRKVH;
-       u32 SCLRKVL;
-       u32 SCLRKEN;
-       u32 OCONFIG;
-       u32 OCMD;
-       u32 RESERVED1; /* 0x6C */
-       u32 OSTART_0Y;
-       u32 OSTART_1Y;
-       u32 OSTART_0U;
-       u32 OSTART_0V;
-       u32 OSTART_1U;
-       u32 OSTART_1V;
-       u32 OTILEOFF_0Y;
-       u32 OTILEOFF_1Y;
-       u32 OTILEOFF_0U;
-       u32 OTILEOFF_0V;
-       u32 OTILEOFF_1U;
-       u32 OTILEOFF_1V;
-       u32 FASTHSCALE; /* 0xA0 */
-       u32 UVSCALEV; /* 0xA4 */
-       u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
-       u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
-       u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
-       u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
-       u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
-       u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
-       u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
-       u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
-       u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
-};
-
-struct intel_overlay {
-       struct drm_i915_private *i915;
-       struct intel_crtc *crtc;
-       struct i915_vma *vma;
-       struct i915_vma *old_vma;
-       bool active;
-       bool pfit_active;
-       u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
-       u32 color_key:24;
-       u32 color_key_enabled:1;
-       u32 brightness, contrast, saturation;
-       u32 old_xscale, old_yscale;
-       /* register access */
-       struct drm_i915_gem_object *reg_bo;
-       struct overlay_registers __iomem *regs;
-       u32 flip_addr;
-       /* flip handling */
-       struct i915_active_request last_flip;
-};
-
-static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
-                                     bool enable)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       u8 val;
-
-       /* WA_OVERLAY_CLKGATE:alm */
-       if (enable)
-               I915_WRITE(DSPCLK_GATE_D, 0);
-       else
-               I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-
-       /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
-       pci_bus_read_config_byte(pdev->bus,
-                                PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
-       if (enable)
-               val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
-       else
-               val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
-       pci_bus_write_config_byte(pdev->bus,
-                                 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
-}
-
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
-                                        struct i915_request *rq,
-                                        i915_active_retire_fn retire)
-{
-       GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
-                                           &overlay->i915->drm.struct_mutex));
-       i915_active_request_set_retire_fn(&overlay->last_flip, retire,
-                                         &overlay->i915->drm.struct_mutex);
-       __i915_active_request_set(&overlay->last_flip, rq);
-       i915_request_add(rq);
-}
-
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-                                        struct i915_request *rq,
-                                        i915_active_retire_fn retire)
-{
-       intel_overlay_submit_request(overlay, rq, retire);
-       return i915_active_request_retire(&overlay->last_flip,
-                                         &overlay->i915->drm.struct_mutex);
-}
-
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
-       struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
-
-       return i915_request_create(engine->kernel_context);
-}
-
-/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_on(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct i915_request *rq;
-       u32 *cs;
-
-       WARN_ON(overlay->active);
-
-       rq = alloc_request(overlay);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       cs = intel_ring_begin(rq, 4);
-       if (IS_ERR(cs)) {
-               i915_request_add(rq);
-               return PTR_ERR(cs);
-       }
-
-       overlay->active = true;
-
-       if (IS_I830(dev_priv))
-               i830_overlay_clock_gating(dev_priv, false);
-
-       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
-       *cs++ = overlay->flip_addr | OFC_UPDATE;
-       *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return intel_overlay_do_wait_request(overlay, rq, NULL);
-}
-
-static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
-                                      struct i915_vma *vma)
-{
-       enum pipe pipe = overlay->crtc->pipe;
-
-       WARN_ON(overlay->old_vma);
-
-       i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
-                         vma ? vma->obj : NULL,
-                         INTEL_FRONTBUFFER_OVERLAY(pipe));
-
-       intel_frontbuffer_flip_prepare(overlay->i915,
-                                      INTEL_FRONTBUFFER_OVERLAY(pipe));
-
-       overlay->old_vma = overlay->vma;
-       if (vma)
-               overlay->vma = i915_vma_get(vma);
-       else
-               overlay->vma = NULL;
-}
-
-/* overlay needs to be enabled in OCMD reg */
-static int intel_overlay_continue(struct intel_overlay *overlay,
-                                 struct i915_vma *vma,
-                                 bool load_polyphase_filter)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct i915_request *rq;
-       u32 flip_addr = overlay->flip_addr;
-       u32 tmp, *cs;
-
-       WARN_ON(!overlay->active);
-
-       if (load_polyphase_filter)
-               flip_addr |= OFC_UPDATE;
-
-       /* check for underruns */
-       tmp = I915_READ(DOVSTA);
-       if (tmp & (1 << 17))
-               DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
-
-       rq = alloc_request(overlay);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       cs = intel_ring_begin(rq, 2);
-       if (IS_ERR(cs)) {
-               i915_request_add(rq);
-               return PTR_ERR(cs);
-       }
-
-       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
-       *cs++ = flip_addr;
-       intel_ring_advance(rq, cs);
-
-       intel_overlay_flip_prepare(overlay, vma);
-
-       intel_overlay_submit_request(overlay, rq, NULL);
-
-       return 0;
-}
-
-static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
-{
-       struct i915_vma *vma;
-
-       vma = fetch_and_zero(&overlay->old_vma);
-       if (WARN_ON(!vma))
-               return;
-
-       intel_frontbuffer_flip_complete(overlay->i915,
-                                       INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
-
-       i915_gem_object_unpin_from_display_plane(vma);
-       i915_vma_put(vma);
-}
-
-static void
-intel_overlay_release_old_vid_tail(struct i915_active_request *active,
-                                  struct i915_request *rq)
-{
-       struct intel_overlay *overlay =
-               container_of(active, typeof(*overlay), last_flip);
-
-       intel_overlay_release_old_vma(overlay);
-}
-
-static void intel_overlay_off_tail(struct i915_active_request *active,
-                                  struct i915_request *rq)
-{
-       struct intel_overlay *overlay =
-               container_of(active, typeof(*overlay), last_flip);
-       struct drm_i915_private *dev_priv = overlay->i915;
-
-       intel_overlay_release_old_vma(overlay);
-
-       overlay->crtc->overlay = NULL;
-       overlay->crtc = NULL;
-       overlay->active = false;
-
-       if (IS_I830(dev_priv))
-               i830_overlay_clock_gating(dev_priv, true);
-}
-
-/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
-{
-       struct i915_request *rq;
-       u32 *cs, flip_addr = overlay->flip_addr;
-
-       WARN_ON(!overlay->active);
-
-       /* According to intel docs the overlay hw may hang (when switching
-        * off) without loading the filter coeffs. It is, however, unclear whether
-        * this applies to the disabling of the overlay or to the switching off
-        * of the hw. Do it in both cases. */
-       flip_addr |= OFC_UPDATE;
-
-       rq = alloc_request(overlay);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs)) {
-               i915_request_add(rq);
-               return PTR_ERR(cs);
-       }
-
-       /* wait for overlay to go idle */
-       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
-       *cs++ = flip_addr;
-       *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-
-       /* turn overlay off */
-       *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
-       *cs++ = flip_addr;
-       *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-
-       intel_ring_advance(rq, cs);
-
-       intel_overlay_flip_prepare(overlay, NULL);
-
-       return intel_overlay_do_wait_request(overlay, rq,
-                                            intel_overlay_off_tail);
-}
-
-/* recover from an interruption due to a signal
- * We have to be careful not to repeat work forever and to make forward progress. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
-{
-       return i915_active_request_retire(&overlay->last_flip,
-                                         &overlay->i915->drm.struct_mutex);
-}
-
-/* Wait for pending overlay flip and release old frame.
- * Needs to be called before the overlay registers are changed
- * via intel_overlay_(un)map_regs
- */
-static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       u32 *cs;
-       int ret;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       /* Only wait if there is actually an old frame to release to
-        * guarantee forward progress.
-        */
-       if (!overlay->old_vma)
-               return 0;
-
-       if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-               /* synchronous slowpath */
-               struct i915_request *rq;
-
-               rq = alloc_request(overlay);
-               if (IS_ERR(rq))
-                       return PTR_ERR(rq);
-
-               cs = intel_ring_begin(rq, 2);
-               if (IS_ERR(cs)) {
-                       i915_request_add(rq);
-                       return PTR_ERR(cs);
-               }
-
-               *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-               *cs++ = MI_NOOP;
-               intel_ring_advance(rq, cs);
-
-               ret = intel_overlay_do_wait_request(overlay, rq,
-                                                   intel_overlay_release_old_vid_tail);
-               if (ret)
-                       return ret;
-       } else
-               intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
-
-       return 0;
-}
-
-void intel_overlay_reset(struct drm_i915_private *dev_priv)
-{
-       struct intel_overlay *overlay = dev_priv->overlay;
-
-       if (!overlay)
-               return;
-
-       overlay->old_xscale = 0;
-       overlay->old_yscale = 0;
-       overlay->crtc = NULL;
-       overlay->active = false;
-}
-
-static int packed_depth_bytes(u32 format)
-{
-       switch (format & I915_OVERLAY_DEPTH_MASK) {
-       case I915_OVERLAY_YUV422:
-               return 4;
-       case I915_OVERLAY_YUV411:
-               /* return 6; not implemented */
-       default:
-               return -EINVAL;
-       }
-}
-
-static int packed_width_bytes(u32 format, short width)
-{
-       switch (format & I915_OVERLAY_DEPTH_MASK) {
-       case I915_OVERLAY_YUV422:
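-               /* packed 4:2:2 stores 2 bytes per pixel */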
-               return width << 1;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int uv_hsubsampling(u32 format)
-{
-       switch (format & I915_OVERLAY_DEPTH_MASK) {
-       case I915_OVERLAY_YUV422:
-       case I915_OVERLAY_YUV420:
-               return 2;
-       case I915_OVERLAY_YUV411:
-       case I915_OVERLAY_YUV410:
-               return 4;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int uv_vsubsampling(u32 format)
-{
-       switch (format & I915_OVERLAY_DEPTH_MASK) {
-       case I915_OVERLAY_YUV420:
-       case I915_OVERLAY_YUV410:
-               return 2;
-       case I915_OVERLAY_YUV422:
-       case I915_OVERLAY_YUV411:
-               return 1;
-       default:
-               return -EINVAL;
-       }
-}
-
-static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
-{
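-       /* e.g. on gen3+ an offset of 0x20 and a width of 720 gives
-        * ALIGN(0x20 + 720, 64) = 768, hence SWIDTHSW = (768 - 32) >> 3 = 92 */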
-       u32 sw;
-
-       if (IS_GEN(dev_priv, 2))
-               sw = ALIGN((offset & 31) + width, 32);
-       else
-               sw = ALIGN((offset & 63) + width, 64);
-
-       if (sw == 0)
-               return 0;
-
-       return (sw - 32) >> 3;
-}
-
-static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
-       [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
-       [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
-       [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
-       [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
-       [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
-       [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
-       [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
-       [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
-       [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
-       [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
-       [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
-       [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
-       [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
-       [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
-       [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
-       [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
-       [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
-};
-
-static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
-       [ 0] = { 0x3000, 0x1800, 0x1800, },
-       [ 1] = { 0xb000, 0x18d0, 0x2e60, },
-       [ 2] = { 0xb000, 0x1990, 0x2ce0, },
-       [ 3] = { 0xb020, 0x1a68, 0x2b40, },
-       [ 4] = { 0xb040, 0x1b20, 0x29e0, },
-       [ 5] = { 0xb060, 0x1bd8, 0x2880, },
-       [ 6] = { 0xb080, 0x1c88, 0x3e60, },
-       [ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
-       [ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
-       [ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
-       [10] = { 0xb100, 0x1eb8, 0x3620, },
-       [11] = { 0xb100, 0x1f18, 0x34a0, },
-       [12] = { 0xb100, 0x1f68, 0x3360, },
-       [13] = { 0xb0e0, 0x1fa8, 0x3240, },
-       [14] = { 0xb0c0, 0x1fe0, 0x3140, },
-       [15] = { 0xb060, 0x1ff0, 0x30a0, },
-       [16] = { 0x3000, 0x0800, 0x3000, },
-};
-
-static void update_polyphase_filter(struct overlay_registers __iomem *regs)
-{
-       memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
-       memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
-                   sizeof(uv_static_hcoeffs));
-}
-
-static bool update_scaling_factors(struct intel_overlay *overlay,
-                                  struct overlay_registers __iomem *regs,
-                                  struct drm_intel_overlay_put_image *params)
-{
-       /* fixed point with a 12 bit shift */
-       u32 xscale, yscale, xscale_UV, yscale_UV;
-#define FP_SHIFT 12
-#define FRACT_MASK 0xfff
-       bool scale_changed = false;
-       int uv_hscale = uv_hsubsampling(params->flags);
-       int uv_vscale = uv_vsubsampling(params->flags);
-
-       if (params->dst_width > 1)
-               xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
-                       params->dst_width;
-       else
-               xscale = 1 << FP_SHIFT;
-
-       if (params->dst_height > 1)
-               yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
-                       params->dst_height;
-       else
-               yscale = 1 << FP_SHIFT;
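-       /* e.g. scanning 1280 source pixels onto a 640 pixel wide destination
-        * gives xscale = (1279 << 12) / 640 = 8185, roughly 2.0 in 4.12
-        * fixed point */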
-
-       /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
-       xscale_UV = xscale/uv_hscale;
-       yscale_UV = yscale/uv_vscale;
-       /* make the Y scale to UV scale ratio an exact multiple */
-       xscale = xscale_UV * uv_hscale;
-       yscale = yscale_UV * uv_vscale;
-       /*} else {
-         xscale_UV = 0;
-         yscale_UV = 0;
-         }*/
-
-       if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
-               scale_changed = true;
-       overlay->old_xscale = xscale;
-       overlay->old_yscale = yscale;
-
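-       /* YRGBSCALE/UVSCALE: vertical fraction in bits 31:20, horizontal
-        * integer part from bit 16, horizontal fraction in bits 14:3.
-        * UVSCALEV holds the two vertical integer parts. */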
-       iowrite32(((yscale & FRACT_MASK) << 20) |
-                 ((xscale >> FP_SHIFT)  << 16) |
-                 ((xscale & FRACT_MASK) << 3),
-                &regs->YRGBSCALE);
-
-       iowrite32(((yscale_UV & FRACT_MASK) << 20) |
-                 ((xscale_UV >> FP_SHIFT)  << 16) |
-                 ((xscale_UV & FRACT_MASK) << 3),
-                &regs->UVSCALE);
-
-       iowrite32((((yscale    >> FP_SHIFT) << 16) |
-                  ((yscale_UV >> FP_SHIFT) << 0)),
-                &regs->UVSCALEV);
-
-       if (scale_changed)
-               update_polyphase_filter(regs);
-
-       return scale_changed;
-}
-
-static void update_colorkey(struct intel_overlay *overlay,
-                           struct overlay_registers __iomem *regs)
-{
-       const struct intel_plane_state *state =
-               to_intel_plane_state(overlay->crtc->base.primary->state);
-       u32 key = overlay->color_key;
-       u32 format = 0;
-       u32 flags = 0;
-
-       if (overlay->color_key_enabled)
-               flags |= DST_KEY_ENABLE;
-
-       if (state->base.visible)
-               format = state->base.fb->format->format;
-
-       switch (format) {
-       case DRM_FORMAT_C8:
-               key = 0;
-               flags |= CLK_RGB8I_MASK;
-               break;
-       case DRM_FORMAT_XRGB1555:
-               key = RGB15_TO_COLORKEY(key);
-               flags |= CLK_RGB15_MASK;
-               break;
-       case DRM_FORMAT_RGB565:
-               key = RGB16_TO_COLORKEY(key);
-               flags |= CLK_RGB16_MASK;
-               break;
-       default:
-               flags |= CLK_RGB24_MASK;
-               break;
-       }
-
-       iowrite32(key, &regs->DCLRKV);
-       iowrite32(flags, &regs->DCLRKM);
-}
-
-static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
-{
-       u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
-
-       if (params->flags & I915_OVERLAY_YUV_PLANAR) {
-               switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-                       cmd |= OCMD_YUV_422_PLANAR;
-                       break;
-               case I915_OVERLAY_YUV420:
-                       cmd |= OCMD_YUV_420_PLANAR;
-                       break;
-               case I915_OVERLAY_YUV411:
-               case I915_OVERLAY_YUV410:
-                       cmd |= OCMD_YUV_410_PLANAR;
-                       break;
-               }
-       } else { /* YUV packed */
-               switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-                       cmd |= OCMD_YUV_422_PACKED;
-                       break;
-               case I915_OVERLAY_YUV411:
-                       cmd |= OCMD_YUV_411_PACKED;
-                       break;
-               }
-
-               switch (params->flags & I915_OVERLAY_SWAP_MASK) {
-               case I915_OVERLAY_NO_SWAP:
-                       break;
-               case I915_OVERLAY_UV_SWAP:
-                       cmd |= OCMD_UV_SWAP;
-                       break;
-               case I915_OVERLAY_Y_SWAP:
-                       cmd |= OCMD_Y_SWAP;
-                       break;
-               case I915_OVERLAY_Y_AND_UV_SWAP:
-                       cmd |= OCMD_Y_AND_UV_SWAP;
-                       break;
-               }
-       }
-
-       return cmd;
-}
-
-static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-                                     struct drm_i915_gem_object *new_bo,
-                                     struct drm_intel_overlay_put_image *params)
-{
-       struct overlay_registers __iomem *regs = overlay->regs;
-       struct drm_i915_private *dev_priv = overlay->i915;
-       u32 swidth, swidthsw, sheight, ostride;
-       enum pipe pipe = overlay->crtc->pipe;
-       bool scale_changed = false;
-       struct i915_vma *vma;
-       int ret, tmp_width;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
-
-       ret = intel_overlay_release_old_vid(overlay);
-       if (ret != 0)
-               return ret;
-
-       atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
-       i915_gem_object_lock(new_bo);
-       vma = i915_gem_object_pin_to_display_plane(new_bo,
-                                                  0, NULL, PIN_MAPPABLE);
-       i915_gem_object_unlock(new_bo);
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto out_pin_section;
-       }
-       intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
-
-       ret = i915_vma_put_fence(vma);
-       if (ret)
-               goto out_unpin;
-
-       if (!overlay->active) {
-               u32 oconfig;
-
-               oconfig = OCONF_CC_OUT_8BIT;
-               if (IS_GEN(dev_priv, 4))
-                       oconfig |= OCONF_CSC_MODE_BT709;
-               oconfig |= pipe == 0 ?
-                       OCONF_PIPE_A : OCONF_PIPE_B;
-               iowrite32(oconfig, &regs->OCONFIG);
-
-               ret = intel_overlay_on(overlay);
-               if (ret != 0)
-                       goto out_unpin;
-       }
-
-       iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
-       iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);
-
-       if (params->flags & I915_OVERLAY_YUV_PACKED)
-               tmp_width = packed_width_bytes(params->flags,
-                                              params->src_width);
-       else
-               tmp_width = params->src_width;
-
-       swidth = params->src_width;
-       swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
-       sheight = params->src_height;
-       iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
-       ostride = params->stride_Y;
-
-       if (params->flags & I915_OVERLAY_YUV_PLANAR) {
-               int uv_hscale = uv_hsubsampling(params->flags);
-               int uv_vscale = uv_vsubsampling(params->flags);
-               u32 tmp_U, tmp_V;
-
-               swidth |= (params->src_width / uv_hscale) << 16;
-               sheight |= (params->src_height / uv_vscale) << 16;
-
-               tmp_U = calc_swidthsw(dev_priv, params->offset_U,
-                                     params->src_width / uv_hscale);
-               tmp_V = calc_swidthsw(dev_priv, params->offset_V,
-                                     params->src_width / uv_hscale);
-               swidthsw |= max(tmp_U, tmp_V) << 16;
-
-               iowrite32(i915_ggtt_offset(vma) + params->offset_U,
-                         &regs->OBUF_0U);
-               iowrite32(i915_ggtt_offset(vma) + params->offset_V,
-                         &regs->OBUF_0V);
-
-               ostride |= params->stride_UV << 16;
-       }
-
-       iowrite32(swidth, &regs->SWIDTH);
-       iowrite32(swidthsw, &regs->SWIDTHSW);
-       iowrite32(sheight, &regs->SHEIGHT);
-       iowrite32(ostride, &regs->OSTRIDE);
-
-       scale_changed = update_scaling_factors(overlay, regs, params);
-
-       update_colorkey(overlay, regs);
-
-       iowrite32(overlay_cmd_reg(params), &regs->OCMD);
-
-       ret = intel_overlay_continue(overlay, vma, scale_changed);
-       if (ret)
-               goto out_unpin;
-
-       return 0;
-
-out_unpin:
-       i915_gem_object_unpin_from_display_plane(vma);
-out_pin_section:
-       atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
-       return ret;
-}
-
-int intel_overlay_switch_off(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       int ret;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
-
-       ret = intel_overlay_recover_from_interrupt(overlay);
-       if (ret != 0)
-               return ret;
-
-       if (!overlay->active)
-               return 0;
-
-       ret = intel_overlay_release_old_vid(overlay);
-       if (ret != 0)
-               return ret;
-
-       iowrite32(0, &overlay->regs->OCMD);
-
-       return intel_overlay_off(overlay);
-}
-
-static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
-                                         struct intel_crtc *crtc)
-{
-       if (!crtc->active)
-               return -EINVAL;
-
-       /* can't use the overlay with double wide pipe */
-       if (crtc->config->double_wide)
-               return -EINVAL;
-
-       return 0;
-}
-
-static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       u32 pfit_control = I915_READ(PFIT_CONTROL);
-       u32 ratio;
-
-       /* XXX: This is not the same logic as in the xorg driver, but more in
-        * line with the intel documentation for the i965
-        */
-       if (INTEL_GEN(dev_priv) >= 4) {
-               /* on i965 use the PGM reg to read out the autoscaler values */
-               ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
-       } else {
-               if (pfit_control & VERT_AUTO_SCALE)
-                       ratio = I915_READ(PFIT_AUTO_RATIOS);
-               else
-                       ratio = I915_READ(PFIT_PGM_RATIOS);
-               ratio >>= PFIT_VERT_SCALE_SHIFT;
-       }
-
-       overlay->pfit_vscale_ratio = ratio;
-}
-
-static int check_overlay_dst(struct intel_overlay *overlay,
-                            struct drm_intel_overlay_put_image *rec)
-{
-       const struct intel_crtc_state *pipe_config =
-               overlay->crtc->config;
-
-       if (rec->dst_x < pipe_config->pipe_src_w &&
-           rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
-           rec->dst_y < pipe_config->pipe_src_h &&
-           rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
-               return 0;
-       else
-               return -EINVAL;
-}
-
-static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
-{
-       u32 tmp;
-
-       /* downscaling limit is 8.0 */
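-       /* the shift trick below yields the integer part of src/dst, so any
-        * ratio of 8.0 or more is rejected */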
-       tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
-       if (tmp > 7)
-               return -EINVAL;
-
-       tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
-       if (tmp > 7)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int check_overlay_src(struct drm_i915_private *dev_priv,
-                            struct drm_intel_overlay_put_image *rec,
-                            struct drm_i915_gem_object *new_bo)
-{
-       int uv_hscale = uv_hsubsampling(rec->flags);
-       int uv_vscale = uv_vsubsampling(rec->flags);
-       u32 stride_mask;
-       int depth;
-       u32 tmp;
-
-       /* check src dimensions */
-       if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
-               if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
-                   rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
-                       return -EINVAL;
-       } else {
-               if (rec->src_height > IMAGE_MAX_HEIGHT ||
-                   rec->src_width  > IMAGE_MAX_WIDTH)
-                       return -EINVAL;
-       }
-
-       /* better safe than sorry, use 4 as the maximal subsampling ratio */
-       if (rec->src_height < N_VERT_Y_TAPS*4 ||
-           rec->src_width  < N_HORIZ_Y_TAPS*4)
-               return -EINVAL;
-
-       /* check alignment constraints */
-       switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
-       case I915_OVERLAY_RGB:
-               /* not implemented */
-               return -EINVAL;
-
-       case I915_OVERLAY_YUV_PACKED:
-               if (uv_vscale != 1)
-                       return -EINVAL;
-
-               depth = packed_depth_bytes(rec->flags);
-               if (depth < 0)
-                       return depth;
-
-               /* ignore UV planes */
-               rec->stride_UV = 0;
-               rec->offset_U = 0;
-               rec->offset_V = 0;
-               /* check pixel alignment */
-               if (rec->offset_Y % depth)
-                       return -EINVAL;
-               break;
-
-       case I915_OVERLAY_YUV_PLANAR:
-               if (uv_vscale < 0 || uv_hscale < 0)
-                       return -EINVAL;
-               /* no offset restrictions for planar formats */
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       if (rec->src_width % uv_hscale)
-               return -EINVAL;
-
-       /* stride checking */
-       if (IS_I830(dev_priv) || IS_I845G(dev_priv))
-               stride_mask = 255;
-       else
-               stride_mask = 63;
-
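-       /* i.e. strides must be 64 byte aligned (256 bytes on i830/i845) */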
-       if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
-               return -EINVAL;
-       if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
-               return -EINVAL;
-
-       tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
-               4096 : 8192;
-       if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
-               return -EINVAL;
-
-       /* check buffer dimensions */
-       switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
-       case I915_OVERLAY_RGB:
-       case I915_OVERLAY_YUV_PACKED:
-               /* always 4 Y values per depth pixels */
-               if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
-                       return -EINVAL;
-
-               tmp = rec->stride_Y*rec->src_height;
-               if (rec->offset_Y + tmp > new_bo->base.size)
-                       return -EINVAL;
-               break;
-
-       case I915_OVERLAY_YUV_PLANAR:
-               if (rec->src_width > rec->stride_Y)
-                       return -EINVAL;
-               if (rec->src_width/uv_hscale > rec->stride_UV)
-                       return -EINVAL;
-
-               tmp = rec->stride_Y * rec->src_height;
-               if (rec->offset_Y + tmp > new_bo->base.size)
-                       return -EINVAL;
-
-               tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-               if (rec->offset_U + tmp > new_bo->base.size ||
-                   rec->offset_V + tmp > new_bo->base.size)
-                       return -EINVAL;
-               break;
-       }
-
-       return 0;
-}
-
-int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv)
-{
-       struct drm_intel_overlay_put_image *params = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_overlay *overlay;
-       struct drm_crtc *drmmode_crtc;
-       struct intel_crtc *crtc;
-       struct drm_i915_gem_object *new_bo;
-       int ret;
-
-       overlay = dev_priv->overlay;
-       if (!overlay) {
-               DRM_DEBUG("userspace bug: no overlay\n");
-               return -ENODEV;
-       }
-
-       if (!(params->flags & I915_OVERLAY_ENABLE)) {
-               drm_modeset_lock_all(dev);
-               mutex_lock(&dev->struct_mutex);
-
-               ret = intel_overlay_switch_off(overlay);
-
-               mutex_unlock(&dev->struct_mutex);
-               drm_modeset_unlock_all(dev);
-
-               return ret;
-       }
-
-       drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
-       if (!drmmode_crtc)
-               return -ENOENT;
-       crtc = to_intel_crtc(drmmode_crtc);
-
-       new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
-       if (!new_bo)
-               return -ENOENT;
-
-       drm_modeset_lock_all(dev);
-       mutex_lock(&dev->struct_mutex);
-
-       if (i915_gem_object_is_tiled(new_bo)) {
-               DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       ret = intel_overlay_recover_from_interrupt(overlay);
-       if (ret != 0)
-               goto out_unlock;
-
-       if (overlay->crtc != crtc) {
-               ret = intel_overlay_switch_off(overlay);
-               if (ret != 0)
-                       goto out_unlock;
-
-               ret = check_overlay_possible_on_crtc(overlay, crtc);
-               if (ret != 0)
-                       goto out_unlock;
-
-               overlay->crtc = crtc;
-               crtc->overlay = overlay;
-
-               /* line too wide, i.e. one-line-mode */
-               if (crtc->config->pipe_src_w > 1024 &&
-                   crtc->config->gmch_pfit.control & PFIT_ENABLE) {
-                       overlay->pfit_active = true;
-                       update_pfit_vscale_ratio(overlay);
-               } else
-                       overlay->pfit_active = false;
-       }
-
-       ret = check_overlay_dst(overlay, params);
-       if (ret != 0)
-               goto out_unlock;
-
-       if (overlay->pfit_active) {
-               params->dst_y = (((u32)params->dst_y << 12) /
-                                overlay->pfit_vscale_ratio);
-               /* shifting right rounds downwards, so add 1 */
-               params->dst_height = (((u32)params->dst_height << 12) /
-                                overlay->pfit_vscale_ratio) + 1;
-       }
-
-       if (params->src_scan_height > params->src_height ||
-           params->src_scan_width > params->src_width) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       ret = check_overlay_src(dev_priv, params, new_bo);
-       if (ret != 0)
-               goto out_unlock;
-
-       /* Check scaling after src size to prevent a divide-by-zero. */
-       ret = check_overlay_scaling(params);
-       if (ret != 0)
-               goto out_unlock;
-
-       ret = intel_overlay_do_put_image(overlay, new_bo, params);
-       if (ret != 0)
-               goto out_unlock;
-
-       mutex_unlock(&dev->struct_mutex);
-       drm_modeset_unlock_all(dev);
-       i915_gem_object_put(new_bo);
-
-       return 0;
-
-out_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       drm_modeset_unlock_all(dev);
-       i915_gem_object_put(new_bo);
-
-       return ret;
-}
-
-static void update_reg_attrs(struct intel_overlay *overlay,
-                            struct overlay_registers __iomem *regs)
-{
-       iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
-                 &regs->OCLRC0);
-       iowrite32(overlay->saturation, &regs->OCLRC1);
-}
-
-static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
-{
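-       /* each gamma value packs three bytes in its low 24 bits; the ramp
-        * must increase strictly, byte by byte, from one register to the next */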
-       int i;
-
-       if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
-               return false;
-
-       for (i = 0; i < 3; i++) {
-               if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
-                       return false;
-       }
-
-       return true;
-}
-
-static bool check_gamma5_errata(u32 gamma5)
-{
-       int i;
-
-       for (i = 0; i < 3; i++) {
-               if (((gamma5 >> i*8) & 0xff) == 0x80)
-                       return false;
-       }
-
-       return true;
-}
-
-static int check_gamma(struct drm_intel_overlay_attrs *attrs)
-{
-       if (!check_gamma_bounds(0, attrs->gamma0) ||
-           !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
-           !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
-           !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
-           !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
-           !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
-           !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
-               return -EINVAL;
-
-       if (!check_gamma5_errata(attrs->gamma5))
-               return -EINVAL;
-
-       return 0;
-}
-
-int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
-{
-       struct drm_intel_overlay_attrs *attrs = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_overlay *overlay;
-       int ret;
-
-       overlay = dev_priv->overlay;
-       if (!overlay) {
-               DRM_DEBUG("userspace bug: no overlay\n");
-               return -ENODEV;
-       }
-
-       drm_modeset_lock_all(dev);
-       mutex_lock(&dev->struct_mutex);
-
-       ret = -EINVAL;
-       if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
-               attrs->color_key  = overlay->color_key;
-               attrs->brightness = overlay->brightness;
-               attrs->contrast   = overlay->contrast;
-               attrs->saturation = overlay->saturation;
-
-               if (!IS_GEN(dev_priv, 2)) {
-                       attrs->gamma0 = I915_READ(OGAMC0);
-                       attrs->gamma1 = I915_READ(OGAMC1);
-                       attrs->gamma2 = I915_READ(OGAMC2);
-                       attrs->gamma3 = I915_READ(OGAMC3);
-                       attrs->gamma4 = I915_READ(OGAMC4);
-                       attrs->gamma5 = I915_READ(OGAMC5);
-               }
-       } else {
-               if (attrs->brightness < -128 || attrs->brightness > 127)
-                       goto out_unlock;
-               if (attrs->contrast > 255)
-                       goto out_unlock;
-               if (attrs->saturation > 1023)
-                       goto out_unlock;
-
-               overlay->color_key  = attrs->color_key;
-               overlay->brightness = attrs->brightness;
-               overlay->contrast   = attrs->contrast;
-               overlay->saturation = attrs->saturation;
-
-               update_reg_attrs(overlay, overlay->regs);
-
-               if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-                       if (IS_GEN(dev_priv, 2))
-                               goto out_unlock;
-
-                       if (overlay->active) {
-                               ret = -EBUSY;
-                               goto out_unlock;
-                       }
-
-                       ret = check_gamma(attrs);
-                       if (ret)
-                               goto out_unlock;
-
-                       I915_WRITE(OGAMC0, attrs->gamma0);
-                       I915_WRITE(OGAMC1, attrs->gamma1);
-                       I915_WRITE(OGAMC2, attrs->gamma2);
-                       I915_WRITE(OGAMC3, attrs->gamma3);
-                       I915_WRITE(OGAMC4, attrs->gamma4);
-                       I915_WRITE(OGAMC5, attrs->gamma5);
-               }
-       }
-       overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
-
-       ret = 0;
-out_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       drm_modeset_unlock_all(dev);
-
-       return ret;
-}
-
-static int get_registers(struct intel_overlay *overlay, bool use_phys)
-{
-       struct drm_i915_private *i915 = overlay->i915;
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
-       int err;
-
-       mutex_lock(&i915->drm.struct_mutex);
-
-       obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
-       if (obj == NULL)
-               obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-       if (IS_ERR(obj)) {
-               err = PTR_ERR(obj);
-               goto err_unlock;
-       }
-
-       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
-       if (IS_ERR(vma)) {
-               err = PTR_ERR(vma);
-               goto err_put_bo;
-       }
-
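-       /* flip_addr is where the overlay fetches its register block from:
-        * a physical address on platforms that need one, a GGTT offset
-        * otherwise */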
-       if (use_phys)
-               overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
-       else
-               overlay->flip_addr = i915_ggtt_offset(vma);
-       overlay->regs = i915_vma_pin_iomap(vma);
-       i915_vma_unpin(vma);
-
-       if (IS_ERR(overlay->regs)) {
-               err = PTR_ERR(overlay->regs);
-               goto err_put_bo;
-       }
-
-       overlay->reg_bo = obj;
-       mutex_unlock(&i915->drm.struct_mutex);
-       return 0;
-
-err_put_bo:
-       i915_gem_object_put(obj);
-err_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
-       return err;
-}
-
-void intel_overlay_setup(struct drm_i915_private *dev_priv)
-{
-       struct intel_overlay *overlay;
-       int ret;
-
-       if (!HAS_OVERLAY(dev_priv))
-               return;
-
-       overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
-       if (!overlay)
-               return;
-
-       overlay->i915 = dev_priv;
-
-       overlay->color_key = 0x0101fe;
-       overlay->color_key_enabled = true;
-       overlay->brightness = -19;
-       overlay->contrast = 75;
-       overlay->saturation = 146;
-
-       INIT_ACTIVE_REQUEST(&overlay->last_flip);
-
-       ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
-       if (ret)
-               goto out_free;
-
-       memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
-       update_polyphase_filter(overlay->regs);
-       update_reg_attrs(overlay, overlay->regs);
-
-       dev_priv->overlay = overlay;
-       DRM_INFO("Initialized overlay support.\n");
-       return;
-
-out_free:
-       kfree(overlay);
-}
-
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
-{
-       struct intel_overlay *overlay;
-
-       overlay = fetch_and_zero(&dev_priv->overlay);
-       if (!overlay)
-               return;
-
-       /*
-        * The BOs should be freed by the generic code already.
-        * Furthermore, modesetting teardown happens beforehand, so the
-        * hardware should be off already.
-        */
-       WARN_ON(overlay->active);
-
-       i915_gem_object_put(overlay->reg_bo);
-
-       kfree(overlay);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-struct intel_overlay_error_state {
-       struct overlay_registers regs;
-       unsigned long base;
-       u32 dovsta;
-       u32 isr;
-};
-
-struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
-{
-       struct intel_overlay *overlay = dev_priv->overlay;
-       struct intel_overlay_error_state *error;
-
-       if (!overlay || !overlay->active)
-               return NULL;
-
-       error = kmalloc(sizeof(*error), GFP_ATOMIC);
-       if (error == NULL)
-               return NULL;
-
-       error->dovsta = I915_READ(DOVSTA);
-       error->isr = I915_READ(GEN2_ISR);
-       error->base = overlay->flip_addr;
-
-       memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
-
-       return error;
-}
-
-void
-intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
-                               struct intel_overlay_error_state *error)
-{
-       i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
-                         error->dovsta, error->isr);
-       i915_error_printf(m, "  Register file at 0x%08lx:\n",
-                         error->base);
-
-#define P(x) i915_error_printf(m, "    " #x ": 0x%08x\n", error->regs.x)
-       P(OBUF_0Y);
-       P(OBUF_1Y);
-       P(OBUF_0U);
-       P(OBUF_0V);
-       P(OBUF_1U);
-       P(OBUF_1V);
-       P(OSTRIDE);
-       P(YRGB_VPH);
-       P(UV_VPH);
-       P(HORZ_PH);
-       P(INIT_PHS);
-       P(DWINPOS);
-       P(DWINSZ);
-       P(SWIDTH);
-       P(SWIDTHSW);
-       P(SHEIGHT);
-       P(YRGBSCALE);
-       P(UVSCALE);
-       P(OCLRC0);
-       P(OCLRC1);
-       P(DCLRKV);
-       P(DCLRKM);
-       P(SCLRKVH);
-       P(SCLRKVL);
-       P(SCLRKEN);
-       P(OCONFIG);
-       P(OCMD);
-       P(OSTART_0Y);
-       P(OSTART_1Y);
-       P(OSTART_0U);
-       P(OSTART_0V);
-       P(OSTART_1U);
-       P(OSTART_1V);
-       P(OTILEOFF_0Y);
-       P(OTILEOFF_1Y);
-       P(OTILEOFF_0U);
-       P(OTILEOFF_0V);
-       P(OTILEOFF_1U);
-       P(OTILEOFF_1V);
-       P(FASTHSCALE);
-       P(UVSCALEV);
-#undef P
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_overlay.h b/drivers/gpu/drm/i915/intel_overlay.h
deleted file mode 100644 (file)
index a167c28..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_OVERLAY_H__
-#define __INTEL_OVERLAY_H__
-
-struct drm_device;
-struct drm_file;
-struct drm_i915_error_state_buf;
-struct drm_i915_private;
-struct intel_overlay;
-struct intel_overlay_error_state;
-
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
-int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
-struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
-void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
-                                    struct intel_overlay_error_state *error);
-
-#endif /* __INTEL_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
deleted file mode 100644 (file)
index 1e2c430..0000000
+++ /dev/null
@@ -1,671 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Author: Damien Lespiau <damien.lespiau@intel.com>
- *
- */
-
-#include <linux/circ_buf.h>
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include "intel_atomic.h"
-#include "intel_drv.h"
-#include "intel_pipe_crc.h"
-
-static const char * const pipe_crc_sources[] = {
-       [INTEL_PIPE_CRC_SOURCE_NONE] = "none",
-       [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
-       [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
-       [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
-       [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
-       [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
-       [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
-       [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
-       [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
-       [INTEL_PIPE_CRC_SOURCE_TV] = "TV",
-       [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
-       [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
-       [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
-       [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
-};
-
-static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
-                                u32 *val)
-{
-       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
-               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
-       switch (*source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               *val = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
-                                    enum pipe pipe,
-                                    enum intel_pipe_crc_source *source)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_encoder *encoder;
-       struct intel_crtc *crtc;
-       struct intel_digital_port *dig_port;
-       int ret = 0;
-
-       *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
-       drm_modeset_lock_all(dev);
-       for_each_intel_encoder(dev, encoder) {
-               if (!encoder->base.crtc)
-                       continue;
-
-               crtc = to_intel_crtc(encoder->base.crtc);
-
-               if (crtc->pipe != pipe)
-                       continue;
-
-               switch (encoder->type) {
-               case INTEL_OUTPUT_TVOUT:
-                       *source = INTEL_PIPE_CRC_SOURCE_TV;
-                       break;
-               case INTEL_OUTPUT_DP:
-               case INTEL_OUTPUT_EDP:
-                       dig_port = enc_to_dig_port(&encoder->base);
-                       switch (dig_port->base.port) {
-                       case PORT_B:
-                               *source = INTEL_PIPE_CRC_SOURCE_DP_B;
-                               break;
-                       case PORT_C:
-                               *source = INTEL_PIPE_CRC_SOURCE_DP_C;
-                               break;
-                       case PORT_D:
-                               *source = INTEL_PIPE_CRC_SOURCE_DP_D;
-                               break;
-                       default:
-                               WARN(1, "nonexistent DP port %c\n",
-                                    port_name(dig_port->base.port));
-                               break;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-       drm_modeset_unlock_all(dev);
-
-       return ret;
-}
-
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
-                               enum pipe pipe,
-                               enum intel_pipe_crc_source *source,
-                               u32 *val)
-{
-       bool need_stable_symbols = false;
-
-       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
-               if (ret)
-                       return ret;
-       }
-
-       switch (*source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_DP_B:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
-               need_stable_symbols = true;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_DP_C:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
-               need_stable_symbols = true;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_DP_D:
-               if (!IS_CHERRYVIEW(dev_priv))
-                       return -EINVAL;
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
-               need_stable_symbols = true;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               *val = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /*
-        * When the pipe CRC tap point is after the transcoders we need
-        * to tweak symbol-level features to produce a deterministic series of
-        * symbols for a given frame. We need to reset those features only once
-        * per frame (instead of on every nth symbol):
-        *   - DC-balance: used to ensure a better clock recovery from the data
-        *     link (SDVO)
-        *   - DisplayPort scrambling: used for EMI reduction
-        */
-       if (need_stable_symbols) {
-               u32 tmp = I915_READ(PORT_DFT2_G4X);
-
-               tmp |= DC_BALANCE_RESET_VLV;
-               switch (pipe) {
-               case PIPE_A:
-                       tmp |= PIPE_A_SCRAMBLE_RESET;
-                       break;
-               case PIPE_B:
-                       tmp |= PIPE_B_SCRAMBLE_RESET;
-                       break;
-               case PIPE_C:
-                       tmp |= PIPE_C_SCRAMBLE_RESET;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               I915_WRITE(PORT_DFT2_G4X, tmp);
-       }
-
-       return 0;
-}
-
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
-                                enum pipe pipe,
-                                enum intel_pipe_crc_source *source,
-                                u32 *val)
-{
-       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
-               if (ret)
-                       return ret;
-       }
-
-       switch (*source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_TV:
-               if (!SUPPORTS_TV(dev_priv))
-                       return -EINVAL;
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               *val = 0;
-               break;
-       default:
-               /*
-                * The DP CRC source doesn't work on g4x.
-                * It can be made to work to some degree by selecting
-                * the correct CRC source before the port is enabled,
-                * and not touching the CRC source bits again until
-                * the port is disabled. But even then the bits
-                * eventually get stuck and a reboot is needed to get
-                * working CRCs on the pipe again. Let's simply
-                * refuse to use DP CRCs on g4x.
-                */
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
-                                        enum pipe pipe)
-{
-       u32 tmp = I915_READ(PORT_DFT2_G4X);
-
-       switch (pipe) {
-       case PIPE_A:
-               tmp &= ~PIPE_A_SCRAMBLE_RESET;
-               break;
-       case PIPE_B:
-               tmp &= ~PIPE_B_SCRAMBLE_RESET;
-               break;
-       case PIPE_C:
-               tmp &= ~PIPE_C_SCRAMBLE_RESET;
-               break;
-       default:
-               return;
-       }
-       if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
-               tmp &= ~DC_BALANCE_RESET_VLV;
-       I915_WRITE(PORT_DFT2_G4X, tmp);
-}
-
-static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
-                               u32 *val)
-{
-       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
-               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
-       switch (*source) {
-       case INTEL_PIPE_CRC_SOURCE_PLANE1:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE2:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               *val = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void
-intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_crtc_state *pipe_config;
-       struct drm_atomic_state *state;
-       struct drm_modeset_acquire_ctx ctx;
-       int ret;
-
-       drm_modeset_acquire_init(&ctx, 0);
-
-       state = drm_atomic_state_alloc(&dev_priv->drm);
-       if (!state) {
-               ret = -ENOMEM;
-               goto unlock;
-       }
-
-       state->acquire_ctx = &ctx;
-
-retry:
-       pipe_config = intel_atomic_get_crtc_state(state, crtc);
-       if (IS_ERR(pipe_config)) {
-               ret = PTR_ERR(pipe_config);
-               goto put_state;
-       }
-
-       pipe_config->base.mode_changed = pipe_config->has_psr;
-       pipe_config->crc_enabled = enable;
-
-       if (IS_HASWELL(dev_priv) &&
-           pipe_config->base.active && crtc->pipe == PIPE_A &&
-           pipe_config->cpu_transcoder == TRANSCODER_EDP)
-               pipe_config->base.mode_changed = true;
-
-       ret = drm_atomic_commit(state);
-
-put_state:
-       if (ret == -EDEADLK) {
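-               /* locks were contended: drop them, back off and retry the
-                * whole atomic transaction */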
-               drm_atomic_state_clear(state);
-               drm_modeset_backoff(&ctx);
-               goto retry;
-       }
-
-       drm_atomic_state_put(state);
-unlock:
-       WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-}
-
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
-                               enum pipe pipe,
-                               enum intel_pipe_crc_source *source,
-                               u32 *val)
-{
-       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
-               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
-       switch (*source) {
-       case INTEL_PIPE_CRC_SOURCE_PLANE1:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE2:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               *val = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
-                               enum pipe pipe,
-                               enum intel_pipe_crc_source *source,
-                               u32 *val)
-{
-       if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
-               *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
-       switch (*source) {
-       case INTEL_PIPE_CRC_SOURCE_PLANE1:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE2:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE3:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE4:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE5:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE6:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PLANE7:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
-               break;
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               *val = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
-                              enum pipe pipe,
-                              enum intel_pipe_crc_source *source, u32 *val)
-{
-       if (IS_GEN(dev_priv, 2))
-               return i8xx_pipe_crc_ctl_reg(source, val);
-       else if (INTEL_GEN(dev_priv) < 5)
-               return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-       else if (IS_GEN_RANGE(dev_priv, 5, 6))
-               return ilk_pipe_crc_ctl_reg(source, val);
-       else if (INTEL_GEN(dev_priv) < 9)
-               return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-       else
-               return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
-}
-
-static int
-display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
-{
-       int i;
-
-       if (!buf) {
-               *s = INTEL_PIPE_CRC_SOURCE_NONE;
-               return 0;
-       }
-
-       i = match_string(pipe_crc_sources, ARRAY_SIZE(pipe_crc_sources), buf);
-       if (i < 0)
-               return i;
-
-       *s = i;
-       return 0;
-}
-
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
-{
-       enum pipe pipe;
-
-       for_each_pipe(dev_priv, pipe) {
-               struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
-               spin_lock_init(&pipe_crc->lock);
-       }
-}
-
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
-                                const enum intel_pipe_crc_source source)
-{
-       switch (source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
-                                const enum intel_pipe_crc_source source)
-{
-       switch (source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-       case INTEL_PIPE_CRC_SOURCE_TV:
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
-                               const enum intel_pipe_crc_source source)
-{
-       switch (source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-       case INTEL_PIPE_CRC_SOURCE_DP_B:
-       case INTEL_PIPE_CRC_SOURCE_DP_C:
-       case INTEL_PIPE_CRC_SOURCE_DP_D:
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
-                               const enum intel_pipe_crc_source source)
-{
-       switch (source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-       case INTEL_PIPE_CRC_SOURCE_PLANE1:
-       case INTEL_PIPE_CRC_SOURCE_PLANE2:
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
-                               const enum intel_pipe_crc_source source)
-{
-       switch (source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-       case INTEL_PIPE_CRC_SOURCE_PLANE1:
-       case INTEL_PIPE_CRC_SOURCE_PLANE2:
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
-                               const enum intel_pipe_crc_source source)
-{
-       switch (source) {
-       case INTEL_PIPE_CRC_SOURCE_PIPE:
-       case INTEL_PIPE_CRC_SOURCE_PLANE1:
-       case INTEL_PIPE_CRC_SOURCE_PLANE2:
-       case INTEL_PIPE_CRC_SOURCE_PLANE3:
-       case INTEL_PIPE_CRC_SOURCE_PLANE4:
-       case INTEL_PIPE_CRC_SOURCE_PLANE5:
-       case INTEL_PIPE_CRC_SOURCE_PLANE6:
-       case INTEL_PIPE_CRC_SOURCE_PLANE7:
-       case INTEL_PIPE_CRC_SOURCE_NONE:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
-                         const enum intel_pipe_crc_source source)
-{
-       if (IS_GEN(dev_priv, 2))
-               return i8xx_crc_source_valid(dev_priv, source);
-       else if (INTEL_GEN(dev_priv) < 5)
-               return i9xx_crc_source_valid(dev_priv, source);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return vlv_crc_source_valid(dev_priv, source);
-       else if (IS_GEN_RANGE(dev_priv, 5, 6))
-               return ilk_crc_source_valid(dev_priv, source);
-       else if (INTEL_GEN(dev_priv) < 9)
-               return ivb_crc_source_valid(dev_priv, source);
-       else
-               return skl_crc_source_valid(dev_priv, source);
-}
-
-const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
-                                             size_t *count)
-{
-       *count = ARRAY_SIZE(pipe_crc_sources);
-       return pipe_crc_sources;
-}
-
-int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
-                                size_t *values_cnt)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       enum intel_pipe_crc_source source;
-
-       if (display_crc_ctl_parse_source(source_name, &source) < 0) {
-               DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
-               return -EINVAL;
-       }
-
-       if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
-           intel_is_valid_crc_source(dev_priv, source) == 0) {
-               *values_cnt = 5;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
-       enum intel_display_power_domain power_domain;
-       enum intel_pipe_crc_source source;
-       intel_wakeref_t wakeref;
-       u32 val = 0; /* shut up gcc */
-       int ret = 0;
-       bool enable;
-
-       if (display_crc_ctl_parse_source(source_name, &source) < 0) {
-               DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
-               return -EINVAL;
-       }
-
-       power_domain = POWER_DOMAIN_PIPE(crtc->index);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref) {
-               DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
-               return -EIO;
-       }
-
-       enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
-       if (enable)
-               intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
-
-       ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
-       if (ret != 0)
-               goto out;
-
-       pipe_crc->source = source;
-       I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
-       POSTING_READ(PIPE_CRC_CTL(crtc->index));
-
-       if (!source) {
-               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-                       vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
-       }
-
-       pipe_crc->skipped = 0;
-
-out:
-       if (!enable)
-               intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
-{
-       struct drm_crtc *crtc = &intel_crtc->base;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
-       u32 val = 0;
-
-       if (!crtc->crc.opened)
-               return;
-
-       if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
-               return;
-
-       /* Don't need pipe_crc->lock here, IRQs are not generated. */
-       pipe_crc->skipped = 0;
-
-       I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
-       POSTING_READ(PIPE_CRC_CTL(crtc->index));
-}
-
-void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
-{
-       struct drm_crtc *crtc = &intel_crtc->base;
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
-
-       /* Swallow CRCs until we stop generating them. */
-       spin_lock_irq(&pipe_crc->lock);
-       pipe_crc->skipped = INT_MIN;
-       spin_unlock_irq(&pipe_crc->lock);
-
-       I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
-       POSTING_READ(PIPE_CRC_CTL(crtc->index));
-       synchronize_irq(dev_priv->drm.irq);
-}
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/intel_pipe_crc.h
deleted file mode 100644 (file)
index db258a7..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_PIPE_CRC_H__
-#define __INTEL_PIPE_CRC_H__
-
-#include <linux/types.h>
-
-struct drm_crtc;
-struct drm_i915_private;
-struct intel_crtc;
-
-#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
-int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
-                                const char *source_name, size_t *values_cnt);
-const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
-                                             size_t *count);
-void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
-void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
-#else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
-#define intel_crtc_set_crc_source NULL
-#define intel_crtc_verify_crc_source NULL
-#define intel_crtc_get_crc_sources NULL
-static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-
-static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-#endif
-
-#endif /* __INTEL_PIPE_CRC_H__ */
index b03e2a467e8b3c792adfa2b2a1a0fa4ed5b88b4b..d9a7a13ce32aca890e40a9da788e29abc86d08f5 100644 (file)
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane_helper.h>
 
+#include "display/intel_atomic.h"
+#include "display/intel_fbc.h"
+#include "display/intel_sprite.h"
+
 #include "i915_drv.h"
 #include "i915_irq.h"
-#include "intel_atomic.h"
 #include "intel_drv.h"
-#include "intel_fbc.h"
 #include "intel_pm.h"
-#include "intel_sprite.h"
 #include "intel_sideband.h"
 #include "../../../platform/x86/intel_ips.h"
 
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
deleted file mode 100644 (file)
index 69709df..0000000
+++ /dev/null
@@ -1,1303 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm_atomic_helper.h>
-
-#include "display/intel_dp.h"
-
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_psr.h"
-#include "intel_sprite.h"
-
-/**
- * DOC: Panel Self Refresh (PSR/SRD)
- *
- * Since Haswell, the display controller supports Panel Self-Refresh on
- * display panels which have a remote frame buffer (RFB) implemented
- * according to the PSR spec in eDP 1.3. The PSR feature allows the display
- * to go to lower standby states when the system is idle but the display is
- * on, as it eliminates display refresh requests to DDR memory completely
- * as long as the frame buffer for that display is unchanged.
- *
- * Panel Self Refresh must be supported by both Hardware (source) and
- * Panel (sink).
- *
- * PSR saves power by caching the framebuffer in the panel RFB, which allows us
- * to power down the link and memory controller. For DSI panels the same idea
- * is called "manual mode".
- *
- * The implementation uses the hardware-based PSR support which automatically
- * enters/exits self-refresh mode. The hardware takes care of sending the
- * required DP aux message and could even retrain the link (that part isn't
- * enabled yet though). The hardware also keeps track of any frontbuffer
- * changes to know when to exit self-refresh mode again. Unfortunately that
- * part doesn't work too well, which is why the i915 PSR support uses the
- * software frontbuffer tracking to make sure it doesn't miss a screen
- * update. For this integration intel_psr_invalidate() and intel_psr_flush()
- * get called by the frontbuffer tracking code. Note that because of locking
- * issues the self-refresh re-enable code is done from a work queue, which
- * must be correctly synchronized/cancelled when shutting down the pipe.
- */
-
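The integration described above can be summarized with a minimal sketch of a
frontbuffer tracking call site; the wrapper function and its name below are
hypothetical, only the intel_psr_*() calls and their signatures come from this
file:

	/* Hypothetical frontbuffer tracking call site, for illustration only. */
	static void example_frontbuffer_dirty(struct drm_i915_private *i915,
					      unsigned int frontbuffer_bits,
					      enum fb_op_origin origin)
	{
		/* Rendering started: PSR must exit for the affected planes. */
		intel_psr_invalidate(i915, frontbuffer_bits, origin);

		/* ... rendering completes and is flushed out to memory ... */

		/* Flush done: PSR may be re-armed from the work queue. */
		intel_psr_flush(i915, frontbuffer_bits, origin);
	}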
-static bool psr_global_enabled(u32 debug)
-{
-       switch (debug & I915_PSR_DEBUG_MODE_MASK) {
-       case I915_PSR_DEBUG_DEFAULT:
-               return i915_modparams.enable_psr;
-       case I915_PSR_DEBUG_DISABLE:
-               return false;
-       default:
-               return true;
-       }
-}
-
-static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
-                              const struct intel_crtc_state *crtc_state)
-{
-       /* Cannot enable DSC and PSR2 simultaneously */
-       WARN_ON(crtc_state->dsc_params.compression_enable &&
-               crtc_state->has_psr2);
-
-       switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
-       case I915_PSR_DEBUG_DISABLE:
-       case I915_PSR_DEBUG_FORCE_PSR1:
-               return false;
-       default:
-               return crtc_state->has_psr2;
-       }
-}
-
-static int edp_psr_shift(enum transcoder cpu_transcoder)
-{
-       switch (cpu_transcoder) {
-       case TRANSCODER_A:
-               return EDP_PSR_TRANSCODER_A_SHIFT;
-       case TRANSCODER_B:
-               return EDP_PSR_TRANSCODER_B_SHIFT;
-       case TRANSCODER_C:
-               return EDP_PSR_TRANSCODER_C_SHIFT;
-       default:
-               MISSING_CASE(cpu_transcoder);
-               /* fallthrough */
-       case TRANSCODER_EDP:
-               return EDP_PSR_TRANSCODER_EDP_SHIFT;
-       }
-}
-
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
-{
-       u32 debug_mask, mask;
-       enum transcoder cpu_transcoder;
-       u32 transcoders = BIT(TRANSCODER_EDP);
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               transcoders |= BIT(TRANSCODER_A) |
-                              BIT(TRANSCODER_B) |
-                              BIT(TRANSCODER_C);
-
-       debug_mask = 0;
-       mask = 0;
-       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
-               int shift = edp_psr_shift(cpu_transcoder);
-
-               mask |= EDP_PSR_ERROR(shift);
-               debug_mask |= EDP_PSR_POST_EXIT(shift) |
-                             EDP_PSR_PRE_ENTRY(shift);
-       }
-
-       if (debug & I915_PSR_DEBUG_IRQ)
-               mask |= debug_mask;
-
-       I915_WRITE(EDP_PSR_IMR, ~mask);
-}
-
-static void psr_event_print(u32 val, bool psr2_enabled)
-{
-       DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
-       if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
-               DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
-       if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
-               DRM_DEBUG_KMS("\tPSR2 disabled\n");
-       if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
-               DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
-       if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
-               DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
-       if (val & PSR_EVENT_GRAPHICS_RESET)
-               DRM_DEBUG_KMS("\tGraphics reset\n");
-       if (val & PSR_EVENT_PCH_INTERRUPT)
-               DRM_DEBUG_KMS("\tPCH interrupt\n");
-       if (val & PSR_EVENT_MEMORY_UP)
-               DRM_DEBUG_KMS("\tMemory up\n");
-       if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
-               DRM_DEBUG_KMS("\tFront buffer modification\n");
-       if (val & PSR_EVENT_WD_TIMER_EXPIRE)
-               DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
-       if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
-               DRM_DEBUG_KMS("\tPIPE registers updated\n");
-       if (val & PSR_EVENT_REGISTER_UPDATE)
-               DRM_DEBUG_KMS("\tRegister updated\n");
-       if (val & PSR_EVENT_HDCP_ENABLE)
-               DRM_DEBUG_KMS("\tHDCP enabled\n");
-       if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
-               DRM_DEBUG_KMS("\tKVMR session enabled\n");
-       if (val & PSR_EVENT_VBI_ENABLE)
-               DRM_DEBUG_KMS("\tVBI enabled\n");
-       if (val & PSR_EVENT_LPSP_MODE_EXIT)
-               DRM_DEBUG_KMS("\tLPSP mode exited\n");
-       if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
-               DRM_DEBUG_KMS("\tPSR disabled\n");
-}
-
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
-{
-       u32 transcoders = BIT(TRANSCODER_EDP);
-       enum transcoder cpu_transcoder;
-       ktime_t time_ns =  ktime_get();
-       u32 mask = 0;
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               transcoders |= BIT(TRANSCODER_A) |
-                              BIT(TRANSCODER_B) |
-                              BIT(TRANSCODER_C);
-
-       for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
-               int shift = edp_psr_shift(cpu_transcoder);
-
-               if (psr_iir & EDP_PSR_ERROR(shift)) {
-                       DRM_WARN("[transcoder %s] PSR aux error\n",
-                                transcoder_name(cpu_transcoder));
-
-                       dev_priv->psr.irq_aux_error = true;
-
-                       /*
-                        * If this interrupt is not masked it will keep firing
-                        * so fast that it prevents the scheduled work from
-                        * running.
-                        * Also, after a PSR error we don't want to arm PSR
-                        * again, so we don't care about unmasking the
-                        * interrupt or clearing irq_aux_error.
-                        */
-                       mask |= EDP_PSR_ERROR(shift);
-               }
-
-               if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
-                       dev_priv->psr.last_entry_attempt = time_ns;
-                       DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
-                                     transcoder_name(cpu_transcoder));
-               }
-
-               if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
-                       dev_priv->psr.last_exit = time_ns;
-                       DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
-                                     transcoder_name(cpu_transcoder));
-
-                       if (INTEL_GEN(dev_priv) >= 9) {
-                               u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
-                               bool psr2_enabled = dev_priv->psr.psr2_enabled;
-
-                               I915_WRITE(PSR_EVENT(cpu_transcoder), val);
-                               psr_event_print(val, psr2_enabled);
-                       }
-               }
-       }
-
-       if (mask) {
-               mask |= I915_READ(EDP_PSR_IMR);
-               I915_WRITE(EDP_PSR_IMR, mask);
-
-               schedule_work(&dev_priv->psr.work);
-       }
-}
-
-static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
-{
-       u8 alpm_caps = 0;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
-                             &alpm_caps) != 1)
-               return false;
-       return alpm_caps & DP_ALPM_CAP;
-}
-
-static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
-{
-       u8 val = 8; /* assume the worst if we can't read the value */
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux,
-                             DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
-               val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
-       else
-               DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
-       return val;
-}
-
-static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
-{
-       u16 val;
-       ssize_t r;
-
-       /*
-        * Return the default X granularity if granularity is not required
-        * or the DPCD read fails.
-        */
-       if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
-               return 4;
-
-       r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
-       if (r != 2)
-               DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
-
-       /*
-        * Spec says that if the value read is 0 the default granularity should
-        * be used instead.
-        */
-       if (r != 2 || val == 0)
-               val = 4;
-
-       return val;
-}
-
-void intel_psr_init_dpcd(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
-
-       drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
-                        sizeof(intel_dp->psr_dpcd));
-
-       if (!intel_dp->psr_dpcd[0])
-               return;
-       DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
-                     intel_dp->psr_dpcd[0]);
-
-       if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
-               DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
-               return;
-       }
-
-       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
-               DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
-               return;
-       }
-
-       dev_priv->psr.sink_support = true;
-       dev_priv->psr.sink_sync_latency =
-               intel_dp_get_sink_sync_latency(intel_dp);
-
-       WARN_ON(dev_priv->psr.dp);
-       dev_priv->psr.dp = intel_dp;
-
-       if (INTEL_GEN(dev_priv) >= 9 &&
-           (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
-               bool y_req = intel_dp->psr_dpcd[1] &
-                            DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-               bool alpm = intel_dp_get_alpm_status(intel_dp);
-
-               /*
-                * All panels that support PSR version 03h (PSR2 +
-                * Y-coordinate) can handle Y-coordinates in VSC, but we are
-                * only sure that it is going to be used when required by the
-                * panel. This way the panel is capable of doing selective
-                * updates without an AUX frame sync.
-                *
-                * To support panels with PSR version 02h, or PSR version 03h
-                * without the Y-coordinate requirement, we would need to
-                * enable GTC first.
-                */
-               dev_priv->psr.sink_psr2_support = y_req && alpm;
-               DRM_DEBUG_KMS("PSR2 %ssupported\n",
-                             dev_priv->psr.sink_psr2_support ? "" : "not ");
-
-               if (dev_priv->psr.sink_psr2_support) {
-                       dev_priv->psr.colorimetry_support =
-                               intel_dp_get_colorimetry_status(intel_dp);
-                       dev_priv->psr.su_x_granularity =
-                               intel_dp_get_su_x_granulartiy(intel_dp);
-               }
-       }
-}
-
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct dp_sdp psr_vsc;
-
-       if (dev_priv->psr.psr2_enabled) {
-               /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
-               memset(&psr_vsc, 0, sizeof(psr_vsc));
-               psr_vsc.sdp_header.HB0 = 0;
-               psr_vsc.sdp_header.HB1 = 0x7;
-               if (dev_priv->psr.colorimetry_support) {
-                       psr_vsc.sdp_header.HB2 = 0x5;
-                       psr_vsc.sdp_header.HB3 = 0x13;
-               } else {
-                       psr_vsc.sdp_header.HB2 = 0x4;
-                       psr_vsc.sdp_header.HB3 = 0xe;
-               }
-       } else {
-               /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
-               memset(&psr_vsc, 0, sizeof(psr_vsc));
-               psr_vsc.sdp_header.HB0 = 0;
-               psr_vsc.sdp_header.HB1 = 0x7;
-               psr_vsc.sdp_header.HB2 = 0x2;
-               psr_vsc.sdp_header.HB3 = 0x8;
-       }
-
-       intel_dig_port->write_infoframe(&intel_dig_port->base,
-                                       crtc_state,
-                                       DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
-}
-
-static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 aux_clock_divider, aux_ctl;
-       int i;
-       static const u8 aux_msg[] = {
-               [0] = DP_AUX_NATIVE_WRITE << 4,
-               [1] = DP_SET_POWER >> 8,
-               [2] = DP_SET_POWER & 0xff,
-               [3] = 1 - 1,
-               [4] = DP_SET_POWER_D0,
-       };
-       u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
-                          EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
-                          EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
-                          EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
-
-       BUILD_BUG_ON(sizeof(aux_msg) > 20);
-       for (i = 0; i < sizeof(aux_msg); i += 4)
-               I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
-                          intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
-
-       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
-       /* Start with bits set for DDI_AUX_CTL register */
-       aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
-                                            aux_clock_divider);
-
-       /* Select only valid bits for SRD_AUX_CTL */
-       aux_ctl &= psr_aux_mask;
-       I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
-}
-
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u8 dpcd_val = DP_PSR_ENABLE;
-
-       /* Enable ALPM at sink for psr2 */
-       if (dev_priv->psr.psr2_enabled) {
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
-                                  DP_ALPM_ENABLE);
-               dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
-       } else {
-               if (dev_priv->psr.link_standby)
-                       dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
-
-               if (INTEL_GEN(dev_priv) >= 8)
-                       dpcd_val |= DP_PSR_CRC_VERIFICATION;
-       }
-
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
-
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
-}
-
-static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 val = 0;
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               val |= EDP_PSR_TP4_TIME_0US;
-
-       if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
-               val |= EDP_PSR_TP1_TIME_0us;
-       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
-               val |= EDP_PSR_TP1_TIME_100us;
-       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
-               val |= EDP_PSR_TP1_TIME_500us;
-       else
-               val |= EDP_PSR_TP1_TIME_2500us;
-
-       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
-               val |= EDP_PSR_TP2_TP3_TIME_0us;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
-               val |= EDP_PSR_TP2_TP3_TIME_100us;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
-               val |= EDP_PSR_TP2_TP3_TIME_500us;
-       else
-               val |= EDP_PSR_TP2_TP3_TIME_2500us;
-
-       if (intel_dp_source_supports_hbr2(intel_dp) &&
-           drm_dp_tps3_supported(intel_dp->dpcd))
-               val |= EDP_PSR_TP1_TP3_SEL;
-       else
-               val |= EDP_PSR_TP1_TP2_SEL;
-
-       return val;
-}
-
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 max_sleep_time = 0x1f;
-       u32 val = EDP_PSR_ENABLE;
-
-       /* Let's use 6 as the minimum to cover all known cases including the
-        * off-by-one issue that HW has in some cases.
-        */
-       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
-       /* sink_sync_latency of 8 means the source has to wait for more than 8
-        * frames; we'll go with 9 frames for now
-        */
-       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
-       val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
-
-       val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
-       if (IS_HASWELL(dev_priv))
-               val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
-       if (dev_priv->psr.link_standby)
-               val |= EDP_PSR_LINK_STANDBY;
-
-       val |= intel_psr1_get_tp_time(intel_dp);
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               val |= EDP_PSR_CRC_ENABLE;
-
-       val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
-       I915_WRITE(EDP_PSR_CTL, val);
-}
-
-static void hsw_activate_psr2(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 val;
-
-       /* Let's use 6 as the minimum to cover all known cases including the
-        * off-by-one issue that HW has in some cases.
-        */
-       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
-       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
-       val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
-
-       val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               val |= EDP_Y_COORDINATE_ENABLE;
-
-       val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
-
-       if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
-           dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
-               val |= EDP_PSR2_TP2_TIME_50us;
-       else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
-               val |= EDP_PSR2_TP2_TIME_100us;
-       else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
-               val |= EDP_PSR2_TP2_TIME_500us;
-       else
-               val |= EDP_PSR2_TP2_TIME_2500us;
-
-       /*
-        * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL, and BSpec recommends
-        * keeping this bit unset while PSR2 is enabled.
-        */
-       I915_WRITE(EDP_PSR_CTL, 0);
-
-       I915_WRITE(EDP_PSR2_CTL, val);
-}
-
-static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
-                                   struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
-       int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
-       int psr_max_h = 0, psr_max_v = 0;
-
-       if (!dev_priv->psr.sink_psr2_support)
-               return false;
-
-       /*
-        * DSC and PSR2 cannot be enabled simultaneously. If a requested
-        * resolution requires DSC to be enabled, priority is given to DSC
-        * over PSR2.
-        */
-       if (crtc_state->dsc_params.compression_enable) {
-               DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
-               return false;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
-               psr_max_h = 4096;
-               psr_max_v = 2304;
-       } else if (IS_GEN(dev_priv, 9)) {
-               psr_max_h = 3640;
-               psr_max_v = 2304;
-       }
-
-       if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
-               DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
-                             crtc_hdisplay, crtc_vdisplay,
-                             psr_max_h, psr_max_v);
-               return false;
-       }
-
-       /*
-        * HW sends SU blocks of size four scan lines, which means the starting
-        * X coordinate and Y granularity requirements will always be met. We
-        * only need to validate that the SU block width is a multiple of
-        * the X granularity.
-        */
-       if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
-               DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
-                             crtc_hdisplay, dev_priv->psr.su_x_granularity);
-               return false;
-       }
-
-       if (crtc_state->crc_enabled) {
-               DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
-               return false;
-       }
-
-       return true;
-}
-
-void intel_psr_compute_config(struct intel_dp *intel_dp,
-                             struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->base.adjusted_mode;
-       int psr_setup_time;
-
-       if (!CAN_PSR(dev_priv))
-               return;
-
-       if (intel_dp != dev_priv->psr.dp)
-               return;
-
-       /*
-        * The HSW spec explicitly says PSR is tied to port A.
-        * BDW+ platforms with a DDI implementation of PSR have separate
-        * PSR registers per transcoder, and we only implement the transcoder
-        * EDP ones. Since by display design transcoder EDP is tied to port A,
-        * we can safely bail out based on port A.
-        */
-       if (dig_port->base.port != PORT_A) {
-               DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
-               return;
-       }
-
-       if (dev_priv->psr.sink_not_reliable) {
-               DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
-               return;
-       }
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
-               return;
-       }
-
-       psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
-       if (psr_setup_time < 0) {
-               DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
-                             intel_dp->psr_dpcd[1]);
-               return;
-       }
-
-       if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
-           adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
-               DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
-                             psr_setup_time);
-               return;
-       }
-
-       crtc_state->has_psr = true;
-       crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
-}
-
-static void intel_psr_activate(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
-       WARN_ON(dev_priv->psr.active);
-       lockdep_assert_held(&dev_priv->psr.lock);
-
-       /* PSR1 and PSR2 are mutually exclusive. */
-       if (dev_priv->psr.psr2_enabled)
-               hsw_activate_psr2(intel_dp);
-       else
-               hsw_activate_psr1(intel_dp);
-
-       dev_priv->psr.active = true;
-}
-
-static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
-                                        enum transcoder cpu_transcoder)
-{
-       static const i915_reg_t regs[] = {
-               [TRANSCODER_A] = CHICKEN_TRANS_A,
-               [TRANSCODER_B] = CHICKEN_TRANS_B,
-               [TRANSCODER_C] = CHICKEN_TRANS_C,
-               [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
-       };
-
-       WARN_ON(INTEL_GEN(dev_priv) < 9);
-
-       if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
-                   !regs[cpu_transcoder].reg))
-               cpu_transcoder = TRANSCODER_A;
-
-       return regs[cpu_transcoder];
-}
-
-static void intel_psr_enable_source(struct intel_dp *intel_dp,
-                                   const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-       u32 mask;
-
-       /* Only HSW and BDW have PSR AUX registers that need to be set up.
-        * SKL+ use hardcoded values for PSR AUX transactions.
-        */
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               hsw_psr_setup_aux(intel_dp);
-
-       if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
-                                          !IS_GEMINILAKE(dev_priv))) {
-               i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
-                                                       cpu_transcoder);
-               u32 chicken = I915_READ(reg);
-
-               chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
-                          PSR2_ADD_VERTICAL_LINE_COUNT;
-               I915_WRITE(reg, chicken);
-       }
-
-       /*
-        * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
-        * mask LPSP to avoid a dependency on other drivers that might block
-        * runtime_pm, besides preventing other HW tracking issues, now that
-        * we can rely on frontbuffer tracking.
-        */
-       mask = EDP_PSR_DEBUG_MASK_MEMUP |
-              EDP_PSR_DEBUG_MASK_HPD |
-              EDP_PSR_DEBUG_MASK_LPSP |
-              EDP_PSR_DEBUG_MASK_MAX_SLEEP;
-
-       if (INTEL_GEN(dev_priv) < 11)
-               mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
-
-       I915_WRITE(EDP_PSR_DEBUG, mask);
-}
-
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
-                                   const struct intel_crtc_state *crtc_state)
-{
-       struct intel_dp *intel_dp = dev_priv->psr.dp;
-
-       WARN_ON(dev_priv->psr.enabled);
-
-       dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-       dev_priv->psr.busy_frontbuffer_bits = 0;
-       dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
-       DRM_DEBUG_KMS("Enabling PSR%s\n",
-                     dev_priv->psr.psr2_enabled ? "2" : "1");
-       intel_psr_setup_vsc(intel_dp, crtc_state);
-       intel_psr_enable_sink(intel_dp);
-       intel_psr_enable_source(intel_dp, crtc_state);
-       dev_priv->psr.enabled = true;
-
-       intel_psr_activate(intel_dp);
-}
-
-/**
- * intel_psr_enable - Enable PSR
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- *
- * This function can only be called after the pipe is fully trained and enabled.
- */
-void intel_psr_enable(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-       if (!crtc_state->has_psr)
-               return;
-
-       if (WARN_ON(!CAN_PSR(dev_priv)))
-               return;
-
-       WARN_ON(dev_priv->drrs.dp);
-
-       mutex_lock(&dev_priv->psr.lock);
-
-       if (!psr_global_enabled(dev_priv->psr.debug)) {
-               DRM_DEBUG_KMS("PSR disabled by flag\n");
-               goto unlock;
-       }
-
-       intel_psr_enable_locked(dev_priv, crtc_state);
-
-unlock:
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       if (!dev_priv->psr.active) {
-               if (INTEL_GEN(dev_priv) >= 9)
-                       WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
-               return;
-       }
-
-       if (dev_priv->psr.psr2_enabled) {
-               val = I915_READ(EDP_PSR2_CTL);
-               WARN_ON(!(val & EDP_PSR2_ENABLE));
-               I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-       } else {
-               val = I915_READ(EDP_PSR_CTL);
-               WARN_ON(!(val & EDP_PSR_ENABLE));
-               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-       }
-       dev_priv->psr.active = false;
-}
-
-static void intel_psr_disable_locked(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       i915_reg_t psr_status;
-       u32 psr_status_mask;
-
-       lockdep_assert_held(&dev_priv->psr.lock);
-
-       if (!dev_priv->psr.enabled)
-               return;
-
-       DRM_DEBUG_KMS("Disabling PSR%s\n",
-                     dev_priv->psr.psr2_enabled ? "2" : "1");
-
-       intel_psr_exit(dev_priv);
-
-       if (dev_priv->psr.psr2_enabled) {
-               psr_status = EDP_PSR2_STATUS;
-               psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-       } else {
-               psr_status = EDP_PSR_STATUS;
-               psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-       }
-
-       /* Wait till PSR is idle */
-       if (intel_wait_for_register(&dev_priv->uncore,
-                                   psr_status, psr_status_mask, 0, 2000))
-               DRM_ERROR("Timed out waiting PSR idle state\n");
-
-       /* Disable PSR on Sink */
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
-
-       dev_priv->psr.enabled = false;
-}
-
-/**
- * intel_psr_disable - Disable PSR
- * @intel_dp: Intel DP
- * @old_crtc_state: old CRTC state
- *
- * This function needs to be called before disabling pipe.
- */
-void intel_psr_disable(struct intel_dp *intel_dp,
-                      const struct intel_crtc_state *old_crtc_state)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
-       if (!old_crtc_state->has_psr)
-               return;
-
-       if (WARN_ON(!CAN_PSR(dev_priv)))
-               return;
-
-       mutex_lock(&dev_priv->psr.lock);
-
-       intel_psr_disable_locked(intel_dp);
-
-       mutex_unlock(&dev_priv->psr.lock);
-       cancel_work_sync(&dev_priv->psr.work);
-}
-
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
-{
-       /*
-        * Display WA #0884: all
-        * This WA, documented for bxt, can be safely applied
-        * broadly, so we can force HW tracking to exit PSR
-        * instead of disabling and re-enabling it.
-        * The workaround tells us to write 0 to CUR_SURFLIVE_A,
-        * but it makes more sense to write to the currently
-        * active pipe.
-        */
-       I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
-}
-
-/**
- * intel_psr_update - Update PSR state
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- *
- * This function will update the PSR state, disabling, enabling or switching PSR
- * version when executing fastsets. For full modeset, intel_psr_disable() and
- * intel_psr_enable() should be called instead.
- */
-void intel_psr_update(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct i915_psr *psr = &dev_priv->psr;
-       bool enable, psr2_enable;
-
-       if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
-               return;
-
-       mutex_lock(&dev_priv->psr.lock);
-
-       enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
-       psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
-
-       if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
-               /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
-               if (crtc_state->crc_enabled && psr->enabled)
-                       psr_force_hw_tracking_exit(dev_priv);
-
-               goto unlock;
-       }
-
-       if (psr->enabled)
-               intel_psr_disable_locked(intel_dp);
-
-       if (enable)
-               intel_psr_enable_locked(dev_priv, crtc_state);
-
-unlock:
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
- * @new_crtc_state: new CRTC state
- * @out_value: PSR status in case of failure
- *
- * This function is expected to be called from pipe_update_start() where it is
- * not expected to race with PSR enable or disable.
- *
- * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
- */
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
-                           u32 *out_value)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
-               return 0;
-
-       /* FIXME: Update this for PSR2 if we need to wait for idle */
-       if (READ_ONCE(dev_priv->psr.psr2_enabled))
-               return 0;
-
-       /*
-        * From bspec: Panel Self Refresh (BDW+)
-        * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
-        * exit training time + 1.5 ms of aux channel handshake. 50 ms is
-        * defensive enough to cover everything.
-        */
-
-       return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
-                                        EDP_PSR_STATUS_STATE_MASK,
-                                        EDP_PSR_STATUS_STATE_IDLE, 2, 50,
-                                        out_value);
-}
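A quick sanity check of the defensive 50 ms bound used above, assuming a 60 Hz
panel: 1000/60 ms + 6 ms exit training + 1.5 ms AUX handshake ≈ 24.2 ms, so the
50 ms poll leaves roughly 2x headroom.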
-
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
-{
-       i915_reg_t reg;
-       u32 mask;
-       int err;
-
-       if (!dev_priv->psr.enabled)
-               return false;
-
-       if (dev_priv->psr.psr2_enabled) {
-               reg = EDP_PSR2_STATUS;
-               mask = EDP_PSR2_STATUS_STATE_MASK;
-       } else {
-               reg = EDP_PSR_STATUS;
-               mask = EDP_PSR_STATUS_STATE_MASK;
-       }
-
-       mutex_unlock(&dev_priv->psr.lock);
-
-       err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
-       if (err)
-               DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
-
-       /* After the unlocked wait, verify that PSR is still wanted! */
-       mutex_lock(&dev_priv->psr.lock);
-       return err == 0 && dev_priv->psr.enabled;
-}
-
-static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_modeset_acquire_ctx ctx;
-       struct drm_atomic_state *state;
-       struct drm_crtc *crtc;
-       int err;
-
-       state = drm_atomic_state_alloc(dev);
-       if (!state)
-               return -ENOMEM;
-
-       drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-       state->acquire_ctx = &ctx;
-
-retry:
-       drm_for_each_crtc(crtc, dev) {
-               struct drm_crtc_state *crtc_state;
-               struct intel_crtc_state *intel_crtc_state;
-
-               crtc_state = drm_atomic_get_crtc_state(state, crtc);
-               if (IS_ERR(crtc_state)) {
-                       err = PTR_ERR(crtc_state);
-                       goto error;
-               }
-
-               intel_crtc_state = to_intel_crtc_state(crtc_state);
-
-               if (crtc_state->active && intel_crtc_state->has_psr) {
-                       /* Mark mode as changed to trigger a pipe->update() */
-                       crtc_state->mode_changed = true;
-                       break;
-               }
-       }
-
-       err = drm_atomic_commit(state);
-
-error:
-       if (err == -EDEADLK) {
-               drm_atomic_state_clear(state);
-               err = drm_modeset_backoff(&ctx);
-               if (!err)
-                       goto retry;
-       }
-
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-       drm_atomic_state_put(state);
-
-       return err;
-}
-
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
-{
-       const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
-       u32 old_mode;
-       int ret;
-
-       if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
-           mode > I915_PSR_DEBUG_FORCE_PSR1) {
-               DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
-               return -EINVAL;
-       }
-
-       ret = mutex_lock_interruptible(&dev_priv->psr.lock);
-       if (ret)
-               return ret;
-
-       old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
-       dev_priv->psr.debug = val;
-       intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
-
-       mutex_unlock(&dev_priv->psr.lock);
-
-       if (old_mode != mode)
-               ret = intel_psr_fastset_force(dev_priv);
-
-       return ret;
-}
-
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
-{
-       struct i915_psr *psr = &dev_priv->psr;
-
-       intel_psr_disable_locked(psr->dp);
-       psr->sink_not_reliable = true;
-       /* let's make sure that the sink is awake */
-       drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
-}
-
-static void intel_psr_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), psr.work);
-
-       mutex_lock(&dev_priv->psr.lock);
-
-       if (!dev_priv->psr.enabled)
-               goto unlock;
-
-       if (READ_ONCE(dev_priv->psr.irq_aux_error))
-               intel_psr_handle_irq(dev_priv);
-
-       /*
-        * We have to make sure PSR is ready for re-enable,
-        * otherwise it stays disabled until the next full enable/disable
-        * cycle. PSR might take some time to get fully disabled
-        * and be ready for re-enable.
-        */
-       if (!__psr_wait_for_idle_locked(dev_priv))
-               goto unlock;
-
-       /*
-        * The delayed work can race with an invalidate, hence we need to
-        * recheck. Since psr_flush first clears this and then reschedules, we
-        * won't ever miss a flush when bailing out here.
-        */
-       if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
-               goto unlock;
-
-       intel_psr_activate(dev_priv->psr.dp);
-unlock:
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
- * intel_psr_invalidate - Invalidate PSR
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- * @origin: which operation caused the invalidate
- *
- * Since the hardware frontbuffer tracking has gaps we need to integrate
- * with the software frontbuffer tracking. This function gets called every
- * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
- * disabled if the frontbuffer mask contains a buffer relevant to PSR.
- *
- * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
- */
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
-                         unsigned frontbuffer_bits, enum fb_op_origin origin)
-{
-       if (!CAN_PSR(dev_priv))
-               return;
-
-       if (origin == ORIGIN_FLIP)
-               return;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-       dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
-
-       if (frontbuffer_bits)
-               intel_psr_exit(dev_priv);
-
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
- * intel_psr_flush - Flush PSR
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- * @origin: which operation caused the flush
- *
- * Since the hardware frontbuffer tracking has gaps we need to integrate
- * with the software frontbuffer tracking. This function gets called every
- * time frontbuffer rendering has completed and flushed out to memory. PSR
- * can be enabled again if no other frontbuffer relevant to PSR is dirty.
- *
- * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
- */
-void intel_psr_flush(struct drm_i915_private *dev_priv,
-                    unsigned frontbuffer_bits, enum fb_op_origin origin)
-{
-       if (!CAN_PSR(dev_priv))
-               return;
-
-       if (origin == ORIGIN_FLIP)
-               return;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
-       /* By definition flush = invalidate + flush */
-       if (frontbuffer_bits)
-               psr_force_hw_tracking_exit(dev_priv);
-
-       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-               schedule_work(&dev_priv->psr.work);
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
- * intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
- *
- * This function is called only once at driver load to initialize basic
- * PSR stuff.
- */
-void intel_psr_init(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       if (!HAS_PSR(dev_priv))
-               return;
-
-       dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
-               HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
-
-       if (!dev_priv->psr.sink_support)
-               return;
-
-       if (i915_modparams.enable_psr == -1)
-               if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
-                       i915_modparams.enable_psr = 0;
-
-       /*
-        * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
-        * will still keep the error set even after the reset done in the
-        * irq_preinstall and irq_uninstall hooks.
-        * Enabling PSR in this situation causes the screen to freeze the
-        * first time the PSR HW tries to activate, so let's keep PSR disabled
-        * to avoid any rendering problems.
-        */
-       val = I915_READ(EDP_PSR_IIR);
-       val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
-       if (val) {
-               DRM_DEBUG_KMS("PSR interruption error set\n");
-               dev_priv->psr.sink_not_reliable = true;
-       }
-
-       /* Set link_standby vs. link_off defaults */
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               /* HSW and BDW require workarounds that we don't implement. */
-               dev_priv->psr.link_standby = false;
-       else
-               /* For new platforms, let's respect the VBT again */
-               dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
-
-       INIT_WORK(&dev_priv->psr.work, intel_psr_work);
-       mutex_init(&dev_priv->psr.lock);
-}
-
-void intel_psr_short_pulse(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct i915_psr *psr = &dev_priv->psr;
-       u8 val;
-       const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
-                         DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
-                         DP_PSR_LINK_CRC_ERROR;
-
-       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
-               return;
-
-       mutex_lock(&psr->lock);
-
-       if (!psr->enabled || psr->dp != intel_dp)
-               goto exit;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
-               DRM_ERROR("PSR_STATUS dpcd read failed\n");
-               goto exit;
-       }
-
-       if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
-               DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
-               intel_psr_disable_locked(intel_dp);
-               psr->sink_not_reliable = true;
-       }
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
-               DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
-               goto exit;
-       }
-
-       if (val & DP_PSR_RFB_STORAGE_ERROR)
-               DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
-       if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
-               DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
-       if (val & DP_PSR_LINK_CRC_ERROR)
-               DRM_ERROR("PSR Link CRC error, disabling PSR\n");
-
-       if (val & ~errors)
-               DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
-                         val & ~errors);
-       if (val & errors) {
-               intel_psr_disable_locked(intel_dp);
-               psr->sink_not_reliable = true;
-       }
-       /* clear status register */
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-exit:
-       mutex_unlock(&psr->lock);
-}
-
-bool intel_psr_enabled(struct intel_dp *intel_dp)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       bool ret;
-
-       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
-               return false;
-
-       mutex_lock(&dev_priv->psr.lock);
-       ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
-       mutex_unlock(&dev_priv->psr.lock);
-
-       return ret;
-}
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/intel_psr.h
deleted file mode 100644 (file)
index dc81882..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_PSR_H__
-#define __INTEL_PSR_H__
-
-#include "intel_frontbuffer.h"
-
-struct drm_i915_private;
-struct intel_crtc_state;
-struct intel_dp;
-
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
-void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state);
-void intel_psr_disable(struct intel_dp *intel_dp,
-                      const struct intel_crtc_state *old_crtc_state);
-void intel_psr_update(struct intel_dp *intel_dp,
-                     const struct intel_crtc_state *crtc_state);
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
-                         unsigned frontbuffer_bits,
-                         enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
-                    unsigned frontbuffer_bits,
-                    enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_compute_config(struct intel_dp *intel_dp,
-                             struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
-void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
-                           u32 *out_value);
-bool intel_psr_enabled(struct intel_dp *intel_dp);
-
-#endif /* __INTEL_PSR_H__ */
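Taken together with the kerneldoc in intel_psr.c above, these entry points
expect a specific ordering: intel_psr_compute_config() when computing the new
CRTC state, intel_psr_disable() before the pipe goes down, intel_psr_enable()
only once the pipe is fully trained and enabled, and intel_psr_update() for
fastsets. A minimal sketch of that ordering with a hypothetical caller (only
the intel_psr_*() calls are real):

	/* Hypothetical full-modeset flow, for illustration only. */
	static void example_full_modeset(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *old_crtc_state,
					 struct intel_crtc_state *new_crtc_state)
	{
		/* Compute phase: decides has_psr / has_psr2 for the new state. */
		intel_psr_compute_config(intel_dp, new_crtc_state);

		/* Commit phase: PSR must be torn down before the pipe is disabled. */
		intel_psr_disable(intel_dp, old_crtc_state);

		/* ... pipe reprogrammed, link retrained ... */

		/* Enable again only after the pipe is fully trained and enabled. */
		intel_psr_enable(intel_dp, new_crtc_state);
	}

For fastsets, intel_psr_update() replaces the disable/enable pair in a single
call.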
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
deleted file mode 100644 (file)
index 0b749c2..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corporation
- */
-
-#include <linux/dmi.h>
-
-#include "intel_drv.h"
-#include "intel_quirks.h"
-
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_i915_private *i915)
-{
-       i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
-       DRM_INFO("applying lvds SSC disable quirk\n");
-}
-
-/*
- * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
- * brightness value
- */
-static void quirk_invert_brightness(struct drm_i915_private *i915)
-{
-       i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
-       DRM_INFO("applying inverted panel brightness quirk\n");
-}
-
-/* Some VBTs incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_i915_private *i915)
-{
-       i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
-       DRM_INFO("applying backlight present quirk\n");
-}
-
-/* Toshiba Satellite P50-C-18C requires a minimum T12 delay of 800 ms,
- * which is 300 ms greater than the eDP spec T12 minimum.
- */
-static void quirk_increase_t12_delay(struct drm_i915_private *i915)
-{
-       i915->quirks |= QUIRK_INCREASE_T12_DELAY;
-       DRM_INFO("Applying T12 delay quirk\n");
-}
-
-/*
- * GeminiLake NUC HDMI outputs require additional off time;
- * this allows the onboard retimer to correctly sync to the signal.
- */
-static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
-{
-       i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
-       DRM_INFO("Applying Increase DDI Disabled quirk\n");
-}
-
-struct intel_quirk {
-       int device;
-       int subsystem_vendor;
-       int subsystem_device;
-       void (*hook)(struct drm_i915_private *i915);
-};
-
-/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
-struct intel_dmi_quirk {
-       void (*hook)(struct drm_i915_private *i915);
-       const struct dmi_system_id (*dmi_id_list)[];
-};
-
-static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
-{
-       DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
-       return 1;
-}
-
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
-       {
-               .dmi_id_list = &(const struct dmi_system_id[]) {
-                       {
-                               .callback = intel_dmi_reverse_brightness,
-                               .ident = "NCR Corporation",
-                               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
-                                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
-                               },
-                       },
-                       { }  /* terminating entry */
-               },
-               .hook = quirk_invert_brightness,
-       },
-};
-
-static struct intel_quirk intel_quirks[] = {
-       /* Lenovo U160 cannot use SSC on LVDS */
-       { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
-       /* Sony Vaio Y cannot use SSC on LVDS */
-       { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
-
-       /* Acer Aspire 5734Z must invert backlight brightness */
-       { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
-       /* Acer/eMachines G725 */
-       { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
-       /* Acer/eMachines e725 */
-       { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
-       /* Acer/Packard Bell NCL20 */
-       { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
-       /* Acer Aspire 4736Z */
-       { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
-       /* Acer Aspire 5336 */
-       { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
-
-       /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
-       { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
-       /* Acer C720 Chromebook (Core i3 4005U) */
-       { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
-       /* Apple Macbook 2,1 (Core 2 T7400) */
-       { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
-
-       /* Apple Macbook 4,1 */
-       { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
-
-       /* Toshiba CB35 Chromebook (Celeron 2955U) */
-       { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
-
-       /* HP Chromebook 14 (Celeron 2955U) */
-       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
-
-       /* Dell Chromebook 11 */
-       { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
-
-       /* Dell Chromebook 11 (2015 version) */
-       { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
-
-       /* Toshiba Satellite P50-C-18C */
-       { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
-
-       /* GeminiLake NUC */
-       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
-       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
-       /* ASRock ITX */
-       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-};
-
-void intel_init_quirks(struct drm_i915_private *i915)
-{
-       struct pci_dev *d = i915->drm.pdev;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
-               struct intel_quirk *q = &intel_quirks[i];
-
-               if (d->device == q->device &&
-                   (d->subsystem_vendor == q->subsystem_vendor ||
-                    q->subsystem_vendor == PCI_ANY_ID) &&
-                   (d->subsystem_device == q->subsystem_device ||
-                    q->subsystem_device == PCI_ANY_ID))
-                       q->hook(i915);
-       }
-       for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
-               if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
-                       intel_dmi_quirks[i].hook(i915);
-       }
-}
diff --git a/drivers/gpu/drm/i915/intel_quirks.h b/drivers/gpu/drm/i915/intel_quirks.h
deleted file mode 100644 (file)
index b0fcff1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_QUIRKS_H__
-#define __INTEL_QUIRKS_H__
-
-struct drm_i915_private;
-
-void intel_init_quirks(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_QUIRKS_H__ */
index f2d6299a8161be9ba58abc09fad463e065068efb..473c4850c01d0c6dacdaff180c0f1bf4077652b4 100644 (file)
@@ -8,7 +8,8 @@
 
 #include <linux/types.h>
 
-#include "intel_display.h"
+#include "display/intel_display.h"
+
 #include "intel_wakeref.h"
 
 #include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
deleted file mode 100644 (file)
index 004b520..0000000
+++ /dev/null
@@ -1,2464 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- *   Jesse Barnes <jbarnes@virtuousgeek.org>
- *
- * New plane/sprite handling.
- *
- * The older chips had a separate interface for programming plane related
- * registers; newer ones are much simpler and we can use the new DRM plane
- * support.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_color_mgmt.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
-
-#include "i915_drv.h"
-#include "intel_atomic_plane.h"
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-#include "intel_pm.h"
-#include "intel_psr.h"
-#include "intel_sprite.h"
-
-bool is_planar_yuv_format(u32 pixelformat)
-{
-       switch (pixelformat) {
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_P010:
-       case DRM_FORMAT_P012:
-       case DRM_FORMAT_P016:
-               return true;
-       default:
-               return false;
-       }
-}
-
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
-                            int usecs)
-{
-       /* paranoia */
-       if (!adjusted_mode->crtc_htotal)
-               return 1;
-
-       return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
-                           1000 * adjusted_mode->crtc_htotal);
-}
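
For reference, the conversion above is ceil(usecs * crtc_clock / (1000 * crtc_htotal)), with crtc_clock in kHz. A minimal standalone sketch of the same arithmetic (the 1080p60 timing below, 148500 kHz pixel clock with htotal 2200, is only an illustrative choice, and usecs_to_scanlines() is a hypothetical user-space rename, not driver code):

#include <stdio.h>

/* Same rounding as the driver's DIV_ROUND_UP(), outside the kernel. */
static int usecs_to_scanlines(int clock_khz, int htotal, int usecs)
{
	if (!htotal)
		return 1; /* mirror the paranoia fallback above */
	return (usecs * clock_khz + 1000 * htotal - 1) / (1000 * htotal);
}

int main(void)
{
	/* 100 us of vblank evasion on a 1080p60 mode comes out to 7 scanlines. */
	printf("%d\n", usecs_to_scanlines(148500, 2200, 100));
	return 0;
}
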
-
-/* FIXME: We should only take spinlocks once for the entire update
- * instead of once per mmio. */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
-
-/**
- * intel_pipe_update_start() - start update of a set of display registers
- * @new_crtc_state: the new crtc state
- *
- * Mark the start of an update to pipe registers that should be updated
- * atomically regarding vblank. If the next vblank happens within
- * the next 100 us, this function waits until the vblank passes.
- *
- * After a successful call to this function, interrupts will be disabled
- * until a subsequent call to intel_pipe_update_end(). That is done to
- * avoid random delays.
- */
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
-       long timeout = msecs_to_jiffies_timeout(1);
-       int scanline, min, max, vblank_start;
-       wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
-       bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-               intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
-       DEFINE_WAIT(wait);
-       u32 psr_status;
-
-       vblank_start = adjusted_mode->crtc_vblank_start;
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-               vblank_start = DIV_ROUND_UP(vblank_start, 2);
-
-       /* FIXME needs to be calibrated sensibly */
-       min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
-                                                     VBLANK_EVASION_TIME_US);
-       max = vblank_start - 1;
-
-       if (min <= 0 || max <= 0)
-               goto irq_disable;
-
-       if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
-               goto irq_disable;
-
-       /*
-        * Wait for PSR to idle out after enabling the VBL interrupts.
-        * VBL interrupts will start the PSR exit and prevent a PSR
-        * re-entry as well.
-        */
-       if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
-               DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
-                         psr_status);
-
-       local_irq_disable();
-
-       crtc->debug.min_vbl = min;
-       crtc->debug.max_vbl = max;
-       trace_i915_pipe_update_start(crtc);
-
-       for (;;) {
-               /*
-                * prepare_to_wait() has a memory barrier, which guarantees
-                * other CPUs can see the task state update by the time we
-                * read the scanline.
-                */
-               prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-
-               scanline = intel_get_crtc_scanline(crtc);
-               if (scanline < min || scanline > max)
-                       break;
-
-               if (!timeout) {
-                       DRM_ERROR("Potential atomic update failure on pipe %c\n",
-                                 pipe_name(crtc->pipe));
-                       break;
-               }
-
-               local_irq_enable();
-
-               timeout = schedule_timeout(timeout);
-
-               local_irq_disable();
-       }
-
-       finish_wait(wq, &wait);
-
-       drm_crtc_vblank_put(&crtc->base);
-
-       /*
-        * On VLV/CHV DSI the scanline counter would appear to
-        * increment approx. 1/3 of a scanline before start of vblank.
-        * The registers still get latched at start of vblank however.
-        * This means we must not write any registers on the first
-        * line of vblank (since not the whole line is actually in
-        * vblank). And unfortunately we can't use the interrupt to
-        * wait here since it will fire too soon. We could use the
-        * frame start interrupt instead since it will fire after the
-        * critical scanline, but that would require more changes
-        * in the interrupt code. So for now we'll just do the nasty
-        * thing and poll for the bad scanline to pass us by.
-        *
-        * FIXME figure out if BXT+ DSI suffers from this as well
-        */
-       while (need_vlv_dsi_wa && scanline == vblank_start)
-               scanline = intel_get_crtc_scanline(crtc);
-
-       crtc->debug.scanline_start = scanline;
-       crtc->debug.start_vbl_time = ktime_get();
-       crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
-
-       trace_i915_pipe_update_vblank_evaded(crtc);
-       return;
-
-irq_disable:
-       local_irq_disable();
-}
-
-/**
- * intel_pipe_update_end() - end update of a set of display registers
- * @new_crtc_state: the new crtc state
- *
- * Mark the end of an update started with intel_pipe_update_start(). This
- * re-enables interrupts and verifies the update was actually completed
- * before a vblank.
- */
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
-       enum pipe pipe = crtc->pipe;
-       int scanline_end = intel_get_crtc_scanline(crtc);
-       u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
-       ktime_t end_vbl_time = ktime_get();
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
-
-       /* We're still in the vblank-evade critical section, so this can't race.
-        * Would be slightly nice to just grab the vblank count and arm the
-        * event outside of the critical section - the spinlock might spin for a
-        * while ... */
-       if (new_crtc_state->base.event) {
-               WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
-
-               spin_lock(&crtc->base.dev->event_lock);
-               drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
-               spin_unlock(&crtc->base.dev->event_lock);
-
-               new_crtc_state->base.event = NULL;
-       }
-
-       local_irq_enable();
-
-       if (intel_vgpu_active(dev_priv))
-               return;
-
-       if (crtc->debug.start_vbl_count &&
-           crtc->debug.start_vbl_count != end_vbl_count) {
-               DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
-                         pipe_name(pipe), crtc->debug.start_vbl_count,
-                         end_vbl_count,
-                         ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
-                         crtc->debug.min_vbl, crtc->debug.max_vbl,
-                         crtc->debug.scanline_start, scanline_end);
-       }
-#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
-       else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
-                VBLANK_EVASION_TIME_US)
-               DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
-                        pipe_name(pipe),
-                        ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
-                        VBLANK_EVASION_TIME_US);
-#endif
-}
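
These two helpers bracket the double-buffered register writes of one atomic update. A hedged sketch of the expected calling pattern, not runnable on its own (example_commit_pipe() is a hypothetical caller, not the actual i915 call chain):

/* Illustrative only: the vblank-evasion critical section pattern. */
static void example_commit_pipe(struct intel_crtc_state *new_crtc_state)
{
	/* Wait out the evasion window before vblank; IRQs are left disabled. */
	intel_pipe_update_start(new_crtc_state);

	/* ... program all double-buffered pipe/plane registers here ... */

	/* Re-enable IRQs and complain if the update slipped past vblank. */
	intel_pipe_update_end(new_crtc_state);
}
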
-
-int intel_plane_check_stride(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       u32 stride, max_stride;
-
-       /*
-        * We ignore stride for all invisible planes that
-        * can be remapped. Otherwise we could end up
-        * with a false positive when the remapping didn't
-        * kick in due the plane being invisible.
-        * kick in due to the plane being invisible.
-       if (intel_plane_can_remap(plane_state) &&
-           !plane_state->base.visible)
-               return 0;
-
-       /* FIXME other color planes? */
-       stride = plane_state->color_plane[0].stride;
-       max_stride = plane->max_stride(plane, fb->format->format,
-                                      fb->modifier, rotation);
-
-       if (stride > max_stride) {
-               DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
-                             fb->base.id, stride,
-                             plane->base.base.id, plane->base.name, max_stride);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_rect *src = &plane_state->base.src;
-       u32 src_x, src_y, src_w, src_h, hsub, vsub;
-       bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
-
-       /*
-        * Hardware doesn't handle subpixel coordinates.
-        * Adjust to (macro)pixel boundary, but be careful not to
-        * increase the source viewport size, because that could
-        * push the downscaling factor out of bounds.
-        */
-       src_x = src->x1 >> 16;
-       src_w = drm_rect_width(src) >> 16;
-       src_y = src->y1 >> 16;
-       src_h = drm_rect_height(src) >> 16;
-
-       src->x1 = src_x << 16;
-       src->x2 = (src_x + src_w) << 16;
-       src->y1 = src_y << 16;
-       src->y2 = (src_y + src_h) << 16;
-
-       if (!fb->format->is_yuv)
-               return 0;
-
-       /* YUV specific checks */
-       if (!rotated) {
-               hsub = fb->format->hsub;
-               vsub = fb->format->vsub;
-       } else {
-               hsub = vsub = max(fb->format->hsub, fb->format->vsub);
-       }
-
-       if (src_x % hsub || src_w % hsub) {
-               DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
-                             src_x, src_w, hsub, rotated ? "rotated " : "");
-               return -EINVAL;
-       }
-
-       if (src_y % vsub || src_h % vsub) {
-               DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
-                             src_y, src_h, vsub, rotated ? "rotated " : "");
-               return -EINVAL;
-       }
-
-       return 0;
-}
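
As a concrete instance of the alignment rule above: NV12 uses 2x2 chroma subsampling, so hsub = vsub = 2 and any odd src_x/src_w (or src_y/src_h) is rejected. A small standalone sketch of just that predicate, assuming NV12:

#include <stdbool.h>
#include <stdio.h>

/* The YUV alignment rule in isolation, assuming NV12 (hsub = vsub = 2). */
static bool src_aligned(unsigned int src_x, unsigned int src_w,
			unsigned int src_y, unsigned int src_h,
			unsigned int hsub, unsigned int vsub)
{
	return !(src_x % hsub || src_w % hsub ||
		 src_y % vsub || src_h % vsub);
}

int main(void)
{
	printf("%d\n", src_aligned(2, 4, 0, 8, 2, 2)); /* 1: accepted */
	printf("%d\n", src_aligned(3, 4, 0, 8, 2, 2)); /* 0: odd src_x rejected */
	return 0;
}
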
-
-static unsigned int
-skl_plane_max_stride(struct intel_plane *plane,
-                    u32 pixel_format, u64 modifier,
-                    unsigned int rotation)
-{
-       const struct drm_format_info *info = drm_format_info(pixel_format);
-       int cpp = info->cpp[0];
-
-       /*
-        * "The stride in bytes must not exceed the lesser
-        * of the size of 8K pixels and 32K bytes."
-        */
-       if (drm_rotation_90_or_270(rotation))
-               return min(8192, 32768 / cpp);
-       else
-               return min(8192 * cpp, 32768);
-}
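
Worked through for a 32 bpp format (cpp = 4): the unrotated limit is min(8192 * 4, 32768) = 32768 and the 90/270-rotated limit is min(8192, 32768 / 4) = 8192, i.e. the 8K-pixel and 32K-byte clauses of the quoted restriction meet at exactly the same point; for cpp = 1 the pixel clause is the binding one in both cases (8192).
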
-
-static void
-skl_program_scaler(struct intel_plane *plane,
-                  const struct intel_crtc_state *crtc_state,
-                  const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       int scaler_id = plane_state->scaler_id;
-       const struct intel_scaler *scaler =
-               &crtc_state->scaler_state.scalers[scaler_id];
-       int crtc_x = plane_state->base.dst.x1;
-       int crtc_y = plane_state->base.dst.y1;
-       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
-       u16 y_hphase, uv_rgb_hphase;
-       u16 y_vphase, uv_rgb_vphase;
-       int hscale, vscale;
-
-       hscale = drm_rect_calc_hscale(&plane_state->base.src,
-                                     &plane_state->base.dst,
-                                     0, INT_MAX);
-       vscale = drm_rect_calc_vscale(&plane_state->base.src,
-                                     &plane_state->base.dst,
-                                     0, INT_MAX);
-
-       /* TODO: handle sub-pixel coordinates */
-       if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
-           !icl_is_hdr_plane(dev_priv, plane->id)) {
-               y_hphase = skl_scaler_calc_phase(1, hscale, false);
-               y_vphase = skl_scaler_calc_phase(1, vscale, false);
-
-               /* MPEG2 chroma siting convention */
-               uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
-               uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
-       } else {
-               /* not used */
-               y_hphase = 0;
-               y_vphase = 0;
-
-               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
-               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
-       }
-
-       I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
-                     PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
-       I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
-                     PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-       I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
-                     PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-       I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
-       I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
-}
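
A worked instance of the fetch-width check above (the numbers are purely illustrative): with cpp = 4, src_x = 7 and src_w = 1024, width_bytes = ((7 * 4) & 63) + 1024 * 4 = 28 + 4096 = 4124, which exceeds the 4096-byte limit, so the scaled case is rejected even though src_w is comfortably inside the 2048-pixel bound.
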
-
-/* Preoffset values for YUV to RGB Conversion */
-#define PREOFF_YUV_TO_RGB_HI           0x1800
-#define PREOFF_YUV_TO_RGB_ME           0x1F00
-#define PREOFF_YUV_TO_RGB_LO           0x1800
-
-#define  ROFF(x)          (((x) & 0xffff) << 16)
-#define  GOFF(x)          (((x) & 0xffff) << 0)
-#define  BOFF(x)          (((x) & 0xffff) << 16)
-
-static void
-icl_program_input_csc(struct intel_plane *plane,
-                     const struct intel_crtc_state *crtc_state,
-                     const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       enum plane_id plane_id = plane->id;
-
-       static const u16 input_csc_matrix[][9] = {
-               /*
-                * BT.601 full range YCbCr -> full range RGB
- * The matrix required is:
-                * [1.000, 0.000, 1.371,
-                *  1.000, -0.336, -0.698,
-                *  1.000, 1.732, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT601] = {
-                       0x7AF8, 0x7800, 0x0,
-                       0x8B28, 0x7800, 0x9AC0,
-                       0x0, 0x7800, 0x7DD8,
-               },
-               /*
-                * BT.709 full range YCbCr -> full range RGB
- * The matrix required is:
-                * [1.000, 0.000, 1.574,
-                *  1.000, -0.187, -0.468,
-                *  1.000, 1.855, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT709] = {
-                       0x7C98, 0x7800, 0x0,
-                       0x9EF8, 0x7800, 0xABF8,
-                       0x0, 0x7800,  0x7ED8,
-               },
-       };
-
-       /* Matrix for Limited Range to Full Range Conversion */
-       static const u16 input_csc_matrix_lr[][9] = {
-               /*
- * BT.601 Limited range YCbCr -> full range RGB
- * The matrix required is:
-                * [1.164384, 0.000, 1.596370,
-                *  1.138393, -0.382500, -0.794598,
-                *  1.138393, 1.971696, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT601] = {
-                       0x7CC8, 0x7950, 0x0,
-                       0x8CB8, 0x7918, 0x9C40,
-                       0x0, 0x7918, 0x7FC8,
-               },
-               /*
-                * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is:
-                * [1.164, 0.000, 1.833671,
-                *  1.138393, -0.213249, -0.532909,
-                *  1.138393, 2.112402, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT709] = {
-                       0x7EA8, 0x7950, 0x0,
-                       0x8888, 0x7918, 0xADA8,
-                       0x0, 0x7918,  0x6870,
-               },
-       };
-       const u16 *csc;
-
-       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-               csc = input_csc_matrix[plane_state->base.color_encoding];
-       else
-               csc = input_csc_matrix_lr[plane_state->base.color_encoding];
-
-       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
-                     GOFF(csc[1]));
-       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
-       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
-                     GOFF(csc[4]));
-       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
-       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
-                     GOFF(csc[7]));
-       I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
-
-       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
-                     PREOFF_YUV_TO_RGB_HI);
-       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-                     PREOFF_YUV_TO_RGB_ME);
-       I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
-                     PREOFF_YUV_TO_RGB_LO);
-       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
-       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
-       I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
-}
-
-static void
-skl_program_plane(struct intel_plane *plane,
-                 const struct intel_crtc_state *crtc_state,
-                 const struct intel_plane_state *plane_state,
-                 int color_plane, bool slave, u32 plane_ctl)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum plane_id plane_id = plane->id;
-       enum pipe pipe = plane->pipe;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 surf_addr = plane_state->color_plane[color_plane].offset;
-       u32 stride = skl_plane_stride(plane_state, color_plane);
-       u32 aux_stride = skl_plane_stride(plane_state, 1);
-       int crtc_x = plane_state->base.dst.x1;
-       int crtc_y = plane_state->base.dst.y1;
-       u32 x = plane_state->color_plane[color_plane].x;
-       u32 y = plane_state->color_plane[color_plane].y;
-       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
-       struct intel_plane *linked = plane_state->linked_plane;
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       u8 alpha = plane_state->base.alpha >> 8;
-       u32 plane_color_ctl = 0;
-       unsigned long irqflags;
-       u32 keymsk, keymax;
-
-       plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               plane_color_ctl = plane_state->color_ctl |
-                       glk_plane_color_ctl_crtc(crtc_state);
-
-       /* Sizes are 0 based */
-       src_w--;
-       src_h--;
-
-       keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
-
-       keymsk = key->channel_mask & 0x7ffffff;
-       if (alpha < 0xff)
-               keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
-
-       /* The scaler will handle the output position */
-       if (plane_state->scaler_id >= 0) {
-               crtc_x = 0;
-               crtc_y = 0;
-       }
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
-       I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
-       I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
-       I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
-                     (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
-
-       if (icl_is_hdr_plane(dev_priv, plane_id)) {
-               u32 cus_ctl = 0;
-
-               if (linked) {
-                       /* Enable and use MPEG-2 chroma siting */
-                       cus_ctl = PLANE_CUS_ENABLE |
-                               PLANE_CUS_HPHASE_0 |
-                               PLANE_CUS_VPHASE_SIGN_NEGATIVE |
-                               PLANE_CUS_VPHASE_0_25;
-
-                       if (linked->id == PLANE_SPRITE5)
-                               cus_ctl |= PLANE_CUS_PLANE_7;
-                       else if (linked->id == PLANE_SPRITE4)
-                               cus_ctl |= PLANE_CUS_PLANE_6;
-                       else
-                               MISSING_CASE(linked->id);
-               }
-
-               I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
-       }
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
-
-       if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
-               icl_program_input_csc(plane, crtc_state, plane_state);
-
-       skl_write_plane_wm(plane, crtc_state);
-
-       I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-       I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
-       I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
-
-       I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
-
-       if (INTEL_GEN(dev_priv) < 11)
-               I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-                             (plane_state->color_plane[1].y << 16) |
-                             plane_state->color_plane[1].x);
-
-       /*
-        * The control register self-arms if the plane was previously
-        * disabled. Try to make the plane enable atomic by writing
-        * the control register just before the surface register.
-        */
-       I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
-       I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
-                     intel_plane_ggtt_offset(plane_state) + surf_addr);
-
-       if (!slave && plane_state->scaler_id >= 0)
-               skl_program_scaler(plane, crtc_state, plane_state);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-skl_update_plane(struct intel_plane *plane,
-                const struct intel_crtc_state *crtc_state,
-                const struct intel_plane_state *plane_state)
-{
-       int color_plane = 0;
-
-       if (plane_state->linked_plane) {
-               /* Program the UV plane */
-               color_plane = 1;
-       }
-
-       skl_program_plane(plane, crtc_state, plane_state,
-                         color_plane, false, plane_state->ctl);
-}
-
-static void
-icl_update_slave(struct intel_plane *plane,
-                const struct intel_crtc_state *crtc_state,
-                const struct intel_plane_state *plane_state)
-{
-       skl_program_plane(plane, crtc_state, plane_state, 0, true,
-                         plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
-}
-
-static void
-skl_disable_plane(struct intel_plane *plane,
-                 const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum plane_id plane_id = plane->id;
-       enum pipe pipe = plane->pipe;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       if (icl_is_hdr_plane(dev_priv, plane_id))
-               I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
-
-       skl_write_plane_wm(plane, crtc_state);
-
-       I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
-       I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool
-skl_plane_get_hw_state(struct intel_plane *plane,
-                      enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       enum plane_id plane_id = plane->id;
-       intel_wakeref_t wakeref;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
-
-       *pipe = plane->pipe;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static void
-chv_update_csc(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       enum plane_id plane_id = plane->id;
-       /*
-        * |r|   | c0 c1 c2 |   |cr|
-        * |g| = | c3 c4 c5 | x |y |
-        * |b|   | c6 c7 c8 |   |cb|
-        *
-        * Coefficients are s3.12.
-        *
-        * Cb and Cr apparently come in as signed already, and
-        * we always get full range data in on account of CLRC0/1.
-        */
-       static const s16 csc_matrix[][9] = {
-               /* BT.601 full range YCbCr -> full range RGB */
-               [DRM_COLOR_YCBCR_BT601] = {
-                        5743, 4096,     0,
-                       -2925, 4096, -1410,
-                           0, 4096,  7258,
-               },
-               /* BT.709 full range YCbCr -> full range RGB */
-               [DRM_COLOR_YCBCR_BT709] = {
-                        6450, 4096,     0,
-                       -1917, 4096,  -767,
-                           0, 4096,  7601,
-               },
-       };
-       const s16 *csc = csc_matrix[plane_state->base.color_encoding];
-
-       /* Seems RGB data bypasses the CSC always */
-       if (!fb->format->is_yuv)
-               return;
-
-       I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-       I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-       I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
-       I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
-       I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
-       I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
-       I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
-       I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
-
-       I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
-       I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
-       I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
-
-       I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-       I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-       I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-}
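
The s3.12 values can be cross-checked against the textbook BT.601 full-range YCbCr-to-RGB coefficients (1.402 for Cr->R, about -0.714 for Cr->G, 1.772 for Cb->B, 1.0 for Y). A minimal standalone sketch of the encoding (to_s3_12() is a hypothetical helper for illustration):

#include <stdio.h>

/* Round a real coefficient into the s3.12 fixed-point format used above. */
static int to_s3_12(double coeff)
{
	return (int)(coeff * 4096.0 + (coeff >= 0.0 ? 0.5 : -0.5));
}

int main(void)
{
	/* Prints 5743 -2925 7258 4096, matching the BT.601 row of csc_matrix[]. */
	printf("%d %d %d %d\n",
	       to_s3_12(1.402), to_s3_12(-0.714136),
	       to_s3_12(1.772), to_s3_12(1.0));
	return 0;
}
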
-
-#define SIN_0 0
-#define COS_0 1
-
-static void
-vlv_update_clrc(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       enum pipe pipe = plane->pipe;
-       enum plane_id plane_id = plane->id;
-       int contrast, brightness, sh_scale, sh_sin, sh_cos;
-
-       if (fb->format->is_yuv &&
-           plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
-               /*
-                * Expand limited range to full range:
-                * Contrast is applied first and is used to expand Y range.
-                * Brightness is applied second and is used to remove the
-                * offset from Y. Saturation/hue is used to expand CbCr range.
-                */
-               contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
-               brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
-               sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
-               sh_sin = SIN_0 * sh_scale;
-               sh_cos = COS_0 * sh_scale;
-       } else {
-               /* Pass-through everything. */
-               contrast = 1 << 6;
-               brightness = 0;
-               sh_scale = 1 << 7;
-               sh_sin = SIN_0 * sh_scale;
-               sh_cos = COS_0 * sh_scale;
-       }
-
-       /* FIXME these registers are single buffered :( */
-       I915_WRITE_FW(SPCLRC0(pipe, plane_id),
-                     SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
-       I915_WRITE_FW(SPCLRC1(pipe, plane_id),
-                     SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
-}
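
For the limited-range branch above the integer math comes out to contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16) = 75 (roughly 1.16 in the 6-bit fractional format), brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16) = -19, and sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128) = 146; with SIN_0 = 0 and COS_0 = 1 that leaves sh_sin = 0 and sh_cos = 146, i.e. a plain ~1.14x chroma gain with no hue rotation.
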
-
-static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       u32 sprctl = 0;
-
-       if (crtc_state->gamma_enable)
-               sprctl |= SP_GAMMA_ENABLE;
-
-       return sprctl;
-}
-
-static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
-                         const struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 sprctl;
-
-       sprctl = SP_ENABLE;
-
-       switch (fb->format->format) {
-       case DRM_FORMAT_YUYV:
-               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
-               break;
-       case DRM_FORMAT_YVYU:
-               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
-               break;
-       case DRM_FORMAT_UYVY:
-               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
-               break;
-       case DRM_FORMAT_VYUY:
-               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
-               break;
-       case DRM_FORMAT_RGB565:
-               sprctl |= SP_FORMAT_BGR565;
-               break;
-       case DRM_FORMAT_XRGB8888:
-               sprctl |= SP_FORMAT_BGRX8888;
-               break;
-       case DRM_FORMAT_ARGB8888:
-               sprctl |= SP_FORMAT_BGRA8888;
-               break;
-       case DRM_FORMAT_XBGR2101010:
-               sprctl |= SP_FORMAT_RGBX1010102;
-               break;
-       case DRM_FORMAT_ABGR2101010:
-               sprctl |= SP_FORMAT_RGBA1010102;
-               break;
-       case DRM_FORMAT_XBGR8888:
-               sprctl |= SP_FORMAT_RGBX8888;
-               break;
-       case DRM_FORMAT_ABGR8888:
-               sprctl |= SP_FORMAT_RGBA8888;
-               break;
-       default:
-               MISSING_CASE(fb->format->format);
-               return 0;
-       }
-
-       if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
-               sprctl |= SP_YUV_FORMAT_BT709;
-
-       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-               sprctl |= SP_TILED;
-
-       if (rotation & DRM_MODE_ROTATE_180)
-               sprctl |= SP_ROTATE_180;
-
-       if (rotation & DRM_MODE_REFLECT_X)
-               sprctl |= SP_MIRROR;
-
-       if (key->flags & I915_SET_COLORKEY_SOURCE)
-               sprctl |= SP_SOURCE_KEY;
-
-       return sprctl;
-}
-
-static void
-vlv_update_plane(struct intel_plane *plane,
-                const struct intel_crtc_state *crtc_state,
-                const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       enum plane_id plane_id = plane->id;
-       u32 sprsurf_offset = plane_state->color_plane[0].offset;
-       u32 linear_offset;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       int crtc_x = plane_state->base.dst.x1;
-       int crtc_y = plane_state->base.dst.y1;
-       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
-       u32 x = plane_state->color_plane[0].x;
-       u32 y = plane_state->color_plane[0].y;
-       unsigned long irqflags;
-       u32 sprctl;
-
-       sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
-
-       /* Sizes are 0 based */
-       crtc_w--;
-       crtc_h--;
-
-       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
-                     plane_state->color_plane[0].stride);
-       I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
-       I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
-       I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
-               chv_update_csc(plane_state);
-
-       if (key->flags) {
-               I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
-               I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
-               I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
-       }
-
-       I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
-       I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
-
-       /*
-        * The control register self-arms if the plane was previously
-        * disabled. Try to make the plane enable atomic by writing
-        * the control register just before the surface register.
-        */
-       I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
-       I915_WRITE_FW(SPSURF(pipe, plane_id),
-                     intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-
-       vlv_update_clrc(plane_state);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-vlv_disable_plane(struct intel_plane *plane,
-                 const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       enum plane_id plane_id = plane->id;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-       I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool
-vlv_plane_get_hw_state(struct intel_plane *plane,
-                      enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       enum plane_id plane_id = plane->id;
-       intel_wakeref_t wakeref;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
-
-       *pipe = plane->pipe;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       u32 sprctl = 0;
-
-       if (crtc_state->gamma_enable)
-               sprctl |= SPRITE_GAMMA_ENABLE;
-
-       if (crtc_state->csc_enable)
-               sprctl |= SPRITE_PIPE_CSC_ENABLE;
-
-       return sprctl;
-}
-
-static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
-                         const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 sprctl;
-
-       sprctl = SPRITE_ENABLE;
-
-       if (IS_IVYBRIDGE(dev_priv))
-               sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
-
-       switch (fb->format->format) {
-       case DRM_FORMAT_XBGR8888:
-               sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
-               break;
-       case DRM_FORMAT_XRGB8888:
-               sprctl |= SPRITE_FORMAT_RGBX888;
-               break;
-       case DRM_FORMAT_YUYV:
-               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
-               break;
-       case DRM_FORMAT_YVYU:
-               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
-               break;
-       case DRM_FORMAT_UYVY:
-               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
-               break;
-       case DRM_FORMAT_VYUY:
-               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
-               break;
-       default:
-               MISSING_CASE(fb->format->format);
-               return 0;
-       }
-
-       if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
-               sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
-
-       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-               sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
-
-       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-               sprctl |= SPRITE_TILED;
-
-       if (rotation & DRM_MODE_ROTATE_180)
-               sprctl |= SPRITE_ROTATE_180;
-
-       if (key->flags & I915_SET_COLORKEY_DESTINATION)
-               sprctl |= SPRITE_DEST_KEY;
-       else if (key->flags & I915_SET_COLORKEY_SOURCE)
-               sprctl |= SPRITE_SOURCE_KEY;
-
-       return sprctl;
-}
-
-static void
-ivb_update_plane(struct intel_plane *plane,
-                const struct intel_crtc_state *crtc_state,
-                const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       u32 sprsurf_offset = plane_state->color_plane[0].offset;
-       u32 linear_offset;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       int crtc_x = plane_state->base.dst.x1;
-       int crtc_y = plane_state->base.dst.y1;
-       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
-       u32 x = plane_state->color_plane[0].x;
-       u32 y = plane_state->color_plane[0].y;
-       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
-       u32 sprctl, sprscale = 0;
-       unsigned long irqflags;
-
-       sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
-
-       /* Sizes are 0 based */
-       src_w--;
-       src_h--;
-       crtc_w--;
-       crtc_h--;
-
-       if (crtc_w != src_w || crtc_h != src_h)
-               sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
-
-       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
-       I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-       I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-       if (IS_IVYBRIDGE(dev_priv))
-               I915_WRITE_FW(SPRSCALE(pipe), sprscale);
-
-       if (key->flags) {
-               I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
-               I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
-               I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
-       }
-
-       /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
-        * register */
-       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
-       } else {
-               I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
-               I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
-       }
-
-       /*
-        * The control register self-arms if the plane was previously
-        * disabled. Try to make the plane enable atomic by writing
-        * the control register just before the surface register.
-        */
-       I915_WRITE_FW(SPRCTL(pipe), sprctl);
-       I915_WRITE_FW(SPRSURF(pipe),
-                     intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-ivb_disable_plane(struct intel_plane *plane,
-                 const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(SPRCTL(pipe), 0);
-       /* Disable the scaler */
-       if (IS_IVYBRIDGE(dev_priv))
-               I915_WRITE_FW(SPRSCALE(pipe), 0);
-       I915_WRITE_FW(SPRSURF(pipe), 0);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool
-ivb_plane_get_hw_state(struct intel_plane *plane,
-                      enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
-
-       *pipe = plane->pipe;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static unsigned int
-g4x_sprite_max_stride(struct intel_plane *plane,
-                     u32 pixel_format, u64 modifier,
-                     unsigned int rotation)
-{
-       return 16384;
-}
-
-static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
-       u32 dvscntr = 0;
-
-       if (crtc_state->gamma_enable)
-               dvscntr |= DVS_GAMMA_ENABLE;
-
-       if (crtc_state->csc_enable)
-               dvscntr |= DVS_PIPE_CSC_ENABLE;
-
-       return dvscntr;
-}
-
-static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
-                         const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       u32 dvscntr;
-
-       dvscntr = DVS_ENABLE;
-
-       if (IS_GEN(dev_priv, 6))
-               dvscntr |= DVS_TRICKLE_FEED_DISABLE;
-
-       switch (fb->format->format) {
-       case DRM_FORMAT_XBGR8888:
-               dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
-               break;
-       case DRM_FORMAT_XRGB8888:
-               dvscntr |= DVS_FORMAT_RGBX888;
-               break;
-       case DRM_FORMAT_YUYV:
-               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
-               break;
-       case DRM_FORMAT_YVYU:
-               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
-               break;
-       case DRM_FORMAT_UYVY:
-               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
-               break;
-       case DRM_FORMAT_VYUY:
-               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
-               break;
-       default:
-               MISSING_CASE(fb->format->format);
-               return 0;
-       }
-
-       if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
-               dvscntr |= DVS_YUV_FORMAT_BT709;
-
-       if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-               dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
-
-       if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-               dvscntr |= DVS_TILED;
-
-       if (rotation & DRM_MODE_ROTATE_180)
-               dvscntr |= DVS_ROTATE_180;
-
-       if (key->flags & I915_SET_COLORKEY_DESTINATION)
-               dvscntr |= DVS_DEST_KEY;
-       else if (key->flags & I915_SET_COLORKEY_SOURCE)
-               dvscntr |= DVS_SOURCE_KEY;
-
-       return dvscntr;
-}
-
-static void
-g4x_update_plane(struct intel_plane *plane,
-                const struct intel_crtc_state *crtc_state,
-                const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       u32 dvssurf_offset = plane_state->color_plane[0].offset;
-       u32 linear_offset;
-       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-       int crtc_x = plane_state->base.dst.x1;
-       int crtc_y = plane_state->base.dst.y1;
-       u32 crtc_w = drm_rect_width(&plane_state->base.dst);
-       u32 crtc_h = drm_rect_height(&plane_state->base.dst);
-       u32 x = plane_state->color_plane[0].x;
-       u32 y = plane_state->color_plane[0].y;
-       u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
-       u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
-       u32 dvscntr, dvsscale = 0;
-       unsigned long irqflags;
-
-       dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
-
-       /* Sizes are 0 based */
-       src_w--;
-       src_h--;
-       crtc_w--;
-       crtc_h--;
-
-       if (crtc_w != src_w || crtc_h != src_h)
-               dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
-
-       linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
-       I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-       I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
-       I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
-
-       if (key->flags) {
-               I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
-               I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
-               I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
-       }
-
-       I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
-       I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
-
-       /*
-        * The control register self-arms if the plane was previously
-        * disabled. Try to make the plane enable atomic by writing
-        * the control register just before the surface register.
-        */
-       I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
-       I915_WRITE_FW(DVSSURF(pipe),
-                     intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-g4x_disable_plane(struct intel_plane *plane,
-                 const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum pipe pipe = plane->pipe;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-       I915_WRITE_FW(DVSCNTR(pipe), 0);
-       /* Disable the scaler */
-       I915_WRITE_FW(DVSSCALE(pipe), 0);
-       I915_WRITE_FW(DVSSURF(pipe), 0);
-
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool
-g4x_plane_get_hw_state(struct intel_plane *plane,
-                      enum pipe *pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       enum intel_display_power_domain power_domain;
-       intel_wakeref_t wakeref;
-       bool ret;
-
-       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
-       wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-       if (!wakeref)
-               return false;
-
-       ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
-
-       *pipe = plane->pipe;
-
-       intel_display_power_put(dev_priv, power_domain, wakeref);
-
-       return ret;
-}
-
-static bool intel_fb_scalable(const struct drm_framebuffer *fb)
-{
-       if (!fb)
-               return false;
-
-       switch (fb->format->format) {
-       case DRM_FORMAT_C8:
-               return false;
-       default:
-               return true;
-       }
-}
-
-static int
-g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
-                        struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       const struct drm_rect *src = &plane_state->base.src;
-       const struct drm_rect *dst = &plane_state->base.dst;
-       int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->base.adjusted_mode;
-       unsigned int cpp = fb->format->cpp[0];
-       unsigned int width_bytes;
-       int min_width, min_height;
-
-       crtc_w = drm_rect_width(dst);
-       crtc_h = drm_rect_height(dst);
-
-       src_x = src->x1 >> 16;
-       src_y = src->y1 >> 16;
-       src_w = drm_rect_width(src) >> 16;
-       src_h = drm_rect_height(src) >> 16;
-
-       if (src_w == crtc_w && src_h == crtc_h)
-               return 0;
-
-       min_width = 3;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               if (src_h & 1) {
-                       DRM_DEBUG_KMS("Source height must be even with interlaced modes\n");
-                       return -EINVAL;
-               }
-               min_height = 6;
-       } else {
-               min_height = 3;
-       }
-
-       width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
-
-       if (src_w < min_width || src_h < min_height ||
-           src_w > 2048 || src_h > 2048) {
-               DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
-                             src_w, src_h, min_width, min_height, 2048, 2048);
-               return -EINVAL;
-       }
-
-       if (width_bytes > 4096) {
-               DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n",
-                             width_bytes, 4096);
-               return -EINVAL;
-       }
-
-       if (width_bytes > 4096 || fb->pitches[0] > 4096) {
-               DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
-                             fb->pitches[0], 4096);
-               return -EINVAL;
-       }
-
-       return 0;
-}
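
As a worked example of the fetch-width limit above (the numbers are purely illustrative): with a 4-byte-per-pixel format, src_x = 100 and src_w = 1024, width_bytes = ((100 * 4) & 63) + 1024 * 4 = 16 + 4096 = 4112, which exceeds the 4096-byte limit, so the scaled update is rejected even though src_w itself is within the 2048-pixel bound.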
-
-static int
-g4x_sprite_check(struct intel_crtc_state *crtc_state,
-                struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       int min_scale = DRM_PLANE_HELPER_NO_SCALING;
-       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
-       int ret;
-
-       if (intel_fb_scalable(plane_state->base.fb)) {
-               if (INTEL_GEN(dev_priv) < 7) {
-                       min_scale = 1;
-                       max_scale = 16 << 16;
-               } else if (IS_IVYBRIDGE(dev_priv)) {
-                       min_scale = 1;
-                       max_scale = 2 << 16;
-               }
-       }
-
-       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-                                                 &crtc_state->base,
-                                                 min_scale, max_scale,
-                                                 true, true);
-       if (ret)
-               return ret;
-
-       ret = i9xx_check_plane_surface(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       ret = intel_plane_check_src_coordinates(plane_state);
-       if (ret)
-               return ret;
-
-       ret = g4x_sprite_check_scaling(crtc_state, plane_state);
-       if (ret)
-               return ret;
-
-       if (INTEL_GEN(dev_priv) >= 7)
-               plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
-       else
-               plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
-
-       return 0;
-}
-
-int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       unsigned int rotation = plane_state->base.rotation;
-
-       /* CHV ignores the mirror bit when the rotate bit is set :( */
-       if (IS_CHERRYVIEW(dev_priv) &&
-           rotation & DRM_MODE_ROTATE_180 &&
-           rotation & DRM_MODE_REFLECT_X) {
-               DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-vlv_sprite_check(struct intel_crtc_state *crtc_state,
-                struct intel_plane_state *plane_state)
-{
-       int ret;
-
-       ret = chv_plane_check_rotation(plane_state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-                                                 &crtc_state->base,
-                                                 DRM_PLANE_HELPER_NO_SCALING,
-                                                 DRM_PLANE_HELPER_NO_SCALING,
-                                                 true, true);
-       if (ret)
-               return ret;
-
-       ret = i9xx_check_plane_surface(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       ret = intel_plane_check_src_coordinates(plane_state);
-       if (ret)
-               return ret;
-
-       plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
-
-       return 0;
-}
-
-static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
-                             const struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       struct drm_format_name_buf format_name;
-
-       if (!fb)
-               return 0;
-
-       if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
-           is_ccs_modifier(fb->modifier)) {
-               DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
-                             rotation);
-               return -EINVAL;
-       }
-
-       if (rotation & DRM_MODE_REFLECT_X &&
-           fb->modifier == DRM_FORMAT_MOD_LINEAR) {
-               DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
-               return -EINVAL;
-       }
-
-       if (drm_rotation_90_or_270(rotation)) {
-               if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
-                   fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
-                       DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * 90/270 rotation is not allowed with RGB64 16:16:16:16 or
-                * Indexed 8-bit formats. RGB 16-bit 5:6:5 is allowed from
-                * gen11 onwards.
-                */
-               switch (fb->format->format) {
-               case DRM_FORMAT_RGB565:
-                       if (INTEL_GEN(dev_priv) >= 11)
-                               break;
-                       /* fall through */
-               case DRM_FORMAT_C8:
-               case DRM_FORMAT_XRGB16161616F:
-               case DRM_FORMAT_XBGR16161616F:
-               case DRM_FORMAT_ARGB16161616F:
-               case DRM_FORMAT_ABGR16161616F:
-               case DRM_FORMAT_Y210:
-               case DRM_FORMAT_Y212:
-               case DRM_FORMAT_Y216:
-               case DRM_FORMAT_XVYU12_16161616:
-               case DRM_FORMAT_XVYU16161616:
-                       DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
-                                     drm_get_format_name(fb->format->format,
-                                                         &format_name));
-                       return -EINVAL;
-               default:
-                       break;
-               }
-       }
-
-       /* Y-tiling is not supported in IF-ID Interlace mode */
-       if (crtc_state->base.enable &&
-           crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
-           (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
-            fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
-            fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-            fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
-               DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
-                                          const struct intel_plane_state *plane_state)
-{
-       struct drm_i915_private *dev_priv =
-               to_i915(plane_state->base.plane->dev);
-       int crtc_x = plane_state->base.dst.x1;
-       int crtc_w = drm_rect_width(&plane_state->base.dst);
-       int pipe_src_w = crtc_state->pipe_src_w;
-
-       /*
-        * Display WA #1175: cnl,glk
-        * Planes other than the cursor may cause FIFO underflow and display
-        * corruption if starting less than 4 pixels from the right edge of
-        * the screen.
-        * Besides the WA above, also reject the analogous case where a plane
-        * other than the cursor ends less than 4 pixels from the left edge of
-        * the screen, which may likewise cause FIFO underflow and display
-        * corruption.
-        */
-       if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-           (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
-               DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
-                             crtc_x + crtc_w < 4 ? "end" : "start",
-                             crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
-                             4, pipe_src_w - 4);
-               return -ERANGE;
-       }
-
-       return 0;
-}
-
-static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
-{
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       unsigned int rotation = plane_state->base.rotation;
-       int src_w = drm_rect_width(&plane_state->base.src) >> 16;
-
-       /* Display WA #1106 */
-       if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
-           (rotation == DRM_MODE_ROTATE_270 ||
-            rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
-               DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int skl_plane_check(struct intel_crtc_state *crtc_state,
-                          struct intel_plane_state *plane_state)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
-       int min_scale = DRM_PLANE_HELPER_NO_SCALING;
-       int max_scale = DRM_PLANE_HELPER_NO_SCALING;
-       int ret;
-
-       ret = skl_plane_check_fb(crtc_state, plane_state);
-       if (ret)
-               return ret;
-
-       /* use scaler when colorkey is not required */
-       if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
-               min_scale = 1;
-               max_scale = skl_max_scale(crtc_state, fb->format->format);
-       }
-
-       ret = drm_atomic_helper_check_plane_state(&plane_state->base,
-                                                 &crtc_state->base,
-                                                 min_scale, max_scale,
-                                                 true, true);
-       if (ret)
-               return ret;
-
-       ret = skl_check_plane_surface(plane_state);
-       if (ret)
-               return ret;
-
-       if (!plane_state->base.visible)
-               return 0;
-
-       ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
-       if (ret)
-               return ret;
-
-       ret = intel_plane_check_src_coordinates(plane_state);
-       if (ret)
-               return ret;
-
-       ret = skl_plane_check_nv12_rotation(plane_state);
-       if (ret)
-               return ret;
-
-       /*
-        * HW only has 8 bits of alpha precision, so disable the plane if the
-        * upper 8 bits of the 16-bit plane alpha are all zero (invisible).
-        */
-       if (!(plane_state->base.alpha >> 8))
-               plane_state->base.visible = false;
-
-       plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
-
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-               plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
-                                                            plane_state);
-
-       return 0;
-}
-
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
-{
-       return INTEL_GEN(dev_priv) >= 9;
-}
-
-static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
-                                const struct drm_intel_sprite_colorkey *set)
-{
-       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-
-       *key = *set;
-
-       /*
-        * We want src key enabled on the
-        * sprite and not on the primary.
-        */
-       if (plane->id == PLANE_PRIMARY &&
-           set->flags & I915_SET_COLORKEY_SOURCE)
-               key->flags = 0;
-
-       /*
-        * On SKL+ we want dst key enabled on
-        * the primary and not on the sprite.
-        */
-       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
-           set->flags & I915_SET_COLORKEY_DESTINATION)
-               key->flags = 0;
-}
-
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_intel_sprite_colorkey *set = data;
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
-       struct drm_atomic_state *state;
-       struct drm_modeset_acquire_ctx ctx;
-       int ret = 0;
-
-       /* ignore the pointless "none" flag */
-       set->flags &= ~I915_SET_COLORKEY_NONE;
-
-       if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
-               return -EINVAL;
-
-       /* Make sure we don't try to enable both src & dest simultaneously */
-       if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
-               return -EINVAL;
-
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-           set->flags & I915_SET_COLORKEY_DESTINATION)
-               return -EINVAL;
-
-       plane = drm_plane_find(dev, file_priv, set->plane_id);
-       if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
-               return -ENOENT;
-
-       /*
-        * On SKL+ only plane 2 can do destination keying against plane 1.
-        * Also, multiple planes can't do destination keying on the same
-        * pipe simultaneously.
-        */
-       if (INTEL_GEN(dev_priv) >= 9 &&
-           to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
-           set->flags & I915_SET_COLORKEY_DESTINATION)
-               return -EINVAL;
-
-       drm_modeset_acquire_init(&ctx, 0);
-
-       state = drm_atomic_state_alloc(plane->dev);
-       if (!state) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       state->acquire_ctx = &ctx;
-
-       while (1) {
-               plane_state = drm_atomic_get_plane_state(state, plane);
-               ret = PTR_ERR_OR_ZERO(plane_state);
-               if (!ret)
-                       intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
-
-               /*
-                * On some platforms we have to configure
-                * the dst colorkey on the primary plane.
-                */
-               if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
-                       struct intel_crtc *crtc =
-                               intel_get_crtc_for_pipe(dev_priv,
-                                                       to_intel_plane(plane)->pipe);
-
-                       plane_state = drm_atomic_get_plane_state(state,
-                                                                crtc->base.primary);
-                       ret = PTR_ERR_OR_ZERO(plane_state);
-                       if (!ret)
-                               intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
-               }
-
-               if (!ret)
-                       ret = drm_atomic_commit(state);
-
-               if (ret != -EDEADLK)
-                       break;
-
-               drm_atomic_state_clear(state);
-               drm_modeset_backoff(&ctx);
-       }
-
-       drm_atomic_state_put(state);
-out:
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-       return ret;
-}
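
For context, a minimal userspace sketch of exercising this ioctl. It assumes libdrm (xf86drm.h) and the i915 uapi header; the helper name and its error handling are illustrative and not part of the driver:

/* Hypothetical helper: enable source colorkeying on a sprite (overlay) plane. */
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int set_sprite_src_colorkey(int drm_fd, unsigned int plane_id,
                                   unsigned int min_value, unsigned int max_value,
                                   unsigned int channel_mask)
{
        struct drm_intel_sprite_colorkey ckey;

        memset(&ckey, 0, sizeof(ckey));
        ckey.plane_id = plane_id;
        ckey.min_value = min_value;
        ckey.max_value = max_value;
        ckey.channel_mask = channel_mask;
        ckey.flags = I915_SET_COLORKEY_SOURCE; /* src keying lands on the sprite */

        if (drmIoctl(drm_fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey)) {
                perror("DRM_IOCTL_I915_SET_SPRITE_COLORKEY");
                return -1;
        }

        return 0;
}

Passing I915_SET_COLORKEY_DESTINATION instead selects destination keying, subject to the platform restrictions checked above.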
-
-static const u32 g4x_plane_formats[] = {
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-};
-
-static const u64 i9xx_plane_format_modifiers[] = {
-       I915_FORMAT_MOD_X_TILED,
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-static const u32 snb_plane_formats[] = {
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-};
-
-static const u32 vlv_plane_formats[] = {
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_ABGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-};
-
-static const u32 skl_plane_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-};
-
-static const u32 icl_plane_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_Y210,
-       DRM_FORMAT_Y212,
-       DRM_FORMAT_Y216,
-       DRM_FORMAT_XVYU2101010,
-       DRM_FORMAT_XVYU12_16161616,
-       DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_plane_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_XRGB16161616F,
-       DRM_FORMAT_XBGR16161616F,
-       DRM_FORMAT_ARGB16161616F,
-       DRM_FORMAT_ABGR16161616F,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_Y210,
-       DRM_FORMAT_Y212,
-       DRM_FORMAT_Y216,
-       DRM_FORMAT_XVYU2101010,
-       DRM_FORMAT_XVYU12_16161616,
-       DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 skl_planar_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_NV12,
-};
-
-static const u32 glk_planar_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_NV12,
-       DRM_FORMAT_P010,
-       DRM_FORMAT_P012,
-       DRM_FORMAT_P016,
-};
-
-static const u32 icl_planar_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_NV12,
-       DRM_FORMAT_P010,
-       DRM_FORMAT_P012,
-       DRM_FORMAT_P016,
-       DRM_FORMAT_Y210,
-       DRM_FORMAT_Y212,
-       DRM_FORMAT_Y216,
-       DRM_FORMAT_XVYU2101010,
-       DRM_FORMAT_XVYU12_16161616,
-       DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_planar_formats[] = {
-       DRM_FORMAT_C8,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB2101010,
-       DRM_FORMAT_XBGR2101010,
-       DRM_FORMAT_XRGB16161616F,
-       DRM_FORMAT_XBGR16161616F,
-       DRM_FORMAT_ARGB16161616F,
-       DRM_FORMAT_ABGR16161616F,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_NV12,
-       DRM_FORMAT_P010,
-       DRM_FORMAT_P012,
-       DRM_FORMAT_P016,
-       DRM_FORMAT_Y210,
-       DRM_FORMAT_Y212,
-       DRM_FORMAT_Y216,
-       DRM_FORMAT_XVYU2101010,
-       DRM_FORMAT_XVYU12_16161616,
-       DRM_FORMAT_XVYU16161616,
-};
-
-static const u64 skl_plane_format_modifiers_noccs[] = {
-       I915_FORMAT_MOD_Yf_TILED,
-       I915_FORMAT_MOD_Y_TILED,
-       I915_FORMAT_MOD_X_TILED,
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 skl_plane_format_modifiers_ccs[] = {
-       I915_FORMAT_MOD_Yf_TILED_CCS,
-       I915_FORMAT_MOD_Y_TILED_CCS,
-       I915_FORMAT_MOD_Yf_TILED,
-       I915_FORMAT_MOD_Y_TILED,
-       I915_FORMAT_MOD_X_TILED,
-       DRM_FORMAT_MOD_LINEAR,
-       DRM_FORMAT_MOD_INVALID
-};
-
-static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
-                                           u32 format, u64 modifier)
-{
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-               if (modifier == DRM_FORMAT_MOD_LINEAR ||
-                   modifier == I915_FORMAT_MOD_X_TILED)
-                       return true;
-               /* fall through */
-       default:
-               return false;
-       }
-}
-
-static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
-                                           u32 format, u64 modifier)
-{
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-               if (modifier == DRM_FORMAT_MOD_LINEAR ||
-                   modifier == I915_FORMAT_MOD_X_TILED)
-                       return true;
-               /* fall through */
-       default:
-               return false;
-       }
-}
-
-static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
-                                           u32 format, u64 modifier)
-{
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_ABGR8888:
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_ABGR2101010:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-               if (modifier == DRM_FORMAT_MOD_LINEAR ||
-                   modifier == I915_FORMAT_MOD_X_TILED)
-                       return true;
-               /* fall through */
-       default:
-               return false;
-       }
-}
-
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
-                                          u32 format, u64 modifier)
-{
-       struct intel_plane *plane = to_intel_plane(_plane);
-
-       switch (modifier) {
-       case DRM_FORMAT_MOD_LINEAR:
-       case I915_FORMAT_MOD_X_TILED:
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Yf_TILED:
-               break;
-       case I915_FORMAT_MOD_Y_TILED_CCS:
-       case I915_FORMAT_MOD_Yf_TILED_CCS:
-               if (!plane->has_ccs)
-                       return false;
-               break;
-       default:
-               return false;
-       }
-
-       switch (format) {
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_ABGR8888:
-               if (is_ccs_modifier(modifier))
-                       return true;
-               /* fall through */
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_P010:
-       case DRM_FORMAT_P012:
-       case DRM_FORMAT_P016:
-       case DRM_FORMAT_XVYU2101010:
-               if (modifier == I915_FORMAT_MOD_Yf_TILED)
-                       return true;
-               /* fall through */
-       case DRM_FORMAT_C8:
-       case DRM_FORMAT_XBGR16161616F:
-       case DRM_FORMAT_ABGR16161616F:
-       case DRM_FORMAT_XRGB16161616F:
-       case DRM_FORMAT_ARGB16161616F:
-       case DRM_FORMAT_Y210:
-       case DRM_FORMAT_Y212:
-       case DRM_FORMAT_Y216:
-       case DRM_FORMAT_XVYU12_16161616:
-       case DRM_FORMAT_XVYU16161616:
-               if (modifier == DRM_FORMAT_MOD_LINEAR ||
-                   modifier == I915_FORMAT_MOD_X_TILED ||
-                   modifier == I915_FORMAT_MOD_Y_TILED)
-                       return true;
-               /* fall through */
-       default:
-               return false;
-       }
-}
-
-static const struct drm_plane_funcs g4x_sprite_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = g4x_sprite_format_mod_supported,
-};
-
-static const struct drm_plane_funcs snb_sprite_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = snb_sprite_format_mod_supported,
-};
-
-static const struct drm_plane_funcs vlv_sprite_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = vlv_sprite_format_mod_supported,
-};
-
-static const struct drm_plane_funcs skl_plane_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = intel_plane_destroy,
-       .atomic_duplicate_state = intel_plane_duplicate_state,
-       .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, enum plane_id plane_id)
-{
-       if (!HAS_FBC(dev_priv))
-               return false;
-
-       return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
-                                enum pipe pipe, enum plane_id plane_id)
-{
-       if (INTEL_GEN(dev_priv) >= 11)
-               return plane_id <= PLANE_SPRITE3;
-
-       /* Display WA #0870: skl, bxt */
-       if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-               return false;
-
-       if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
-               return false;
-
-       if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
-               return false;
-
-       return true;
-}
-
-static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, enum plane_id plane_id)
-{
-       if (plane_id == PLANE_CURSOR)
-               return false;
-
-       if (INTEL_GEN(dev_priv) >= 10)
-               return true;
-
-       if (IS_GEMINILAKE(dev_priv))
-               return pipe != PIPE_C;
-
-       return pipe != PIPE_C &&
-               (plane_id == PLANE_PRIMARY ||
-                plane_id == PLANE_SPRITE0);
-}
-
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
-                          enum pipe pipe, enum plane_id plane_id)
-{
-       struct intel_plane *plane;
-       enum drm_plane_type plane_type;
-       unsigned int supported_rotations;
-       unsigned int possible_crtcs;
-       const u64 *modifiers;
-       const u32 *formats;
-       int num_formats;
-       int ret;
-
-       plane = intel_plane_alloc();
-       if (IS_ERR(plane))
-               return plane;
-
-       plane->pipe = pipe;
-       plane->id = plane_id;
-       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
-
-       plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
-       if (plane->has_fbc) {
-               struct intel_fbc *fbc = &dev_priv->fbc;
-
-               fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
-       }
-
-       plane->max_stride = skl_plane_max_stride;
-       plane->update_plane = skl_update_plane;
-       plane->disable_plane = skl_disable_plane;
-       plane->get_hw_state = skl_plane_get_hw_state;
-       plane->check_plane = skl_plane_check;
-       if (icl_is_nv12_y_plane(plane_id))
-               plane->update_slave = icl_update_slave;
-
-       if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
-               if (icl_is_hdr_plane(dev_priv, plane_id)) {
-                       formats = icl_hdr_planar_formats;
-                       num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
-               } else if (INTEL_GEN(dev_priv) >= 11) {
-                       formats = icl_planar_formats;
-                       num_formats = ARRAY_SIZE(icl_planar_formats);
-               } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
-                       formats = glk_planar_formats;
-                       num_formats = ARRAY_SIZE(glk_planar_formats);
-               } else {
-                       formats = skl_planar_formats;
-                       num_formats = ARRAY_SIZE(skl_planar_formats);
-               }
-       } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
-               formats = icl_hdr_plane_formats;
-               num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
-       } else if (INTEL_GEN(dev_priv) >= 11) {
-               formats = icl_plane_formats;
-               num_formats = ARRAY_SIZE(icl_plane_formats);
-       } else {
-               formats = skl_plane_formats;
-               num_formats = ARRAY_SIZE(skl_plane_formats);
-       }
-
-       plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
-       if (plane->has_ccs)
-               modifiers = skl_plane_format_modifiers_ccs;
-       else
-               modifiers = skl_plane_format_modifiers_noccs;
-
-       if (plane_id == PLANE_PRIMARY)
-               plane_type = DRM_PLANE_TYPE_PRIMARY;
-       else
-               plane_type = DRM_PLANE_TYPE_OVERLAY;
-
-       possible_crtcs = BIT(pipe);
-
-       ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-                                      possible_crtcs, &skl_plane_funcs,
-                                      formats, num_formats, modifiers,
-                                      plane_type,
-                                      "plane %d%c", plane_id + 1,
-                                      pipe_name(pipe));
-       if (ret)
-               goto fail;
-
-       supported_rotations =
-               DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
-               DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-
-       if (INTEL_GEN(dev_priv) >= 10)
-               supported_rotations |= DRM_MODE_REFLECT_X;
-
-       drm_plane_create_rotation_property(&plane->base,
-                                          DRM_MODE_ROTATE_0,
-                                          supported_rotations);
-
-       drm_plane_create_color_properties(&plane->base,
-                                         BIT(DRM_COLOR_YCBCR_BT601) |
-                                         BIT(DRM_COLOR_YCBCR_BT709),
-                                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
-                                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
-                                         DRM_COLOR_YCBCR_BT709,
-                                         DRM_COLOR_YCBCR_LIMITED_RANGE);
-
-       drm_plane_create_alpha_property(&plane->base);
-       drm_plane_create_blend_mode_property(&plane->base,
-                                            BIT(DRM_MODE_BLEND_PIXEL_NONE) |
-                                            BIT(DRM_MODE_BLEND_PREMULTI) |
-                                            BIT(DRM_MODE_BLEND_COVERAGE));
-
-       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
-       return plane;
-
-fail:
-       intel_plane_free(plane);
-
-       return ERR_PTR(ret);
-}
-
-struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, int sprite)
-{
-       struct intel_plane *plane;
-       const struct drm_plane_funcs *plane_funcs;
-       unsigned long possible_crtcs;
-       unsigned int supported_rotations;
-       const u64 *modifiers;
-       const u32 *formats;
-       int num_formats;
-       int ret;
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_universal_plane_create(dev_priv, pipe,
-                                                 PLANE_SPRITE0 + sprite);
-
-       plane = intel_plane_alloc();
-       if (IS_ERR(plane))
-               return plane;
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               plane->max_stride = i9xx_plane_max_stride;
-               plane->update_plane = vlv_update_plane;
-               plane->disable_plane = vlv_disable_plane;
-               plane->get_hw_state = vlv_plane_get_hw_state;
-               plane->check_plane = vlv_sprite_check;
-
-               formats = vlv_plane_formats;
-               num_formats = ARRAY_SIZE(vlv_plane_formats);
-               modifiers = i9xx_plane_format_modifiers;
-
-               plane_funcs = &vlv_sprite_funcs;
-       } else if (INTEL_GEN(dev_priv) >= 7) {
-               plane->max_stride = g4x_sprite_max_stride;
-               plane->update_plane = ivb_update_plane;
-               plane->disable_plane = ivb_disable_plane;
-               plane->get_hw_state = ivb_plane_get_hw_state;
-               plane->check_plane = g4x_sprite_check;
-
-               formats = snb_plane_formats;
-               num_formats = ARRAY_SIZE(snb_plane_formats);
-               modifiers = i9xx_plane_format_modifiers;
-
-               plane_funcs = &snb_sprite_funcs;
-       } else {
-               plane->max_stride = g4x_sprite_max_stride;
-               plane->update_plane = g4x_update_plane;
-               plane->disable_plane = g4x_disable_plane;
-               plane->get_hw_state = g4x_plane_get_hw_state;
-               plane->check_plane = g4x_sprite_check;
-
-               modifiers = i9xx_plane_format_modifiers;
-               if (IS_GEN(dev_priv, 6)) {
-                       formats = snb_plane_formats;
-                       num_formats = ARRAY_SIZE(snb_plane_formats);
-
-                       plane_funcs = &snb_sprite_funcs;
-               } else {
-                       formats = g4x_plane_formats;
-                       num_formats = ARRAY_SIZE(g4x_plane_formats);
-
-                       plane_funcs = &g4x_sprite_funcs;
-               }
-       }
-
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
-                       DRM_MODE_REFLECT_X;
-       } else {
-               supported_rotations =
-                       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
-       }
-
-       plane->pipe = pipe;
-       plane->id = PLANE_SPRITE0 + sprite;
-       plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
-       possible_crtcs = BIT(pipe);
-
-       ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-                                      possible_crtcs, plane_funcs,
-                                      formats, num_formats, modifiers,
-                                      DRM_PLANE_TYPE_OVERLAY,
-                                      "sprite %c", sprite_name(pipe, sprite));
-       if (ret)
-               goto fail;
-
-       drm_plane_create_rotation_property(&plane->base,
-                                          DRM_MODE_ROTATE_0,
-                                          supported_rotations);
-
-       drm_plane_create_color_properties(&plane->base,
-                                         BIT(DRM_COLOR_YCBCR_BT601) |
-                                         BIT(DRM_COLOR_YCBCR_BT709),
-                                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
-                                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
-                                         DRM_COLOR_YCBCR_BT709,
-                                         DRM_COLOR_YCBCR_LIMITED_RANGE);
-
-       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
-       return plane;
-
-fail:
-       intel_plane_free(plane);
-
-       return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/intel_sprite.h
deleted file mode 100644 (file)
index 500f6bf..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_SPRITE_H__
-#define __INTEL_SPRITE_H__
-
-#include <linux/types.h>
-
-#include "i915_drv.h"
-#include "intel_display.h"
-
-struct drm_device;
-struct drm_display_mode;
-struct drm_file;
-struct drm_i915_private;
-struct intel_crtc_state;
-struct intel_plane_state;
-
-bool is_planar_yuv_format(u32 pixelformat);
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
-                            int usecs);
-struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
-                                             enum pipe pipe, int plane);
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv);
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-int intel_plane_check_stride(const struct intel_plane_state *plane_state);
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
-int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
-                          enum pipe pipe, enum plane_id plane_id);
-
-static inline bool icl_is_nv12_y_plane(enum plane_id id)
-{
-       /* Don't need to do a gen check, these planes are only available on gen11 */
-       if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
-               return true;
-
-       return false;
-}
-
-static inline u8 icl_hdr_plane_mask(void)
-{
-       return BIT(PLANE_PRIMARY) |
-               BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
-}
-
-static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
-                                   enum plane_id plane_id)
-{
-       return INTEL_GEN(dev_priv) >= 11 &&
-               icl_hdr_plane_mask() & BIT(plane_id);
-}
-
-#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
deleted file mode 100644 (file)
index 89ef14c..0000000
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * Copyright © 2006-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <eric@anholt.net>
- *
- */
-
-/*
- * This information is private to VBT parsing in intel_bios.c.
- *
- * Please do NOT include anywhere else.
- */
-#ifndef _INTEL_BIOS_PRIVATE
-#error "intel_vbt_defs.h is private to intel_bios.c"
-#endif
-
-#ifndef _INTEL_VBT_DEFS_H_
-#define _INTEL_VBT_DEFS_H_
-
-#include "intel_bios.h"
-
-/**
- * struct vbt_header - VBT Header structure
- * @signature:         VBT signature, always starts with "$VBT"
- * @version:           Version of this structure
- * @header_size:       Size of this structure
- * @vbt_size:          Size of VBT (VBT Header, BDB Header and data blocks)
- * @vbt_checksum:      Checksum
- * @reserved0:         Reserved
- * @bdb_offset:                Offset of &struct bdb_header from beginning of VBT
- * @aim_offset:                Offsets of add-in data blocks from beginning of VBT
- */
-struct vbt_header {
-       u8 signature[20];
-       u16 version;
-       u16 header_size;
-       u16 vbt_size;
-       u8 vbt_checksum;
-       u8 reserved0;
-       u32 bdb_offset;
-       u32 aim_offset[4];
-} __packed;
-
-/**
- * struct bdb_header - BDB Header structure
- * @signature:         BDB signature "BIOS_DATA_BLOCK"
- * @version:           Version of the data block definitions
- * @header_size:       Size of this structure
- * @bdb_size:          Size of BDB (BDB Header and data blocks)
- */
-struct bdb_header {
-       u8 signature[16];
-       u16 version;
-       u16 header_size;
-       u16 bdb_size;
-} __packed;
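
To make the relationship between the two headers concrete, here is a minimal lookup sketch based on the structures above; find_vbt_bdb() is a hypothetical helper, and the real parser in intel_bios.c performs stricter size and bounds validation:

static const struct bdb_header *find_vbt_bdb(const void *buf, size_t size)
{
        const struct vbt_header *vbt = buf;
        const struct bdb_header *bdb;

        /* The VBT signature always starts with "$VBT" */
        if (size < sizeof(*vbt) || memcmp(vbt->signature, "$VBT", 4) != 0)
                return NULL;

        /* bdb_offset is relative to the beginning of the VBT */
        if (vbt->bdb_offset > size - sizeof(*bdb))
                return NULL;

        bdb = (const void *)((const u8 *)buf + vbt->bdb_offset);
        if (memcmp(bdb->signature, "BIOS_DATA_BLOCK", 15) != 0)
                return NULL;

        return bdb;
}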
-
-/*
- * There are several types of BIOS data blocks (BDBs); each block carries
- * its ID and size in its first 3 bytes (ID in the first byte, size in the
- * next 2).
- * Known types are listed below.
- */
-enum bdb_block_id {
-       BDB_GENERAL_FEATURES            = 1,
-       BDB_GENERAL_DEFINITIONS         = 2,
-       BDB_OLD_TOGGLE_LIST             = 3,
-       BDB_MODE_SUPPORT_LIST           = 4,
-       BDB_GENERIC_MODE_TABLE          = 5,
-       BDB_EXT_MMIO_REGS               = 6,
-       BDB_SWF_IO                      = 7,
-       BDB_SWF_MMIO                    = 8,
-       BDB_PSR                         = 9,
-       BDB_MODE_REMOVAL_TABLE          = 10,
-       BDB_CHILD_DEVICE_TABLE          = 11,
-       BDB_DRIVER_FEATURES             = 12,
-       BDB_DRIVER_PERSISTENCE          = 13,
-       BDB_EXT_TABLE_PTRS              = 14,
-       BDB_DOT_CLOCK_OVERRIDE          = 15,
-       BDB_DISPLAY_SELECT              = 16,
-       BDB_DRIVER_ROTATION             = 18,
-       BDB_DISPLAY_REMOVE              = 19,
-       BDB_OEM_CUSTOM                  = 20,
-       BDB_EFP_LIST                    = 21, /* workarounds for VGA hsync/vsync */
-       BDB_SDVO_LVDS_OPTIONS           = 22,
-       BDB_SDVO_PANEL_DTDS             = 23,
-       BDB_SDVO_LVDS_PNP_IDS           = 24,
-       BDB_SDVO_LVDS_POWER_SEQ         = 25,
-       BDB_TV_OPTIONS                  = 26,
-       BDB_EDP                         = 27,
-       BDB_LVDS_OPTIONS                = 40,
-       BDB_LVDS_LFP_DATA_PTRS          = 41,
-       BDB_LVDS_LFP_DATA               = 42,
-       BDB_LVDS_BACKLIGHT              = 43,
-       BDB_LVDS_POWER                  = 44,
-       BDB_MIPI_CONFIG                 = 52,
-       BDB_MIPI_SEQUENCE               = 53,
-       BDB_SKIP                        = 254, /* VBIOS private block, ignore */
-};
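
Given the layout described above (a 1-byte block ID followed by a 2-byte little-endian size), a simplified block lookup could be written as below; the driver's own parser in intel_bios.c does additional validation and special-cases a couple of blocks:

static const void *find_bdb_block(const struct bdb_header *bdb,
                                  enum bdb_block_id id)
{
        const u8 *base = (const u8 *)bdb;
        u32 index = bdb->header_size;
        u32 total = bdb->bdb_size;

        while (index + 3 <= total) {
                u8 current_id = base[index];
                u16 current_size = base[index + 1] | (base[index + 2] << 8);

                index += 3;
                if (index + current_size > total)
                        return NULL;

                if (current_id == id)
                        return base + index; /* points at the block payload */

                index += current_size;
        }

        return NULL;
}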
-
-/*
- * Block 1 - General Bit Definitions
- */
-
-struct bdb_general_features {
-        /* bits 1 */
-       u8 panel_fitting:2;
-       u8 flexaim:1;
-       u8 msg_enable:1;
-       u8 clear_screen:3;
-       u8 color_flip:1;
-
-        /* bits 2 */
-       u8 download_ext_vbt:1;
-       u8 enable_ssc:1;
-       u8 ssc_freq:1;
-       u8 enable_lfp_on_override:1;
-       u8 disable_ssc_ddt:1;
-       u8 underscan_vga_timings:1;
-       u8 display_clock_mode:1;
-       u8 vbios_hotplug_support:1;
-
-        /* bits 3 */
-       u8 disable_smooth_vision:1;
-       u8 single_dvi:1;
-       u8 rotate_180:1;                                        /* 181 */
-       u8 fdi_rx_polarity_inverted:1;
-       u8 vbios_extended_mode:1;                               /* 160 */
-       u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1;                    /* 160 */
-       u8 panel_best_fit_timing:1;                             /* 160 */
-       u8 ignore_strap_state:1;                                /* 160 */
-
-        /* bits 4 */
-       u8 legacy_monitor_detect;
-
-        /* bits 5 */
-       u8 int_crt_support:1;
-       u8 int_tv_support:1;
-       u8 int_efp_support:1;
-       u8 dp_ssc_enable:1;     /* PCH attached eDP supports SSC */
-       u8 dp_ssc_freq:1;       /* SSC freq for PCH attached eDP */
-       u8 dp_ssc_dongle_supported:1;
-       u8 rsvd11:2; /* finish byte */
-} __packed;
-
-/*
- * Block 2 - General Bytes Definition
- */
-
-/* pre-915 */
-#define GPIO_PIN_DVI_LVDS      0x03 /* "DVI/LVDS DDC GPIO pins" */
-#define GPIO_PIN_ADD_I2C       0x05 /* "ADDCARD I2C GPIO pins" */
-#define GPIO_PIN_ADD_DDC       0x04 /* "ADDCARD DDC GPIO pins" */
-#define GPIO_PIN_ADD_DDC_I2C   0x06 /* "ADDCARD DDC/I2C GPIO pins" */
-
-/* Pre 915 */
-#define DEVICE_TYPE_NONE       0x00
-#define DEVICE_TYPE_CRT                0x01
-#define DEVICE_TYPE_TV         0x09
-#define DEVICE_TYPE_EFP                0x12
-#define DEVICE_TYPE_LFP                0x22
-/* On 915+ */
-#define DEVICE_TYPE_CRT_DPMS           0x6001
-#define DEVICE_TYPE_CRT_DPMS_HOTPLUG   0x4001
-#define DEVICE_TYPE_TV_COMPOSITE       0x0209
-#define DEVICE_TYPE_TV_MACROVISION     0x0289
-#define DEVICE_TYPE_TV_RF_COMPOSITE    0x020c
-#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE        0x0609
-#define DEVICE_TYPE_TV_SCART           0x0209
-#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
-#define DEVICE_TYPE_EFP_HOTPLUG_PWR    0x6012
-#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR        0x6052
-#define DEVICE_TYPE_EFP_DVI_I          0x6053
-#define DEVICE_TYPE_EFP_DVI_D_DUAL     0x6152
-#define DEVICE_TYPE_EFP_DVI_D_HDCP     0x60d2
-#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR        0x6062
-#define DEVICE_TYPE_OPENLDI_DUALPIX    0x6162
-#define DEVICE_TYPE_LFP_PANELLINK      0x5012
-#define DEVICE_TYPE_LFP_CMOS_PWR       0x5042
-#define DEVICE_TYPE_LFP_LVDS_PWR       0x5062
-#define DEVICE_TYPE_LFP_LVDS_DUAL      0x5162
-#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
-
-/* Add the device class for LFP, TV, HDMI */
-#define DEVICE_TYPE_INT_LFP            0x1022
-#define DEVICE_TYPE_INT_TV             0x1009
-#define DEVICE_TYPE_HDMI               0x60D2
-#define DEVICE_TYPE_DP                 0x68C6
-#define DEVICE_TYPE_DP_DUAL_MODE       0x60D6
-#define DEVICE_TYPE_eDP                        0x78C6
-
-#define DEVICE_TYPE_CLASS_EXTENSION    (1 << 15)
-#define DEVICE_TYPE_POWER_MANAGEMENT   (1 << 14)
-#define DEVICE_TYPE_HOTPLUG_SIGNALING  (1 << 13)
-#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
-#define DEVICE_TYPE_NOT_HDMI_OUTPUT    (1 << 11)
-#define DEVICE_TYPE_MIPI_OUTPUT                (1 << 10)
-#define DEVICE_TYPE_COMPOSITE_OUTPUT   (1 << 9)
-#define DEVICE_TYPE_DUAL_CHANNEL       (1 << 8)
-#define DEVICE_TYPE_HIGH_SPEED_LINK    (1 << 6)
-#define DEVICE_TYPE_LVDS_SIGNALING     (1 << 5)
-#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
-#define DEVICE_TYPE_VIDEO_SIGNALING    (1 << 3)
-#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
-#define DEVICE_TYPE_DIGITAL_OUTPUT     (1 << 1)
-#define DEVICE_TYPE_ANALOG_OUTPUT      (1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the
- * system, the other bits may or may not be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
-       (DEVICE_TYPE_INTERNAL_CONNECTOR |       \
-        DEVICE_TYPE_MIPI_OUTPUT |              \
-        DEVICE_TYPE_COMPOSITE_OUTPUT |         \
-        DEVICE_TYPE_DUAL_CHANNEL |             \
-        DEVICE_TYPE_LVDS_SIGNALING |           \
-        DEVICE_TYPE_TMDS_DVI_SIGNALING |       \
-        DEVICE_TYPE_VIDEO_SIGNALING |          \
-        DEVICE_TYPE_DISPLAYPORT_OUTPUT |       \
-        DEVICE_TYPE_ANALOG_OUTPUT)
-
-#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
-       (DEVICE_TYPE_INTERNAL_CONNECTOR |       \
-        DEVICE_TYPE_MIPI_OUTPUT |              \
-        DEVICE_TYPE_COMPOSITE_OUTPUT |         \
-        DEVICE_TYPE_LVDS_SIGNALING |           \
-        DEVICE_TYPE_TMDS_DVI_SIGNALING |       \
-        DEVICE_TYPE_VIDEO_SIGNALING |          \
-        DEVICE_TYPE_DISPLAYPORT_OUTPUT |       \
-        DEVICE_TYPE_DIGITAL_OUTPUT |           \
-        DEVICE_TYPE_ANALOG_OUTPUT)
-
-#define DEVICE_CFG_NONE                0x00
-#define DEVICE_CFG_12BIT_DVOB  0x01
-#define DEVICE_CFG_12BIT_DVOC  0x02
-#define DEVICE_CFG_24BIT_DVOBC 0x09
-#define DEVICE_CFG_24BIT_DVOCB 0x0a
-#define DEVICE_CFG_DUAL_DVOB   0x11
-#define DEVICE_CFG_DUAL_DVOC   0x12
-#define DEVICE_CFG_DUAL_DVOBC  0x13
-#define DEVICE_CFG_DUAL_LINK_DVOBC     0x19
-#define DEVICE_CFG_DUAL_LINK_DVOCB     0x1a
-
-#define DEVICE_WIRE_NONE       0x00
-#define DEVICE_WIRE_DVOB       0x01
-#define DEVICE_WIRE_DVOC       0x02
-#define DEVICE_WIRE_DVOBC      0x03
-#define DEVICE_WIRE_DVOBB      0x05
-#define DEVICE_WIRE_DVOCC      0x06
-#define DEVICE_WIRE_DVOB_MASTER 0x0d
-#define DEVICE_WIRE_DVOC_MASTER 0x0e
-
-/* dvo_port pre BDB 155 */
-#define DEVICE_PORT_DVOA       0x00 /* none on 845+ */
-#define DEVICE_PORT_DVOB       0x01
-#define DEVICE_PORT_DVOC       0x02
-
-/* dvo_port BDB 155+ */
-#define DVO_PORT_HDMIA         0
-#define DVO_PORT_HDMIB         1
-#define DVO_PORT_HDMIC         2
-#define DVO_PORT_HDMID         3
-#define DVO_PORT_LVDS          4
-#define DVO_PORT_TV            5
-#define DVO_PORT_CRT           6
-#define DVO_PORT_DPB           7
-#define DVO_PORT_DPC           8
-#define DVO_PORT_DPD           9
-#define DVO_PORT_DPA           10
-#define DVO_PORT_DPE           11                              /* 193 */
-#define DVO_PORT_HDMIE         12                              /* 193 */
-#define DVO_PORT_DPF           13                              /* N/A */
-#define DVO_PORT_HDMIF         14                              /* N/A */
-#define DVO_PORT_MIPIA         21                              /* 171 */
-#define DVO_PORT_MIPIB         22                              /* 171 */
-#define DVO_PORT_MIPIC         23                              /* 171 */
-#define DVO_PORT_MIPID         24                              /* 171 */
-
-#define HDMI_MAX_DATA_RATE_PLATFORM    0                       /* 204 */
-#define HDMI_MAX_DATA_RATE_297         1                       /* 204 */
-#define HDMI_MAX_DATA_RATE_165         2                       /* 204 */
-
-#define LEGACY_CHILD_DEVICE_CONFIG_SIZE                33
-
-/* DDC Bus DDI Type 155+ */
-enum vbt_gmbus_ddi {
-       DDC_BUS_DDI_B = 0x1,
-       DDC_BUS_DDI_C,
-       DDC_BUS_DDI_D,
-       DDC_BUS_DDI_F,
-       ICL_DDC_BUS_DDI_A = 0x1,
-       ICL_DDC_BUS_DDI_B,
-       ICL_DDC_BUS_PORT_1 = 0x4,
-       ICL_DDC_BUS_PORT_2,
-       ICL_DDC_BUS_PORT_3,
-       ICL_DDC_BUS_PORT_4,
-};
-
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define VBT_DP_MAX_LINK_RATE_HBR3      0
-#define VBT_DP_MAX_LINK_RATE_HBR2      1
-#define VBT_DP_MAX_LINK_RATE_HBR       2
-#define VBT_DP_MAX_LINK_RATE_LBR       3
-
-/*
- * The child device config, aka the display device data structure, provides a
- * description of a port and its configuration on the platform.
- *
- * The child device config size has been increased, and fields have been added
- * and their meaning has changed over time. Care must be taken when accessing
- * basically any of the fields to ensure the correct interpretation for the BDB
- * version in question.
- *
- * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
- * space for the full structure below, and initialize the tail not actually
- * present in VBT to zeros. Accessing those fields is fine, as long as the
- * default zero is taken into account, again according to the BDB version.
- *
- * BDB versions 155 and below are considered legacy, and version 155 seems to be
- * a baseline for some of the VBT documentation. When adding new fields, please
- * include the BDB version when the field was added, if it's above that.
- */
-struct child_device_config {
-       u16 handle;
-       u16 device_type; /* See DEVICE_TYPE_* above */
-
-       union {
-               u8  device_id[10]; /* ascii string */
-               struct {
-                       u8 i2c_speed;
-                       u8 dp_onboard_redriver;                 /* 158 */
-                       u8 dp_ondock_redriver;                  /* 158 */
-                       u8 hdmi_level_shifter_value:5;          /* 169 */
-                       u8 hdmi_max_data_rate:3;                /* 204 */
-                       u16 dtd_buf_ptr;                        /* 161 */
-                       u8 edidless_efp:1;                      /* 161 */
-                       u8 compression_enable:1;                /* 198 */
-                       u8 compression_method:1;                /* 198 */
-                       u8 ganged_edp:1;                        /* 202 */
-                       u8 reserved0:4;
-                       u8 compression_structure_index:4;       /* 198 */
-                       u8 reserved1:4;
-                       u8 slave_port;                          /* 202 */
-                       u8 reserved2;
-               } __packed;
-       } __packed;
-
-       u16 addin_offset;
-       u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
-       u8 i2c_pin;
-       u8 slave_addr;
-       u8 ddc_pin;
-       u16 edid_ptr;
-       u8 dvo_cfg; /* See DEVICE_CFG_* above */
-
-       union {
-               struct {
-                       u8 dvo2_port;
-                       u8 i2c2_pin;
-                       u8 slave2_addr;
-                       u8 ddc2_pin;
-               } __packed;
-               struct {
-                       u8 efp_routed:1;                        /* 158 */
-                       u8 lane_reversal:1;                     /* 184 */
-                       u8 lspcon:1;                            /* 192 */
-                       u8 iboost:1;                            /* 196 */
-                       u8 hpd_invert:1;                        /* 196 */
-                       u8 use_vbt_vswing:1;                    /* 218 */
-                       u8 flag_reserved:2;
-                       u8 hdmi_support:1;                      /* 158 */
-                       u8 dp_support:1;                        /* 158 */
-                       u8 tmds_support:1;                      /* 158 */
-                       u8 support_reserved:5;
-                       u8 aux_channel;
-                       u8 dongle_detect;
-               } __packed;
-       } __packed;
-
-       u8 pipe_cap:2;
-       u8 sdvo_stall:1;                                        /* 158 */
-       u8 hpd_status:2;
-       u8 integrated_encoder:1;
-       u8 capabilities_reserved:2;
-       u8 dvo_wiring; /* See DEVICE_WIRE_* above */
-
-       union {
-               u8 dvo2_wiring;
-               u8 mipi_bridge_type;                            /* 171 */
-       } __packed;
-
-       u16 extended_type;
-       u8 dvo_function;
-       u8 dp_usb_type_c:1;                                     /* 195 */
-       u8 tbt:1;                                               /* 209 */
-       u8 flags2_reserved:2;                                   /* 195 */
-       u8 dp_port_trace_length:4;                              /* 209 */
-       u8 dp_gpio_index;                                       /* 195 */
-       u16 dp_gpio_pin_num;                                    /* 195 */
-       u8 dp_iboost_level:4;                                   /* 196 */
-       u8 hdmi_iboost_level:4;                                 /* 196 */
-       u8 dp_max_link_rate:2;                                  /* 216 CNL+ */
-       u8 dp_max_link_rate_reserved:6;                         /* 216 */
-} __packed;
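/*
 * Illustrative sketch only, not part of intel_vbt_defs.h: the copy-and-zero
 * scheme described in the comment above child_device_config. The helper name
 * copy_child_dev() is hypothetical; the real parsing lives in intel_bios.c.
 * child_dev_size is assumed to be the per-entry size reported by the general
 * definitions block for this BDB version.
 */
static void copy_child_dev(struct child_device_config *dst,
			   const void *src, size_t child_dev_size)
{
	size_t n = min(sizeof(*dst), child_dev_size);

	/* Copy only the bytes actually present in this VBT... */
	memcpy(dst, src, n);

	/* ...and zero the tail so newer fields read as their defaults. */
	memset((u8 *)dst + n, 0, sizeof(*dst) - n);
}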
-
-struct bdb_general_definitions {
-       /* DDC GPIO */
-       u8 crt_ddc_gmbus_pin;
-
-       /* DPMS bits */
-       u8 dpms_acpi:1;
-       u8 skip_boot_crt_detect:1;
-       u8 dpms_aim:1;
-       u8 rsvd1:5; /* finish byte */
-
-       /* boot device bits */
-       u8 boot_display[2];
-       u8 child_dev_size;
-
-       /*
-        * Device info:
-        * If TV is present, it'll be at devices[0].
-        * LVDS will be next, either devices[0] or [1], if present.
-        * On some platforms the number of devices is 6, but it could be as
-        * few as 4 if both TV and LVDS are missing. The number of devices
-        * depends on the size of the general definitions block and is
-        * obtained with the following formula (see the sketch after this
-        * struct):
-        * number = (block_size - sizeof(bdb_general_definitions)) /
-        *           defs->child_dev_size;
-        */
-       u8 devices[0];
-} __packed;
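/*
 * Illustrative sketch only, not part of intel_vbt_defs.h: applying the child
 * device count formula from the comment above, plus the matching pointer
 * arithmetic for walking the variable-sized entries. block_size is assumed to
 * be the payload size of the general definitions block from its BDB header;
 * the helper names are hypothetical.
 */
static int child_device_num(const struct bdb_general_definitions *defs,
			    size_t block_size)
{
	if (!defs->child_dev_size)
		return 0;

	return (block_size - sizeof(*defs)) / defs->child_dev_size;
}

static const void *
child_device_ptr(const struct bdb_general_definitions *defs, int i)
{
	/* Entries are child_dev_size bytes apart, not sizeof(child_device_config). */
	return &defs->devices[i * defs->child_dev_size];
}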
-
-/*
- * Block 9 - SRD Feature Block
- */
-
-struct psr_table {
-       /* Feature bits */
-       u8 full_link:1;
-       u8 require_aux_to_wakeup:1;
-       u8 feature_bits_rsvd:6;
-
-       /* Wait times */
-       u8 idle_frames:4;
-       u8 lines_to_wait:3;
-       u8 wait_times_rsvd:1;
-
-       /* TP wake up time in multiples of 100 */
-       u16 tp1_wakeup_time;
-       u16 tp2_tp3_wakeup_time;
-
-       /* PSR2 TP2/TP3 wakeup time for 16 panels */
-       u32 psr2_tp2_tp3_wakeup_time;
-} __packed;
-
-struct bdb_psr {
-       struct psr_table psr_table[16];
-} __packed;
-
-/*
- * Block 12 - Driver Features Data Block
- */
-
-#define BDB_DRIVER_FEATURE_NO_LVDS             0
-#define BDB_DRIVER_FEATURE_INT_LVDS            1
-#define BDB_DRIVER_FEATURE_SDVO_LVDS           2
-#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS       3
-
-struct bdb_driver_features {
-       u8 boot_dev_algorithm:1;
-       u8 block_display_switch:1;
-       u8 allow_display_switch:1;
-       u8 hotplug_dvo:1;
-       u8 dual_view_zoom:1;
-       u8 int15h_hook:1;
-       u8 sprite_in_clone:1;
-       u8 primary_lfp_id:1;
-
-       u16 boot_mode_x;
-       u16 boot_mode_y;
-       u8 boot_mode_bpp;
-       u8 boot_mode_refresh;
-
-       u16 enable_lfp_primary:1;
-       u16 selective_mode_pruning:1;
-       u16 dual_frequency:1;
-       u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
-       u16 nt_clone_support:1;
-       u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
-       u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
-       u16 cui_aspect_scaling:1;
-       u16 preserve_aspect_ratio:1;
-       u16 sdvo_device_power_down:1;
-       u16 crt_hotplug:1;
-       u16 lvds_config:2;
-       u16 tv_hotplug:1;
-       u16 hdmi_config:2;
-
-       u8 static_display:1;
-       u8 reserved2:7;
-       u16 legacy_crt_max_x;
-       u16 legacy_crt_max_y;
-       u8 legacy_crt_max_refresh;
-
-       u8 hdmi_termination;
-       u8 custom_vbt_version;
-       /* Driver features data block */
-       u16 rmpm_enabled:1;
-       u16 s2ddt_enabled:1;
-       u16 dpst_enabled:1;
-       u16 bltclt_enabled:1;
-       u16 adb_enabled:1;
-       u16 drrs_enabled:1;
-       u16 grs_enabled:1;
-       u16 gpmt_enabled:1;
-       u16 tbt_enabled:1;
-       u16 psr_enabled:1;
-       u16 ips_enabled:1;
-       u16 reserved3:4;
-       u16 pc_feature_valid:1;
-} __packed;
-
-/*
- * Block 22 - SDVO LVDS General Options
- */
-
-struct bdb_sdvo_lvds_options {
-       u8 panel_backlight;
-       u8 h40_set_panel_type;
-       u8 panel_type;
-       u8 ssc_clk_freq;
-       u16 als_low_trip;
-       u16 als_high_trip;
-       u8 sclalarcoeff_tab_row_num;
-       u8 sclalarcoeff_tab_row_size;
-       u8 coefficient[8];
-       u8 panel_misc_bits_1;
-       u8 panel_misc_bits_2;
-       u8 panel_misc_bits_3;
-       u8 panel_misc_bits_4;
-} __packed;
-
-/*
- * Block 23 - SDVO LVDS Panel DTDs
- */
-
-struct lvds_dvo_timing {
-       u16 clock;              /**< In 10 kHz */
-       u8 hactive_lo;
-       u8 hblank_lo;
-       u8 hblank_hi:4;
-       u8 hactive_hi:4;
-       u8 vactive_lo;
-       u8 vblank_lo;
-       u8 vblank_hi:4;
-       u8 vactive_hi:4;
-       u8 hsync_off_lo;
-       u8 hsync_pulse_width_lo;
-       u8 vsync_pulse_width_lo:4;
-       u8 vsync_off_lo:4;
-       u8 vsync_pulse_width_hi:2;
-       u8 vsync_off_hi:2;
-       u8 hsync_pulse_width_hi:2;
-       u8 hsync_off_hi:2;
-       u8 himage_lo;
-       u8 vimage_lo;
-       u8 vimage_hi:4;
-       u8 himage_hi:4;
-       u8 h_border;
-       u8 v_border;
-       u8 rsvd1:3;
-       u8 digital:2;
-       u8 vsync_positive:1;
-       u8 hsync_positive:1;
-       u8 non_interlaced:1;
-} __packed;
-
-struct bdb_sdvo_panel_dtds {
-       struct lvds_dvo_timing dtds[4];
-} __packed;
-
-/*
- * Block 27 - eDP VBT Block
- */
-
-#define EDP_18BPP      0
-#define EDP_24BPP      1
-#define EDP_30BPP      2
-#define EDP_RATE_1_62  0
-#define EDP_RATE_2_7   1
-#define EDP_LANE_1     0
-#define EDP_LANE_2     1
-#define EDP_LANE_4     3
-#define EDP_PREEMPHASIS_NONE   0
-#define EDP_PREEMPHASIS_3_5dB  1
-#define EDP_PREEMPHASIS_6dB    2
-#define EDP_PREEMPHASIS_9_5dB  3
-#define EDP_VSWING_0_4V                0
-#define EDP_VSWING_0_6V                1
-#define EDP_VSWING_0_8V                2
-#define EDP_VSWING_1_2V                3
-
-
-struct edp_fast_link_params {
-       u8 rate:4;
-       u8 lanes:4;
-       u8 preemphasis:4;
-       u8 vswing:4;
-} __packed;
-
-struct edp_pwm_delays {
-       u16 pwm_on_to_backlight_enable;
-       u16 backlight_disable_to_pwm_off;
-} __packed;
-
-struct edp_full_link_params {
-       u8 preemphasis:4;
-       u8 vswing:4;
-} __packed;
-
-struct bdb_edp {
-       struct edp_power_seq power_seqs[16];
-       u32 color_depth;
-       struct edp_fast_link_params fast_link_params[16];
-       u32 sdrrs_msa_timing_delay;
-
-       /* ith bit indicates enabled/disabled for (i+1)th panel */
-       u16 edp_s3d_feature;                                    /* 162 */
-       u16 edp_t3_optimization;                                /* 165 */
-       u64 edp_vswing_preemph;                                 /* 173 */
-       u16 fast_link_training;                                 /* 182 */
-       u16 dpcd_600h_write_required;                           /* 185 */
-       struct edp_pwm_delays pwm_delays[16];                   /* 186 */
-       u16 full_link_params_provided;                          /* 199 */
-       struct edp_full_link_params full_link_params[16];       /* 199 */
-} __packed;
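/*
 * Illustrative sketch only, not part of intel_vbt_defs.h: decoding one panel's
 * fast link training rate with the EDP_RATE_* values above, returned in kHz.
 * panel_type is assumed to be the panel index from the LFP options block; the
 * function name is hypothetical.
 */
static int edp_fast_link_rate_khz(const struct bdb_edp *edp, int panel_type)
{
	switch (edp->fast_link_params[panel_type].rate) {
	case EDP_RATE_2_7:
		return 270000;
	case EDP_RATE_1_62:
	default:
		return 162000;
	}
}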
-
-/*
- * Block 40 - LFP Data Block
- */
-
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK              0x3
-
-struct bdb_lvds_options {
-       u8 panel_type;
-       u8 panel_type2;                                         /* 212 */
-       /* LVDS capabilities, stored in a dword */
-       u8 pfit_mode:2;
-       u8 pfit_text_mode_enhanced:1;
-       u8 pfit_gfx_mode_enhanced:1;
-       u8 pfit_ratio_auto:1;
-       u8 pixel_dither:1;
-       u8 lvds_edid:1;
-       u8 rsvd2:1;
-       u8 rsvd4;
-       /* LVDS Panel channel bits stored here */
-       u32 lvds_panel_channel_bits;
-       /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
-       u16 ssc_bits;
-       u16 ssc_freq;
-       u16 ssc_ddt;
-       /* Panel color depth defined here */
-       u16 panel_color_depth;
-       /* LVDS panel type bits stored here */
-       u32 dps_panel_type_bits;
-       /* LVDS backlight control type bits stored here */
-       u32 blt_control_type_bits;
-
-       u16 lcdvcc_s0_enable;                                   /* 200 */
-       u32 rotation;                                           /* 228 */
-} __packed;
-
-/*
- * Block 41 - LFP Data Table Pointers
- */
-
-/* LFP pointer table contains entries pointing to the struct below */
-struct lvds_lfp_data_ptr {
-       u16 fp_timing_offset; /* offsets are from start of bdb */
-       u8 fp_table_size;
-       u16 dvo_timing_offset;
-       u8 dvo_table_size;
-       u16 panel_pnp_id_offset;
-       u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
-       u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
-       struct lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/*
- * Block 42 - LFP Data Tables
- */
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
-       u16 x_res;
-       u16 y_res;
-       u32 lvds_reg;
-       u32 lvds_reg_val;
-       u32 pp_on_reg;
-       u32 pp_on_reg_val;
-       u32 pp_off_reg;
-       u32 pp_off_reg_val;
-       u32 pp_cycle_reg;
-       u32 pp_cycle_reg_val;
-       u32 pfit_reg;
-       u32 pfit_reg_val;
-       u16 terminator;
-} __packed;
-
-struct lvds_pnp_id {
-       u16 mfg_name;
-       u16 product_code;
-       u32 serial;
-       u8 mfg_week;
-       u8 mfg_year;
-} __packed;
-
-struct lvds_lfp_data_entry {
-       struct lvds_fp_timing fp_timing;
-       struct lvds_dvo_timing dvo_timing;
-       struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
-       struct lvds_lfp_data_entry data[16];
-} __packed;
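/*
 * Illustrative sketch only, not part of intel_vbt_defs.h: the offsets in the
 * LFP data table pointers (block 41) are relative to the start of the BDB, so
 * a panel's DVO timing can be located along these lines. bdb is assumed to
 * point at the BDB header and panel_type to be the panel index; the function
 * name is hypothetical.
 */
static const struct lvds_dvo_timing *
panel_dvo_timing(const void *bdb,
		 const struct bdb_lvds_lfp_data_ptrs *ptrs,
		 int panel_type)
{
	return bdb + ptrs->ptr[panel_type].dvo_timing_offset;
}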
-
-/*
- * Block 43 - LFP Backlight Control Data Block
- */
-
-#define BDB_BACKLIGHT_TYPE_NONE        0
-#define BDB_BACKLIGHT_TYPE_PWM 2
-
-struct lfp_backlight_data_entry {
-       u8 type:2;
-       u8 active_low_pwm:1;
-       u8 obsolete1:5;
-       u16 pwm_freq_hz;
-       u8 min_brightness;
-       u8 obsolete2;
-       u8 obsolete3;
-} __packed;
-
-struct lfp_backlight_control_method {
-       u8 type:4;
-       u8 controller:4;
-} __packed;
-
-struct bdb_lfp_backlight_data {
-       u8 entry_size;
-       struct lfp_backlight_data_entry data[16];
-       u8 level[16];
-       struct lfp_backlight_control_method backlight_control[16];
-} __packed;
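/*
 * Illustrative sketch only, not part of intel_vbt_defs.h: reading one panel's
 * PWM backlight parameters from the block above. The entry_size check guards
 * against a VBT whose entries are larger than this definition; panel_type is
 * assumed to be the panel index from the LFP options block, and the function
 * name is hypothetical.
 */
static bool panel_backlight_pwm_freq(const struct bdb_lfp_backlight_data *bl,
				     int panel_type, u16 *pwm_freq_hz)
{
	const struct lfp_backlight_data_entry *entry;

	if (bl->entry_size > sizeof(bl->data[0]))
		return false;

	entry = &bl->data[panel_type];
	if (entry->type != BDB_BACKLIGHT_TYPE_PWM)
		return false;

	*pwm_freq_hz = entry->pwm_freq_hz;
	return true;
}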
-
-/*
- * Block 52 - MIPI Configuration Block
- */
-
-#define MAX_MIPI_CONFIGURATIONS        6
-
-struct bdb_mipi_config {
-       struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
-       struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-} __packed;
-
-/*
- * Block 53 - MIPI Sequence Block
- */
-
-struct bdb_mipi_sequence {
-       u8 version;
-       u8 data[0]; /* up to 6 variable length blocks */
-} __packed;
-
-#endif /* _INTEL_VBT_DEFS_H_ */