Merge drm/drm-next into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 245f0022bcfd00c730f020b24326bfba90de4ea2..897a791662c59c60adfdc4774b2c52f3e95ef4bb 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
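+       /*
+        * A latency of 0 means this watermark level is not usable; return the
+        * largest possible value so the level can never be enabled (see the
+        * SNB LP3 quirk below, which clears the LP3 latencies).
+        */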
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
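+       /* A latency of 0 means this watermark level is not usable */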
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 {
        int cpp;
 
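+       /* A latency of 0 means this watermark level is not usable */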
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+       /*
+        * On some SNB machines (Thinkpad X220 Tablet at least)
+        * LP3 usage can cause vblank interrupts to be lost.
+        * The DEIIR bit will go high but it looks like the CPU
+        * never gets interrupted.
+        *
+        * It's not clear whether other interrupt sources could
+        * be affected or if this is somehow limited to vblank
+        * interrupts only. To play it safe we disable LP3
+        * watermarks entirely.
+        */
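+       /* Nothing to do if the LP3 latencies are already zeroed */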
+       if (dev_priv->wm.pri_latency[3] == 0 &&
+           dev_priv->wm.spr_latency[3] == 0 &&
+           dev_priv->wm.cur_latency[3] == 0)
+               return;
+
+       dev_priv->wm.pri_latency[3] = 0;
+       dev_priv->wm.spr_latency[3] = 0;
+       dev_priv->wm.cur_latency[3] = 0;
+
+       DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+       intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
        intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN6(dev_priv)) {
                snb_wm_latency_quirk(dev_priv);
+               snb_wm_lp3_irq_quirk(dev_priv);
+       }
 }
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3159,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
         * and after the vblank.
         */
        *a = newstate->wm.ilk.optimal;
-       if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+       if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+           intel_state->skip_intermediate_wm)
                return 0;
 
        a->pipe_enabled |= b->pipe_enabled;
@@ -3611,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
 static bool
 intel_has_sagv(struct drm_i915_private *dev_priv)
 {
-       if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
-           IS_CANNONLAKE(dev_priv))
-               return true;
-
-       if (IS_SKYLAKE(dev_priv) &&
-           dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
-               return true;
-
-       return false;
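+       /*
+        * SKL/KBL/CFL (gen9_bc) and all gen10+ platforms have SAGV, unless
+        * we have already determined that it is not controllable.
+        */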
+       return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+               dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
 }
 
 /*
@@ -3783,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
                              const struct intel_crtc_state *cstate,
-                             const unsigned int total_data_rate,
+                             const u64 total_data_rate,
                              const int num_active,
                              struct skl_ddb_allocation *ddb)
 {
@@ -3797,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
                return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
        adjusted_mode = &cstate->base.adjusted_mode;
-       total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+       total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
 
        /*
         * 12GB/s is maximum BW supported by single DBuf slice.
         */
-       if (total_data_bw >= GBps(12) || num_active > 1) {
+       if (num_active > 1 || total_data_bw >= GBps(12)) {
                ddb->enabled_slices = 2;
        } else {
                ddb->enabled_slices = 1;
@@ -3813,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 }
 
 static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
                                   const struct intel_crtc_state *cstate,
-                                  const unsigned int total_data_rate,
+                                  const u64 total_data_rate,
                                   struct skl_ddb_allocation *ddb,
                                   struct skl_ddb_entry *alloc, /* out */
                                   int *num_active /* out */)
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *for_crtc = cstate->base.crtc;
        const struct drm_crtc_state *crtc_state;
        const struct drm_crtc *crtc;
@@ -3944,14 +3976,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
                                      val & PLANE_CTL_ALPHA_MASK);
 
        val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-       /*
-        * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
-        * registers for now.
-        */
-       if (INTEL_GEN(dev_priv) < 11)
+       if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
                val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
-       if (fourcc == DRM_FORMAT_NV12) {
                skl_ddb_entry_init_from_hw(dev_priv,
                                           &ddb->plane[pipe][plane_id], val2);
                skl_ddb_entry_init_from_hw(dev_priv,
@@ -4138,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
        return 0;
 }
 
-static unsigned int
+static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
-                            const struct drm_plane_state *pstate,
+                            const struct intel_plane_state *intel_pstate,
                             const int plane)
 {
-       struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
-       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+       struct intel_plane *intel_plane =
+               to_intel_plane(intel_pstate->base.plane);
        uint32_t data_rate;
        uint32_t width = 0, height = 0;
        struct drm_framebuffer *fb;
        u32 format;
        uint_fixed_16_16_t down_scale_amount;
+       u64 rate;
 
        if (!intel_pstate->base.visible)
                return 0;
 
-       fb = pstate->fb;
+       fb = intel_pstate->base.fb;
        format = fb->format->format;
 
        if (intel_plane->id == PLANE_CURSOR)
@@ -4176,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                height /= 2;
        }
 
-       data_rate = width * height * fb->format->cpp[plane];
+       data_rate = width * height;
 
        down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
 
-       return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+       rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
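+       /*
+        * Keep the width * height product in 32 bits through the fixed point
+        * downscale step, then widen to 64 bits before applying cpp.
+        */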
+       rate *= fb->format->cpp[plane];
+       return rate;
 }
 
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- *   3 * 4096 * 8192  * 4 < 2^32
- */
-static unsigned int
+static u64
 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
-                                unsigned int *plane_data_rate,
-                                unsigned int *uv_plane_data_rate)
+                                u64 *plane_data_rate,
+                                u64 *uv_plane_data_rate)
 {
        struct drm_crtc_state *cstate = &intel_cstate->base;
        struct drm_atomic_state *state = cstate->state;
        struct drm_plane *plane;
        const struct drm_plane_state *pstate;
-       unsigned int total_data_rate = 0;
+       u64 total_data_rate = 0;
 
        if (WARN_ON(!state))
                return 0;
@@ -4205,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
        /* Calculate and cache data rate for each plane */
        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
                enum plane_id plane_id = to_intel_plane(plane)->id;
-               unsigned int rate;
+               u64 rate;
+               const struct intel_plane_state *intel_pstate =
+                       to_intel_plane_state(pstate);
 
                /* packed/y */
                rate = skl_plane_relative_data_rate(intel_cstate,
-                                                   pstate, 0);
+                                                   intel_pstate, 0);
                plane_data_rate[plane_id] = rate;
-
                total_data_rate += rate;
 
                /* uv-plane */
                rate = skl_plane_relative_data_rate(intel_cstate,
-                                                   pstate, 1);
+                                                   intel_pstate, 1);
                uv_plane_data_rate[plane_id] = rate;
-
                total_data_rate += rate;
        }
 
        return total_data_rate;
 }
 
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+                                u64 *plane_data_rate)
+{
+       struct drm_crtc_state *cstate = &intel_cstate->base;
+       struct drm_atomic_state *state = cstate->state;
+       struct drm_plane *plane;
+       const struct drm_plane_state *pstate;
+       u64 total_data_rate = 0;
+
+       if (WARN_ON(!state))
+               return 0;
+
+       /* Calculate and cache data rate for each plane */
+       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+               const struct intel_plane_state *intel_pstate =
+                       to_intel_plane_state(pstate);
+               enum plane_id plane_id = to_intel_plane(plane)->id;
+               u64 rate;
+
+               if (!intel_pstate->linked_plane) {
+                       rate = skl_plane_relative_data_rate(intel_cstate,
+                                                           intel_pstate, 0);
+                       plane_data_rate[plane_id] = rate;
+                       total_data_rate += rate;
+               } else {
+                       enum plane_id y_plane_id;
+
+                       /*
+                        * The slave plane might not be iterated in
+                        * drm_atomic_crtc_state_for_each_plane_state(),
+                        * and it needs the master plane state, which may be
+                        * NULL if we try get_new_plane_state(). So we
+                        * always calculate both rates from the master.
+                        */
+                       if (intel_pstate->slave)
+                               continue;
+
+                       /* Y plane rate is calculated on the slave */
+                       rate = skl_plane_relative_data_rate(intel_cstate,
+                                                           intel_pstate, 0);
+                       y_plane_id = intel_pstate->linked_plane->id;
+                       plane_data_rate[y_plane_id] = rate;
+                       total_data_rate += rate;
+
+                       rate = skl_plane_relative_data_rate(intel_cstate,
+                                                           intel_pstate, 1);
+                       plane_data_rate[plane_id] = rate;
+                       total_data_rate += rate;
+               }
+       }
+
+       return total_data_rate;
+}
+
 static uint16_t
 skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
 {
@@ -4297,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
 
        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
                enum plane_id plane_id = to_intel_plane(plane)->id;
+               struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
 
                if (plane_id == PLANE_CURSOR)
                        continue;
 
-               if (!pstate->visible)
+               /* slave plane must be invisible and calculated from master */
+               if (!pstate->visible || WARN_ON(plane_state->slave))
                        continue;
 
-               minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
-               uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+               if (!plane_state->linked_plane) {
+                       minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+                       uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+               } else {
+                       enum plane_id y_plane_id =
+                               plane_state->linked_plane->id;
+
+                       minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+                       minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+               }
        }
 
        minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4317,18 +4408,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct drm_crtc *crtc = cstate->base.crtc;
-       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
        uint16_t alloc_size, start;
        uint16_t minimum[I915_MAX_PLANES] = {};
        uint16_t uv_minimum[I915_MAX_PLANES] = {};
-       unsigned int total_data_rate;
+       u64 total_data_rate;
        enum plane_id plane_id;
        int num_active;
-       unsigned int plane_data_rate[I915_MAX_PLANES] = {};
-       unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+       u64 plane_data_rate[I915_MAX_PLANES] = {};
+       u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
        uint16_t total_min_blocks = 0;
 
        /* Clear the partitioning for disabled planes. */
@@ -4343,11 +4434,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                return 0;
        }
 
-       total_data_rate = skl_get_total_relative_data_rate(cstate,
-                                                          plane_data_rate,
-                                                          uv_plane_data_rate);
-       skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
-                                          alloc, &num_active);
+       if (INTEL_GEN(dev_priv) < 11)
+               total_data_rate =
+                       skl_get_total_relative_data_rate(cstate,
+                                                        plane_data_rate,
+                                                        uv_plane_data_rate);
+       else
+               total_data_rate =
+                       icl_get_total_relative_data_rate(cstate,
+                                                        plane_data_rate);
+
+       skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+                                          ddb, alloc, &num_active);
        alloc_size = skl_ddb_entry_size(alloc);
        if (alloc_size == 0)
                return 0;
@@ -4387,7 +4485,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
        start = alloc->start;
        for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-               unsigned int data_rate, uv_data_rate;
+               u64 data_rate, uv_data_rate;
                uint16_t plane_blocks, uv_plane_blocks;
 
                if (plane_id == PLANE_CURSOR)
@@ -4401,8 +4499,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                 * result is < available as data_rate / total_data_rate < 1
                 */
                plane_blocks = minimum[plane_id];
-               plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
-                                       total_data_rate);
+               plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
 
                /* Leave disabled planes at (0,0) */
                if (data_rate) {
@@ -4416,8 +4513,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                uv_data_rate = uv_plane_data_rate[plane_id];
 
                uv_plane_blocks = uv_minimum[plane_id];
-               uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
-                                          total_data_rate);
+               uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+               /* Gen11+ uses a separate plane for UV watermarks */
+               WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
                if (uv_data_rate) {
                        ddb->uv_plane[pipe][plane_id].start = start;
@@ -4475,7 +4574,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
 }
 
 static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
 {
        uint32_t pixel_rate;
        uint32_t crtc_htotal;
@@ -4519,7 +4618,7 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 
 static int
 skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
-                           struct intel_crtc_state *cstate,
+                           const struct intel_crtc_state *cstate,
                            const struct intel_plane_state *intel_pstate,
                            struct skl_wm_params *wp, int plane_id)
 {
@@ -4626,7 +4725,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 }
 
 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-                               struct intel_crtc_state *cstate,
+                               const struct intel_crtc_state *cstate,
                                const struct intel_plane_state *intel_pstate,
                                uint16_t ddb_allocation,
                                int level,
@@ -4671,15 +4770,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
        } else {
                if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
                     wp->dbuf_block_size < 1) &&
-                    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+                    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
                        selected_result = method2;
-               else if (ddb_allocation >=
-                        fixed16_to_u32_round_up(wp->plane_blocks_per_line))
-                       selected_result = min_fixed16(method1, method2);
-               else if (latency >= wp->linetime_us)
-                       selected_result = min_fixed16(method1, method2);
-               else
+               } else if (ddb_allocation >=
+                        fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+                       if (IS_GEN9(dev_priv) &&
+                           !IS_GEMINILAKE(dev_priv))
+                               selected_result = min_fixed16(method1, method2);
+                       else
+                               selected_result = method2;
+               } else if (latency >= wp->linetime_us) {
+                       if (IS_GEN9(dev_priv) &&
+                           !IS_GEMINILAKE(dev_priv))
+                               selected_result = min_fixed16(method1, method2);
+                       else
+                               selected_result = method2;
+               } else {
                        selected_result = method1;
+               }
        }
 
        res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
@@ -4755,17 +4863,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
                }
        }
 
-       /*
-        * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
-        * disable wm level 1-7 on NV12 planes
-        */
-       if (wp->is_planar && level >= 1 &&
-           (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
-            IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
-               result->plane_en = false;
-               return 0;
-       }
-
        /* The number of lines are ignored for the level 0 watermark. */
        result->plane_res_b = res_blocks;
        result->plane_res_l = res_lines;
@@ -4777,38 +4874,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 static int
 skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
                      struct skl_ddb_allocation *ddb,
-                     struct intel_crtc_state *cstate,
+                     const struct intel_crtc_state *cstate,
                      const struct intel_plane_state *intel_pstate,
+                     uint16_t ddb_blocks,
                      const struct skl_wm_params *wm_params,
                      struct skl_plane_wm *wm,
-                     int plane_id)
+                     struct skl_wm_level *levels)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
-       struct drm_plane *plane = intel_pstate->base.plane;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       uint16_t ddb_blocks;
-       enum pipe pipe = intel_crtc->pipe;
        int level, max_level = ilk_wm_max_level(dev_priv);
-       enum plane_id intel_plane_id = intel_plane->id;
+       struct skl_wm_level *result_prev = &levels[0];
        int ret;
 
        if (WARN_ON(!intel_pstate->base.fb))
                return -EINVAL;
 
-       ddb_blocks = plane_id ?
-                    skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
-                    skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
        for (level = 0; level <= max_level; level++) {
-               struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
-                                                         &wm->wm[level];
-               struct skl_wm_level *result_prev;
-
-               if (level)
-                       result_prev = plane_id ? &wm->uv_wm[level - 1] :
-                                                 &wm->wm[level - 1];
-               else
-                       result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
+               struct skl_wm_level *result = &levels[level];
 
                ret = skl_compute_plane_wm(dev_priv,
                                           cstate,
@@ -4820,6 +4901,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
                                           result);
                if (ret)
                        return ret;
+
+               result_prev = result;
        }
 
        if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
@@ -4829,7 +4912,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 }
 
 static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
        struct drm_atomic_state *state = cstate->base.state;
        struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4851,7 +4934,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
        return linetime_wm;
 }
 
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
                                      struct skl_wm_params *wp,
                                      struct skl_wm_level *wm_l0,
                                      uint16_t ddb_allocation,
@@ -4861,7 +4944,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
        const struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t trans_min, trans_y_tile_min;
        const uint16_t trans_amount = 10; /* This is configurable amount */
-       uint16_t trans_offset_b, res_blocks;
+       uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
 
        if (!cstate->base.active)
                goto exit;
@@ -4874,19 +4957,31 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
        if (!dev_priv->ipc_enabled)
                goto exit;
 
-       trans_min = 0;
-       if (INTEL_GEN(dev_priv) >= 10)
+       trans_min = 14;
+       if (INTEL_GEN(dev_priv) >= 11)
                trans_min = 4;
 
        trans_offset_b = trans_min + trans_amount;
 
+       /*
+        * The spec asks for Selected Result Blocks for wm0 (the real value),
+        * not Result Blocks (the integer value). Pay attention to the capital
+        * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
+        * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+        * and since we later will have to get the ceiling of the sum in the
+        * transition watermarks calculation, we can just pretend Selected
+        * Result Blocks is Result Blocks minus 1 and it should work for the
+        * current platforms.
+        */
+       wm0_sel_res_b = wm_l0->plane_res_b - 1;
+
        if (wp->y_tiled) {
                trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
                                                        wp->y_tile_minimum);
-               res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+               res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
                                trans_offset_b;
        } else {
-               res_blocks = wm_l0->plane_res_b + trans_offset_b;
+               res_blocks = wm0_sel_res_b + trans_offset_b;
 
                /* WA BUG:1938466 add one block for non y-tile planes */
                if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
@@ -4906,16 +5001,101 @@ exit:
        trans_wm->plane_en = false;
 }
 
+static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
+                                      struct skl_pipe_wm *pipe_wm,
+                                      enum plane_id plane_id,
+                                      const struct intel_crtc_state *cstate,
+                                      const struct intel_plane_state *pstate,
+                                      int color_plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
+       struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+       enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+       struct skl_wm_params wm_params;
+       uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+       int ret;
+
+       ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+                                         &wm_params, color_plane);
+       if (ret)
+               return ret;
+
+       ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
+                                   ddb_blocks, &wm_params, wm, wm->wm);
+
+       if (ret)
+               return ret;
+
+       skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
+                                 ddb_blocks, &wm->trans_wm);
+
+       return 0;
+}
+
+static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
+                                    struct skl_pipe_wm *pipe_wm,
+                                    const struct intel_crtc_state *cstate,
+                                    const struct intel_plane_state *pstate)
+{
+       enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
+
+       return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+}
+
+static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
+                                    struct skl_pipe_wm *pipe_wm,
+                                    const struct intel_crtc_state *cstate,
+                                    const struct intel_plane_state *pstate)
+{
+       struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum plane_id plane_id = plane->id;
+       struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+       struct skl_wm_params wm_params;
+       enum pipe pipe = plane->pipe;
+       uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+       int ret;
+
+       ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+       if (ret)
+               return ret;
+
+       /* uv plane watermarks must also be validated for NV12/Planar */
+       ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+
+       ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+       if (ret)
+               return ret;
+
+       return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
+                                    ddb_blocks, &wm_params, wm, wm->uv_wm);
+}
+
+static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
+                                    struct skl_pipe_wm *pipe_wm,
+                                    const struct intel_crtc_state *cstate,
+                                    const struct intel_plane_state *pstate)
+{
+       int ret;
+       enum plane_id y_plane_id = pstate->linked_plane->id;
+       enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
+
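+       /*
+        * Y (color plane 0) watermarks are computed for the linked Y plane,
+        * while the UV (color plane 1) watermarks stay on this plane.
+        */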
+       ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
+                                         cstate, pstate, 0);
+       if (ret)
+               return ret;
+
+       return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
+                                          cstate, pstate, 1);
+}
+
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
                             struct skl_ddb_allocation *ddb,
                             struct skl_pipe_wm *pipe_wm)
 {
-       struct drm_device *dev = cstate->base.crtc->dev;
        struct drm_crtc_state *crtc_state = &cstate->base;
-       const struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_plane *plane;
        const struct drm_plane_state *pstate;
-       struct skl_plane_wm *wm;
        int ret;
 
        /*
@@ -4927,44 +5107,21 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
                const struct intel_plane_state *intel_pstate =
                                                to_intel_plane_state(pstate);
-               enum plane_id plane_id = to_intel_plane(plane)->id;
-               struct skl_wm_params wm_params;
-               enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
-               uint16_t ddb_blocks;
 
-               wm = &pipe_wm->planes[plane_id];
-               ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+               /* Watermarks calculated in master */
+               if (intel_pstate->slave)
+                       continue;
 
-               ret = skl_compute_plane_wm_params(dev_priv, cstate,
-                                                 intel_pstate, &wm_params, 0);
-               if (ret)
-                       return ret;
+               if (intel_pstate->linked_plane)
+                       ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+               else if (intel_pstate->base.fb &&
+                        intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
+                       ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+               else
+                       ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
 
-               ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-                                           intel_pstate, &wm_params, wm, 0);
                if (ret)
                        return ret;
-
-               skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
-                                         ddb_blocks, &wm->trans_wm);
-
-               /* uv plane watermarks must also be validated for NV12/Planar */
-               if (wm_params.is_planar) {
-                       memset(&wm_params, 0, sizeof(struct skl_wm_params));
-                       wm->is_planar = true;
-
-                       ret = skl_compute_plane_wm_params(dev_priv, cstate,
-                                                         intel_pstate,
-                                                         &wm_params, 1);
-                       if (ret)
-                               return ret;
-
-                       ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-                                                   intel_pstate, &wm_params,
-                                                   wm, 1);
-                       if (ret)
-                               return ret;
-               }
        }
 
        pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -5015,14 +5172,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
        skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
                           &wm->trans_wm);
 
-       skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-                           &ddb->plane[pipe][plane_id]);
-       /* FIXME: add proper NV12 support for ICL. */
-       if (INTEL_GEN(dev_priv) >= 11)
-               return skl_ddb_entry_write(dev_priv,
-                                          PLANE_BUF_CFG(pipe, plane_id),
-                                          &ddb->plane[pipe][plane_id]);
-       if (wm->is_planar) {
+       if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
                skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
                                    &ddb->uv_plane[pipe][plane_id]);
                skl_ddb_entry_write(dev_priv,
@@ -5031,7 +5181,8 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
        } else {
                skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
                                    &ddb->plane[pipe][plane_id]);
-               I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+               if (INTEL_GEN(dev_priv) < 11)
+                       I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
        }
 }
 
@@ -5075,16 +5226,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
        return a->start < b->end && b->start < a->end;
 }
 
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-                                const struct skl_ddb_entry **entries,
-                                const struct skl_ddb_entry *ddb,
-                                int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+                                const struct skl_ddb_entry entries[],
+                                int num_entries, int ignore_idx)
 {
-       enum pipe pipe;
+       int i;
 
-       for_each_pipe(dev_priv, pipe) {
-               if (pipe != ignore && entries[pipe] &&
-                   skl_ddb_entries_overlap(ddb, entries[pipe]))
+       for (i = 0; i < num_entries; i++) {
+               if (i != ignore_idx &&
+                   skl_ddb_entries_overlap(ddb, &entries[i]))
                        return true;
        }
 
@@ -5136,11 +5286,12 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
        struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-       struct drm_plane_state *plane_state;
        struct drm_plane *plane;
        enum pipe pipe = intel_crtc->pipe;
 
        drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
+               struct drm_plane_state *plane_state;
+               struct intel_plane *linked;
                enum plane_id plane_id = to_intel_plane(plane)->id;
 
                if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
@@ -5152,6 +5303,15 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
                plane_state = drm_atomic_get_plane_state(state, plane);
                if (IS_ERR(plane_state))
                        return PTR_ERR(plane_state);
+
+               /* Make sure linked plane is updated too */
+               linked = to_intel_plane_state(plane_state)->linked_plane;
+               if (!linked)
+                       continue;
+
+               plane_state = drm_atomic_get_plane_state(state, &linked->base);
+               if (IS_ERR(plane_state))
+                       return PTR_ERR(plane_state);
        }
 
        return 0;
@@ -5210,11 +5370,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
                        if (skl_ddb_entry_equal(old, new))
                                continue;
 
-                       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-                                        intel_plane->base.base.id,
-                                        intel_plane->base.name,
-                                        old->start, old->end,
-                                        new->start, new->end);
+                       DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+                                     intel_plane->base.base.id,
+                                     intel_plane->base.name,
+                                     old->start, old->end,
+                                     new->start, new->end);
                }
        }
 }
@@ -6116,14 +6276,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 {
        u32 val;
 
-       /* Display WA #0477 WaDisableIPC: skl */
-       if (IS_SKYLAKE(dev_priv))
-               dev_priv->ipc_enabled = false;
-
-       /* Display WA #1141: SKL:all KBL:all CFL */
-       if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
-           !dev_priv->dram_info.symmetric_memory)
-               dev_priv->ipc_enabled = false;
+       if (!HAS_IPC(dev_priv))
+               return;
 
        val = I915_READ(DISP_ARB_CTL2);
 
@@ -6137,11 +6291,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 
 void intel_init_ipc(struct drm_i915_private *dev_priv)
 {
-       dev_priv->ipc_enabled = false;
        if (!HAS_IPC(dev_priv))
                return;
 
-       dev_priv->ipc_enabled = true;
+       /* Display WA #1141: SKL:all KBL:all CFL */
+       if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+               dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+       else
+               dev_priv->ipc_enabled = true;
+
        intel_enable_ipc(dev_priv);
 }
 
@@ -8735,6 +8893,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
        /* This is not a Wa. Enable to reduce Sampler power */
        I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
                   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+       /* WaEnable32PlaneMode:icl */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+                  _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
 }
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9312,8 +9474,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_i915_private *dev_priv)
 {
-       intel_fbc_init(dev_priv);
-
        /* For cxsr */
        if (IS_PINEVIEW(dev_priv))
                i915_pineview_get_mem_freq(dev_priv);