Merge drm/drm-next into drm-intel-next-queued
author Jani Nikula <jani.nikula@intel.com>
Thu, 25 Jun 2020 15:05:03 +0000 (18:05 +0300)
committer Jani Nikula <jani.nikula@intel.com>
Thu, 25 Jun 2020 15:05:03 +0000 (18:05 +0300)
Catch up with upstream, in particular to get c1e8d7c6a7a6 ("mmap locking
API: convert mmap_sem comments").

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
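For context, c1e8d7c6a7a6 belongs to the mmap locking API series, which wraps the open-coded mmap_sem rwsem calls in dedicated helpers and renames the field to mmap_lock. A minimal sketch of the conversion pattern, using a made-up helper; mmap_read_lock()/mmap_read_unlock() and find_vma() are the real kernel API:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /* Hypothetical helper, not an i915 call site from this merge. */
    static bool addr_has_vma(struct mm_struct *mm, unsigned long addr)
    {
            bool found;

            mmap_read_lock(mm);           /* was: down_read(&mm->mmap_sem); */
            found = find_vma(mm, addr) != NULL;
            mmap_read_unlock(mm);         /* was: up_read(&mm->mmap_sem); */

            return found;
    }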
22 files changed:
drivers/dma-buf/selftests.h
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_hotplug.c
drivers/gpu/drm/i915/display/intel_tv.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_query.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_runtime_pm.c

index 42fc17a88b4b6d22cf02f3d2a04d0666abd2939e,55918ef9adab25a33d3ca7f56186ac27729af317..bc8cea67bf1e07013c5faaf8df4a7fd84ca72d38
@@@ -5,9 -5,10 +5,10 @@@
   * a module parameter. It must be unique and legal for a C identifier.
   *
   * The function should be of type int function(void). It may be conditionally
 - * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
 + * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
   *
   * Tests are executed in order by igt/dmabuf_selftest
   */
  selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
  selftest(dma_fence, dma_fence)
+ selftest(dma_fence_chain, dma_fence_chain)
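As the header comment notes, each selftest(name, func) line both names a test and identifies its int (*)(void) entry point. A hypothetical sketch of the X-macro pattern such headers rely on (the real dma-buf runner in selftest.c differs in detail):

    /* Pass 1: declare int func(void) for every selftest() entry. */
    #define selftest(name, func) int func(void);
    #include "selftests.h"
    #undef selftest

    /* Pass 2: build the table the runner walks in order. */
    #define selftest(name, func) { .name = #name, .run = func },
    static const struct {
            const char *name;
            int (*run)(void);
    } tests[] = {
    #include "selftests.h"
    };
    #undef selftest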
index 77681356505b20415d7978f2bb04423b7b8ad8bf,c1836095ea389b80feb3412daffa16cc377d7011..a11bb675f9b38e68560eaac3d2bfd36eaa9468b7
@@@ -4812,18 -4812,11 +4812,18 @@@ u32 glk_plane_color_ctl(const struct in
        plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
  
        if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
 -              if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
 +              switch (plane_state->hw.color_encoding) {
 +              case DRM_COLOR_YCBCR_BT709:
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
 -              else
 -                      plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
 -
 +                      break;
 +              case DRM_COLOR_YCBCR_BT2020:
 +                      plane_color_ctl |=
 +                              PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
 +                      break;
 +              default:
 +                      plane_color_ctl |=
 +                              PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
 +              }
                if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
        } else if (fb->format->is_yuv) {
@@@ -4886,7 -4879,7 +4886,7 @@@ void intel_prepare_reset(struct drm_i91
        int ret;
  
        /* reset doesn't touch the display */
 -      if (!i915_modparams.force_reset_modeset_test &&
 +      if (!dev_priv->params.force_reset_modeset_test &&
            !gpu_reset_clobbers_display(dev_priv))
                return;
  
@@@ -6431,7 -6424,8 +6431,7 @@@ static bool hsw_post_update_enable_ips(
         * We can't read out IPS on broadwell, assume the worst and
         * forcibly enable IPS on the first fastset.
         */
 -      if (new_crtc_state->update_pipe &&
 -          old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
 +      if (new_crtc_state->update_pipe && old_crtc_state->inherited)
                return true;
  
        return !old_crtc_state->ips_enabled;
@@@ -7218,33 -7212,30 +7218,33 @@@ bool intel_phy_is_combo(struct drm_i915
  {
        if (phy == PHY_NONE)
                return false;
 -
 -      if (IS_ELKHARTLAKE(dev_priv))
 +      else if (IS_ROCKETLAKE(dev_priv))
 +              return phy <= PHY_D;
 +      else if (IS_ELKHARTLAKE(dev_priv))
                return phy <= PHY_C;
 -
 -      if (INTEL_GEN(dev_priv) >= 11)
 +      else if (INTEL_GEN(dev_priv) >= 11)
                return phy <= PHY_B;
 -
 -      return false;
 +      else
 +              return false;
  }
  
  bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
  {
 -      if (INTEL_GEN(dev_priv) >= 12)
 +      if (IS_ROCKETLAKE(dev_priv))
 +              return false;
 +      else if (INTEL_GEN(dev_priv) >= 12)
                return phy >= PHY_D && phy <= PHY_I;
 -
 -      if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
 +      else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
                return phy >= PHY_C && phy <= PHY_F;
 -
 -      return false;
 +      else
 +              return false;
  }
  
  enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
  {
 -      if (IS_ELKHARTLAKE(i915) && port == PORT_D)
 +      if (IS_ROCKETLAKE(i915) && port >= PORT_D)
 +              return (enum phy)port - 1;
 +      else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
                return PHY_A;
  
        return (enum phy)port;
@@@ -7588,8 -7579,6 +7588,8 @@@ static void intel_crtc_disable_noatomic
                to_intel_bw_state(dev_priv->bw_obj.state);
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
 +      struct intel_dbuf_state *dbuf_state =
 +              to_intel_dbuf_state(dev_priv->dbuf.obj.state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        enum intel_display_power_domain domain;
        cdclk_state->min_voltage_level[pipe] = 0;
        cdclk_state->active_pipes &= ~BIT(pipe);
  
 +      dbuf_state->active_pipes &= ~BIT(pipe);
 +
        bw_state->data_rate[pipe] = 0;
        bw_state->num_active_planes[pipe] = 0;
  }
@@@ -7882,7 -7869,7 +7882,7 @@@ bool hsw_crtc_state_ips_capable(const s
        if (!hsw_crtc_supports_ips(crtc))
                return false;
  
 -      if (!i915_modparams.enable_ips)
 +      if (!dev_priv->params.enable_ips)
                return false;
  
        if (crtc_state->pipe_bpp > 24)
@@@ -8153,8 -8140,8 +8153,8 @@@ static void intel_panel_sanitize_ssc(st
  
  static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  {
 -      if (i915_modparams.panel_use_ssc >= 0)
 -              return i915_modparams.panel_use_ssc != 0;
 +      if (dev_priv->params.panel_use_ssc >= 0)
 +              return dev_priv->params.panel_use_ssc != 0;
        return dev_priv->vbt.lvds_use_ssc
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
  }
@@@ -8899,8 -8886,6 +8899,6 @@@ void intel_mode_from_pipe_config(struc
  
        mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
  
-       mode->hsync = drm_mode_hsync(mode);
-       mode->vrefresh = drm_mode_vrefresh(mode);
        drm_mode_set_name(mode);
  }
  
@@@ -10897,7 -10882,7 +10895,7 @@@ static bool hsw_get_transcoder_state(st
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 -      unsigned long panel_transcoder_mask = 0;
 +      unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
  
 -      if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
 -              panel_transcoder_mask |= BIT(TRANSCODER_EDP);
 -
        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
 -      for_each_set_bit(panel_transcoder,
 -                       &panel_transcoder_mask,
 -                       ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
 +      for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
 +                                     panel_transcoder_mask) {
                bool force_thru = false;
                enum pipe trans_pipe;
  
@@@ -12510,7 -12499,7 +12508,7 @@@ static int icl_check_nv12_planes(struc
                        continue;
  
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
 -                      if (!icl_is_nv12_y_plane(linked->id))
 +                      if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;
  
                        if (crtc_state->active_planes & BIT(linked->id))
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
 +                      else if (linked->id == PLANE_SPRITE3)
 +                              plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
 +                      else if (linked->id == PLANE_SPRITE2)
 +                              plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
@@@ -13585,8 -13570,8 +13583,8 @@@ pipe_config_mismatch(bool fastset, cons
  
  static bool fastboot_enabled(struct drm_i915_private *dev_priv)
  {
 -      if (i915_modparams.fastboot != -1)
 -              return i915_modparams.fastboot;
 +      if (dev_priv->params.fastboot != -1)
 +              return dev_priv->params.fastboot;
  
        /* Enable fastboot by default on Skylake and newer */
        if (INTEL_GEN(dev_priv) >= 9)
@@@ -13610,7 -13595,8 +13608,7 @@@ intel_pipe_config_compare(const struct 
        bool ret = true;
        u32 bp_gamma = 0;
        bool fixup_inherited = fastset &&
 -              (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
 -              !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
 +              current_config->inherited && !pipe_config->inherited;
  
        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm,
@@@ -14021,10 -14007,10 +14019,10 @@@ static void verify_wm_state(struct inte
        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
  
        if (INTEL_GEN(dev_priv) >= 11 &&
 -          hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
 +          hw_enabled_slices != dev_priv->dbuf.enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
 -                      dev_priv->enabled_dbuf_slices_mask,
 +                      dev_priv->dbuf.enabled_slices,
                        hw_enabled_slices);
  
        /* planes */
@@@ -14418,8 -14404,6 +14416,8 @@@ intel_crtc_update_active_timings(const 
  
        drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
  
 +      crtc->mode_flags = crtc_state->mode_flags;
 +
        /*
         * The scanline counter increments at the leading edge of hsync.
         *
@@@ -14567,12 -14551,20 +14565,12 @@@ static int intel_modeset_checks(struct 
        state->modeset = true;
        state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
  
 -      state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
 -
 -      if (state->active_pipe_changes) {
 +      if (state->active_pipes != dev_priv->active_pipes) {
                ret = _intel_atomic_lock_global_state(state);
                if (ret)
                        return ret;
        }
  
 -      ret = intel_modeset_calc_cdclk(state);
 -      if (ret)
 -              return ret;
 -
 -      intel_modeset_clear_plls(state);
 -
        if (IS_HASWELL(dev_priv))
                return hsw_mode_set_planes_workaround(state);
  
@@@ -14649,10 -14641,11 +14647,10 @@@ static bool active_planes_affects_min_c
        /* See {hsw,vlv,ivb}_plane_ratio() */
        return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
                IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
 -              IS_IVYBRIDGE(dev_priv);
 +              IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
  }
  
 -static int intel_atomic_check_planes(struct intel_atomic_state *state,
 -                                   bool *need_cdclk_calc)
 +static int intel_atomic_check_planes(struct intel_atomic_state *state)
  {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
                old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
                new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
  
 -              if (hweight8(old_active_planes) == hweight8(new_active_planes))
 +              /*
 +               * Not only the number of planes, but also a change in the plane
 +               * configuration itself may mean we need to recompute min CDCLK,
 +               * because different planes may consume different amounts of DBuf
 +               * bandwidth, according to the formula:
 +               * Bw per plane = Pixel rate * bpp * pipe/plane scale factor
 +               */
 +              if (old_active_planes == new_active_planes)
                        continue;
  
                ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
                        return ret;
        }
  
 +      return 0;
 +}
 +
 +static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
 +                                  bool *need_cdclk_calc)
 +{
 +      struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 +      struct intel_cdclk_state *new_cdclk_state;
 +      struct intel_plane_state *plane_state;
 +      struct intel_bw_state *new_bw_state;
 +      struct intel_plane *plane;
 +      int min_cdclk = 0;
 +      enum pipe pipe;
 +      int ret;
 +      int i;
        /*
         * active_planes bitmask has been updated, and potentially
         * affected planes are part of the state. We can now
                        return ret;
        }
  
 +      new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
 +
 +      if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
 +              *need_cdclk_calc = true;
 +
 +      ret = dev_priv->display.bw_calc_min_cdclk(state);
 +      if (ret)
 +              return ret;
 +
 +      new_bw_state = intel_atomic_get_new_bw_state(state);
 +
 +      if (!new_cdclk_state || !new_bw_state)
 +              return 0;
 +
 +      for_each_pipe(dev_priv, pipe) {
 +              min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
 +
 +              /*
 +               * Currently trigger a recomputation only when cdclk needs to increase.
 +               */
 +              if (new_bw_state->min_cdclk > min_cdclk)
 +                      *need_cdclk_calc = true;
 +      }
 +
        return 0;
  }
  
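A rough worked example of the bandwidth formula quoted in the new comment above (numbers are illustrative, not from the source): a 3840x2160@60 plane has a pixel rate of roughly 594 MPix/s (the CEA 4k60 pixel clock), so at 4 bytes per pixel with a scale factor of 1:

    /*
     * Bw per plane = Pixel rate * bpp * pipe/plane scale factor
     *            ~= 594e6 px/s * 4 B/px * 1 ~= 2.4 GB/s
     *
     * Swapping that plane for one with an 8 B/px format keeps the plane
     * count identical but doubles the DBuf bandwidth demand, which is
     * why the full active_planes mask is compared above rather than
     * hweight8() of it.
     */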
@@@ -14809,13 -14757,16 +14807,13 @@@ static int intel_atomic_check(struct dr
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
 -      struct intel_cdclk_state *new_cdclk_state;
        struct intel_crtc *crtc;
        int ret, i;
        bool any_ms = false;
  
 -      /* Catch I915_MODE_FLAG_INHERITED */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
 -              if (new_crtc_state->uapi.mode.private_flags !=
 -                  old_crtc_state->uapi.mode.private_flags)
 +              if (new_crtc_state->inherited != old_crtc_state->inherited)
                        new_crtc_state->uapi.mode_changed = true;
        }
  
        if (ret)
                goto fail;
  
 -      ret = intel_atomic_check_planes(state, &any_ms);
 +      ret = intel_atomic_check_planes(state);
        if (ret)
                goto fail;
  
 -      new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
 -      if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
 -              any_ms = true;
 -
        /*
         * distrust_bios_wm will force a full dbuf recomputation
         * but the hardware state will only get updated accordingly
                        goto fail;
        }
  
 -      ret = intel_atomic_check_crtcs(state);
 -      if (ret)
 -              goto fail;
 -
        intel_fbc_choose_crtc(dev_priv, state);
        ret = calc_watermark_data(state);
        if (ret)
        if (ret)
                goto fail;
  
 +      ret = intel_atomic_check_cdclk(state, &any_ms);
 +      if (ret)
 +              goto fail;
 +
 +      if (any_ms) {
 +              ret = intel_modeset_calc_cdclk(state);
 +              if (ret)
 +                      return ret;
 +
 +              intel_modeset_clear_plls(state);
 +      }
 +
 +      ret = intel_atomic_check_crtcs(state);
 +      if (ret)
 +              goto fail;
 +
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state) &&
  
  static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
  {
 -      return drm_atomic_helper_prepare_planes(state->base.dev,
 -                                              &state->base);
 +      struct intel_crtc_state *crtc_state;
 +      struct intel_crtc *crtc;
 +      int i, ret;
 +
 +      ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
 +      if (ret < 0)
 +              return ret;
 +
 +      for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 +              bool mode_changed = needs_modeset(crtc_state);
 +
 +              if (mode_changed || crtc_state->update_pipe ||
 +                  crtc_state->uapi.color_mgmt_changed) {
 +                      intel_dsb_prepare(crtc_state);
 +              }
 +      }
 +
 +      return 0;
  }
  
  u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@@ -15185,7 -15112,7 +15183,7 @@@ static void intel_update_crtc(struct in
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
 -          old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
 +          old_crtc_state->inherited)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
  }
  
@@@ -15277,6 -15204,29 +15275,6 @@@ static void intel_commit_modeset_enable
        }
  }
  
 -static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 -      u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
 -      u8 required_slices = state->enabled_dbuf_slices_mask;
 -      u8 slices_union = hw_enabled_slices | required_slices;
 -
 -      /* If 2nd DBuf slice required, enable it here */
 -      if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
 -              icl_dbuf_slices_update(dev_priv, slices_union);
 -}
 -
 -static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 -      u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
 -      u8 required_slices = state->enabled_dbuf_slices_mask;
 -
 -      /* If 2nd DBuf slice is no more required disable it */
 -      if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
 -              icl_dbuf_slices_update(dev_priv, required_slices);
 -}
 -
  static void skl_commit_modeset_enables(struct intel_atomic_state *state)
  {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@@ -15443,27 -15393,15 +15441,27 @@@ static void intel_atomic_commit_fence_w
                    &wait_reset);
  }
  
 +static void intel_cleanup_dsbs(struct intel_atomic_state *state)
 +{
 +      struct intel_crtc_state *old_crtc_state, *new_crtc_state;
 +      struct intel_crtc *crtc;
 +      int i;
 +
 +      for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 +                                          new_crtc_state, i)
 +              intel_dsb_cleanup(old_crtc_state);
 +}
 +
  static void intel_atomic_cleanup_work(struct work_struct *work)
  {
 -      struct drm_atomic_state *state =
 -              container_of(work, struct drm_atomic_state, commit_work);
 -      struct drm_i915_private *i915 = to_i915(state->dev);
 +      struct intel_atomic_state *state =
 +              container_of(work, struct intel_atomic_state, base.commit_work);
 +      struct drm_i915_private *i915 = to_i915(state->base.dev);
  
 -      drm_atomic_helper_cleanup_planes(&i915->drm, state);
 -      drm_atomic_helper_commit_cleanup_done(state);
 -      drm_atomic_state_put(state);
 +      intel_cleanup_dsbs(state);
 +      drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
 +      drm_atomic_helper_commit_cleanup_done(&state->base);
 +      drm_atomic_state_put(&state->base);
  
        intel_atomic_helper_free_state(i915);
  }
@@@ -15529,7 -15467,9 +15527,7 @@@ static void intel_atomic_commit_tail(st
        if (state->modeset)
                intel_encoders_update_prepare(state);
  
 -      /* Enable all new slices, we might need */
 -      if (state->modeset)
 -              icl_dbuf_slice_pre_update(state);
 +      intel_dbuf_pre_plane_update(state);
  
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.commit_modeset_enables(state);
                        dev_priv->display.optimize_watermarks(state, crtc);
        }
  
 -      /* Disable all slices, we don't need */
 -      if (state->modeset)
 -              icl_dbuf_slice_post_update(state);
 +      intel_dbuf_post_plane_update(state);
  
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(state, crtc);
                        modeset_put_power_domains(dev_priv, put_domains[i]);
  
                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
 +
 +              /*
 +               * DSB cleanup is deferred to cleanup_work, in line with
 +               * framebuffer cleanup. Move the dsb structure from the new
 +               * state to the old one so that commit_done stays in sync,
 +               * and free it later from cleanup_work.
 +               */
 +              old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
        }
  
        /* Underruns don't always raise interrupts, so check manually */
@@@ -15749,15 -15684,8 +15747,15 @@@ static int intel_atomic_commit(struct d
                intel_atomic_swap_global_state(state);
  
        if (ret) {
 +              struct intel_crtc_state *new_crtc_state;
 +              struct intel_crtc *crtc;
 +              int i;
 +
                i915_sw_fence_commit(&state->commit_ready);
  
 +              for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
 +                      intel_dsb_cleanup(new_crtc_state);
 +
                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
@@@ -16817,12 -16745,7 +16815,12 @@@ static void intel_setup_outputs(struct 
        if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
                return;
  
 -      if (INTEL_GEN(dev_priv) >= 12) {
 +      if (IS_ROCKETLAKE(dev_priv)) {
 +              intel_ddi_init(dev_priv, PORT_A);
 +              intel_ddi_init(dev_priv, PORT_B);
 +              intel_ddi_init(dev_priv, PORT_D);       /* DDI TC1 */
 +              intel_ddi_init(dev_priv, PORT_E);       /* DDI TC2 */
 +      } else if (INTEL_GEN(dev_priv) >= 12) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_D);
@@@ -17497,35 -17420,23 +17495,35 @@@ void intel_modeset_init_hw(struct drm_i
  {
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(i915->cdclk.obj.state);
 +      struct intel_dbuf_state *dbuf_state =
 +              to_intel_dbuf_state(i915->dbuf.obj.state);
  
        intel_update_cdclk(i915);
        intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
        cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
 +
 +      dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
  }
  
  static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
  {
        struct drm_plane *plane;
 -      struct drm_crtc *crtc;
 +      struct intel_crtc *crtc;
  
 -      drm_for_each_crtc(crtc, state->dev) {
 -              struct drm_crtc_state *crtc_state;
 +      for_each_intel_crtc(state->dev, crtc) {
 +              struct intel_crtc_state *crtc_state;
  
 -              crtc_state = drm_atomic_get_crtc_state(state, crtc);
 +              crtc_state = intel_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
 +
 +              if (crtc_state->hw.active) {
 +                      /*
 +                       * Preserve the inherited flag to avoid
 +                       * taking the full modeset path.
 +                       */
 +                      crtc_state->inherited = true;
 +              }
        }
  
        drm_for_each_plane(plane, state->dev) {
                }
  
                if (crtc_state->hw.active) {
 +                      /*
 +                       * We've not yet detected sink capabilities
 +                       * (audio,infoframes,etc.) and thus we don't want to
 +                       * force a full state recomputation yet. We want that to
 +                       * happen only for the first real commit from userspace.
 +                       * So preserve the inherited flag for the time being.
 +                       */
 +                      crtc_state->inherited = true;
 +
                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
                        if (ret)
                                goto out;
@@@ -17763,8 -17665,7 +17761,8 @@@ static void intel_mode_config_init(stru
        if (IS_I845G(i915) || IS_I865G(i915)) {
                mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
                mode_config->cursor_height = 1023;
 -      } else if (IS_GEN(i915, 2)) {
 +      } else if (IS_I830(i915) || IS_I85X(i915) ||
 +                 IS_I915G(i915) || IS_I915GM(i915)) {
                mode_config->cursor_width = 64;
                mode_config->cursor_height = 64;
        } else {
@@@ -17810,10 -17711,6 +17808,10 @@@ int intel_modeset_init_noirq(struct drm
        if (ret)
                return ret;
  
 +      ret = intel_dbuf_init(i915);
 +      if (ret)
 +              return ret;
 +
        ret = intel_bw_init(i915);
        if (ret)
                return ret;
@@@ -18330,8 -18227,6 +18328,8 @@@ static void intel_modeset_readout_hw_st
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
 +      struct intel_dbuf_state *dbuf_state =
 +              to_intel_dbuf_state(dev_priv->dbuf.obj.state);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
                            enableddisabled(crtc_state->hw.active));
        }
  
 -      dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
 +      dev_priv->active_pipes = cdclk_state->active_pipes =
 +              dbuf_state->active_pipes = active_pipes;
  
        readout_plane_state(dev_priv);
  
                         * set a flag to indicate that a full recalculation is
                         * needed on the next commit.
                         */
 -                      mode->private_flags = I915_MODE_FLAG_INHERITED;
 +                      crtc_state->inherited = true;
  
                        intel_crtc_compute_pixel_rate(crtc_state);
  
index cfe2517e0088b0b1fc2a0fd457ba2c7f7012dd8b,2b640d8ab9d2ebf55c68d110e8c478e45e5fb8b3..d1cb48b3f4628d0e7d8f7b137debd8ae120b83ba
@@@ -125,7 -125,7 +125,7 @@@ static int i915_ips_status(struct seq_f
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
  
        seq_printf(m, "Enabled by kernel parameter: %s\n",
 -                 yesno(i915_modparams.enable_ips));
 +                 yesno(dev_priv->params.enable_ips));
  
        if (INTEL_GEN(dev_priv) >= 8) {
                seq_puts(m, "Currently: unknown\n");
@@@ -632,15 -632,9 +632,9 @@@ static void intel_dp_info(struct seq_fi
  }
  
  static void intel_dp_mst_info(struct seq_file *m,
-                         struct intel_connector *intel_connector)
+                             struct intel_connector *intel_connector)
  {
-       struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
-       struct intel_dp_mst_encoder *intel_mst =
-               enc_to_mst(intel_encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                       intel_connector->port);
+       bool has_audio = intel_connector->port->has_audio;
  
        seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
  }
@@@ -1105,10 -1099,10 +1099,10 @@@ static void drrs_status_per_crtc(struc
                seq_puts(m, "\n\t\t");
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
-                       vrefresh = panel->fixed_mode->vrefresh;
+                       vrefresh = drm_mode_vrefresh(panel->fixed_mode);
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
-                       vrefresh = panel->downclock_mode->vrefresh;
+                       vrefresh = drm_mode_vrefresh(panel->downclock_mode);
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                                drrs->refresh_rate_type);
@@@ -1984,7 -1978,7 +1978,7 @@@ static const struct 
        {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
  };
  
- int intel_display_debugfs_register(struct drm_i915_private *i915)
+ void intel_display_debugfs_register(struct drm_i915_private *i915)
  {
        struct drm_minor *minor = i915->drm.primary;
        int i;
                                    intel_display_debugfs_files[i].fops);
        }
  
-       return drm_debugfs_create_files(intel_display_debugfs_list,
-                                       ARRAY_SIZE(intel_display_debugfs_list),
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(intel_display_debugfs_list,
+                                ARRAY_SIZE(intel_display_debugfs_list),
+                                minor->debugfs_root, minor);
  }
  
  static int i915_panel_show(struct seq_file *m, void *data)
@@@ -2224,8 -2218,7 +2218,8 @@@ int intel_connector_debugfs_add(struct 
        }
  
        if (INTEL_GEN(dev_priv) >= 10 &&
 -          (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 +          ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
 +            !to_intel_connector(connector)->mst_port) ||
             connector->connector_type == DRM_MODE_CONNECTOR_eDP))
                debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
                                    connector, &i915_dsc_fec_support_fops);
index 76a49eac730595f670fca05d4f7e7c687cdc2fda,2bf3d4cb4ea983c872a9f19565f1bb953b9ab30c..4b0aaa3081c9ca61fdb3d7203fbf01f8464a9c73
@@@ -438,7 -438,7 +438,7 @@@ struct intel_connector 
           state of connector->polled in case hotplug storm detection changes it */
        u8 polled;
  
-       void *port; /* store this opaque as its illegal to dereference it */
+       struct drm_dp_mst_port *port;
  
        struct intel_dp *mst_port;
  
@@@ -479,6 -479,16 +479,6 @@@ struct intel_atomic_state 
  
        bool dpll_set, modeset;
  
 -      /*
 -       * Does this transaction change the pipes that are active?  This mask
 -       * tracks which CRTC's have changed their active state at the end of
 -       * the transaction (not counting the temporary disable during modesets).
 -       * This mask should only be non-zero when intel_state->modeset is true,
 -       * but the converse is not necessarily true; simply changing a mode may
 -       * not flip the final active status of any CRTC's
 -       */
 -      u8 active_pipe_changes;
 -
        u8 active_pipes;
  
        struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
         */
        bool global_state_changed;
  
 -      /* Number of enabled DBuf slices */
 -      u8 enabled_dbuf_slices_mask;
 -
        struct i915_sw_fence commit_ready;
  
        struct llist_node freed;
@@@ -630,7 -643,8 +630,7 @@@ struct intel_crtc_scaler_state 
        int scaler_id;
  };
  
 -/* drm_mode->private_flags */
 -#define I915_MODE_FLAG_INHERITED (1<<0)
 +/* {crtc,crtc_state}->mode_flags */
  /* Flag to get scanline using frame time stamps */
  #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
  /* Flag to use the scanline counter instead of the pixel counter */
@@@ -827,7 -841,6 +827,7 @@@ struct intel_crtc_state 
        bool update_wm_pre, update_wm_post; /* watermarks are updated */
        bool fifo_changed; /* FIFO split is changed */
        bool preload_luts;
 +      bool inherited; /* state inherited from BIOS? */
  
        /* Pipe source size (ie. panel fitter input size)
         * All planes will be positioned inside this space,
        /* Used by SDVO (and if we ever fix it, HDMI). */
        unsigned pixel_multiplier;
  
 +      /* I915_MODE_FLAG_* */
 +      u8 mode_flags;
 +
        u8 lane_count;
  
        /*
  
        /* Only valid on TGL+ */
        enum transcoder mst_master_transcoder;
 +
 +      /* For DSB related info */
 +      struct intel_dsb *dsb;
  };
  
  enum intel_pipe_crc_source {
@@@ -1111,10 -1118,6 +1111,10 @@@ struct intel_crtc 
         */
        bool active;
        u8 plane_ids_mask;
 +
 +      /* I915_MODE_FLAG_* */
 +      u8 mode_flags;
 +
        unsigned long long enabled_power_domains;
        struct intel_overlay *overlay;
  
        /* scalers available on this crtc */
        int num_scalers;
  
 -      /* per pipe DSB related info */
 -      struct intel_dsb dsb;
 -
  #ifdef CONFIG_DEBUG_FS
        struct intel_pipe_crc pipe_crc;
  #endif
@@@ -1367,9 -1373,6 +1367,9 @@@ struct intel_dp 
        void (*set_idle_link_train)(struct intel_dp *intel_dp);
        void (*set_signal_levels)(struct intel_dp *intel_dp);
  
 +      u8 (*preemph_max)(struct intel_dp *intel_dp);
 +      u8 (*voltage_max)(struct intel_dp *intel_dp);
 +
        /* Displayport compliance testing */
        struct intel_dp_compliance compliance;
  
index 7765a8b95b9dc51cf47f656568681326b5e3851f,cc525fda441af8ea8da7e361686838dc8db08954..3df5d901dd9d97fd138946778fca0eba34f4093d
@@@ -409,10 -409,7 +409,10 @@@ static int intel_dp_rate_index(const in
  
  static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
  {
 -      WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 +
 +      drm_WARN_ON(&i915->drm,
 +                  !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
  
        intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                     intel_dp->num_source_rates,
                                                     intel_dp->common_rates);
  
        /* Paranoia, there should always be something in common. */
 -      if (WARN_ON(intel_dp->num_common_rates == 0)) {
 +      if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
                intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
@@@ -468,15 -465,6 +468,15 @@@ int intel_dp_get_link_train_fallback_va
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int index;
  
 +      /*
 +       * TODO: Enable fallback on MST links once MST link compute can handle
 +       * the fallback params.
 +       */
 +      if (intel_dp->is_mst) {
 +              drm_err(&i915->drm, "Link Training Unsuccessful\n");
 +              return -1;
 +      }
 +
        index = intel_dp_rate_index(intel_dp->common_rates,
                                    intel_dp->num_common_rates,
                                    link_rate);
@@@ -1567,7 -1555,6 +1567,7 @@@ static ssize_
  intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
  {
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */
  
 -              if (WARN_ON(txsize > 20))
 +              if (drm_WARN_ON(&i915->drm, txsize > 20))
                        return -E2BIG;
  
 -              WARN_ON(!msg->buffer != !msg->size);
 +              drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
  
                if (msg->buffer)
                        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                rxsize = msg->size + 1;
  
 -              if (WARN_ON(rxsize > 20))
 +              if (drm_WARN_ON(&i915->drm, rxsize > 20))
                        return -E2BIG;
  
                ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@@@ -1884,11 -1871,10 +1884,11 @@@ static void intel_dp_print_rates(struc
  int
  intel_dp_max_link_rate(struct intel_dp *intel_dp)
  {
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int len;
  
        len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
 -      if (WARN_ON(len <= 0))
 +      if (drm_WARN_ON(&i915->drm, len <= 0))
                return 162000;
  
        return intel_dp->common_rates[len - 1];
  
  int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
  {
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        int i = intel_dp_rate_index(intel_dp->sink_rates,
                                    intel_dp->num_sink_rates, rate);
  
 -      if (WARN_ON(i < 0))
 +      if (drm_WARN_ON(&i915->drm, i < 0))
                i = 0;
  
        return i;
@@@ -3999,24 -3984,70 +3999,24 @@@ intel_dp_get_link_status(struct intel_d
                                DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
  }
  
 -/* These are source-specific values. */
 -u8
 -intel_dp_voltage_max(struct intel_dp *intel_dp)
 +static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
  {
 -      struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 -      struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 -      enum port port = encoder->port;
 +      return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 +}
  
 -      if (HAS_DDI(dev_priv))
 -              return intel_ddi_dp_voltage_max(encoder);
 -      else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 -              return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 -      else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
 -              return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 -      else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
 -              return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 -      else
 -              return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 +static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
 +{
 +      return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  }
  
 -u8
 -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
 +static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
  {
 -      struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 -      struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 -      enum port port = encoder->port;
 +      return DP_TRAIN_PRE_EMPH_LEVEL_2;
 +}
  
 -      if (HAS_DDI(dev_priv)) {
 -              return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
 -      } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 -              switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_3;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_2;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_1;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
 -              default:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_0;
 -              }
 -      } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
 -              switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_2;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_1;
 -              default:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_0;
 -              }
 -      } else {
 -              switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_2;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_2;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_1;
 -              case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
 -              default:
 -                      return DP_TRAIN_PRE_EMPH_LEVEL_0;
 -              }
 -      }
 +static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
 +{
 +      return DP_TRAIN_PRE_EMPH_LEVEL_3;
  }
  
  static void vlv_set_signal_levels(struct intel_dp *intel_dp)
@@@ -4299,7 -4330,6 +4299,7 @@@ static u32 ivb_cpu_edp_signal_levels(u
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
                return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
 +      case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
                return EDP_LINK_TRAIN_400MV_6DB_IVB;
  
        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@@ -4716,9 -4746,7 +4716,9 @@@ intel_dp_sink_can_mst(struct intel_dp *
  static bool
  intel_dp_can_mst(struct intel_dp *intel_dp)
  {
 -      return i915_modparams.enable_dp_mst &&
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 +
 +      return i915->params.enable_dp_mst &&
                intel_dp->can_mst &&
                intel_dp_sink_can_mst(intel_dp);
  }
@@@ -4735,13 -4763,13 +4735,13 @@@ intel_dp_configure_mst(struct intel_dp 
                    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
                    encoder->base.base.id, encoder->base.name,
                    yesno(intel_dp->can_mst), yesno(sink_can_mst),
 -                  yesno(i915_modparams.enable_dp_mst));
 +                  yesno(i915->params.enable_dp_mst));
  
        if (!intel_dp->can_mst)
                return;
  
        intel_dp->is_mst = sink_can_mst &&
 -              i915_modparams.enable_dp_mst;
 +              i915->params.enable_dp_mst;
  
        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                        intel_dp->is_mst);
@@@ -5567,46 -5595,35 +5567,46 @@@ update_status
                            "Could not write test response to sink\n");
  }
  
 -static int
 +/**
 + * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 + * @intel_dp: Intel DP struct
 + *
 + * Read any pending MST interrupts, call MST core to handle these and ack the
 + * interrupts. Check if the main and AUX link state is ok.
 + *
 + * Returns:
 + * - %true if pending interrupts were serviced (or no interrupts were
 + *   pending) w/o detecting an error condition.
 + * - %false if an error condition - like AUX failure or a loss of link - is
 + *   detected, which needs servicing from the hotplug work.
 + */
 +static bool
  intel_dp_check_mst_status(struct intel_dp *intel_dp)
  {
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 -      bool need_retrain = false;
 -
 -      if (!intel_dp->is_mst)
 -              return -EINVAL;
 +      bool link_ok = true;
  
 -      WARN_ON_ONCE(intel_dp->active_mst_links < 0);
 +      drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
  
        for (;;) {
                u8 esi[DP_DPRX_ESI_LEN] = {};
 -              bool bret, handled;
 +              bool handled;
                int retry;
  
 -              bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
 -              if (!bret) {
 +              if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
                        drm_dbg_kms(&i915->drm,
                                    "failed to get ESI - device may have failed\n");
 -                      return -EINVAL;
 +                      link_ok = false;
 +
 +                      break;
                }
  
                /* check link status - esi[10] = 0x200c */
 -              if (intel_dp->active_mst_links > 0 && !need_retrain &&
 +              if (intel_dp->active_mst_links > 0 && link_ok &&
                    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                        drm_dbg_kms(&i915->drm,
                                    "channel EQ not ok, retraining\n");
 -                      need_retrain = true;
 +                      link_ok = false;
                }
  
                drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
                }
        }
  
 -      return need_retrain;
 +      return link_ok;
  }
  
  static bool
@@@ -5949,7 -5966,7 +5949,7 @@@ intel_dp_detect_dpcd(struct intel_dp *i
        u8 *dpcd = intel_dp->dpcd;
        u8 type;
  
 -      if (WARN_ON(intel_dp_is_edp(intel_dp)))
 +      if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
                return connector_status_connected;
  
        if (lspcon->active)
@@@ -6174,17 -6191,7 +6174,17 @@@ intel_dp_detect(struct drm_connector *c
                goto out;
        }
  
 -      if (intel_dp->reset_link_params) {
 +      /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
 +      if (INTEL_GEN(dev_priv) >= 11)
 +              intel_dp_get_dsc_sink_cap(intel_dp);
 +
 +      intel_dp_configure_mst(intel_dp);
 +
 +      /*
 +       * TODO: Reset link params when switching to MST mode, until MST
 +       * supports link training fallback params.
 +       */
 +      if (intel_dp->reset_link_params || intel_dp->is_mst) {
                /* Initial max link lane count */
                intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
  
  
        intel_dp_print_rates(intel_dp);
  
 -      /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
 -      if (INTEL_GEN(dev_priv) >= 11)
 -              intel_dp_get_dsc_sink_cap(intel_dp);
 -
 -      intel_dp_configure_mst(intel_dp);
 -
        if (intel_dp->is_mst) {
                /*
                 * If we are in MST mode then this connector
@@@ -7281,10 -7294,35 +7281,10 @@@ intel_dp_hpd_pulse(struct intel_digital
        }
  
        if (intel_dp->is_mst) {
 -              switch (intel_dp_check_mst_status(intel_dp)) {
 -              case -EINVAL:
 -                      /*
 -                       * If we were in MST mode, and device is not
 -                       * there, get out of MST mode
 -                       */
 -                      drm_dbg_kms(&i915->drm,
 -                                  "MST device may have disappeared %d vs %d\n",
 -                                  intel_dp->is_mst,
 -                                  intel_dp->mst_mgr.mst_state);
 -                      intel_dp->is_mst = false;
 -                      drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 -                                                      intel_dp->is_mst);
 -
 -                      return IRQ_NONE;
 -              case 1:
 -                      return IRQ_NONE;
 -              default:
 -                      break;
 -              }
 -      }
 -
 -      if (!intel_dp->is_mst) {
 -              bool handled;
 -
 -              handled = intel_dp_short_pulse(intel_dp);
 -
 -              if (!handled)
 +              if (!intel_dp_check_mst_status(intel_dp))
                        return IRQ_NONE;
 +      } else if (!intel_dp_short_pulse(intel_dp)) {
 +              return IRQ_NONE;
        }
  
        return IRQ_HANDLED;
@@@ -7656,7 -7694,7 +7656,7 @@@ static void intel_dp_set_drrs_state(str
                return;
        }
  
-       if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+       if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
                        refresh_rate)
                index = DRRS_LOW_RR;
  
@@@ -7769,7 -7807,7 +7769,7 @@@ void intel_edp_drrs_disable(struct inte
  
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, old_crtc_state,
-                       intel_dp->attached_connector->panel.fixed_mode->vrefresh);
+                       drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
  
        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);
@@@ -7802,7 -7840,7 +7802,7 @@@ static void intel_edp_drrs_downclock_wo
                struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
  
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
-                       intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+                       drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
        }
  
  unlock:
  void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits)
  {
+       struct intel_dp *intel_dp;
        struct drm_crtc *crtc;
        enum pipe pipe;
  
        cancel_delayed_work(&dev_priv->drrs.work);
  
        mutex_lock(&dev_priv->drrs.mutex);
-       if (!dev_priv->drrs.dp) {
+       intel_dp = dev_priv->drrs.dp;
+       if (!intel_dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }
  
-       crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+       crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
  
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
-                       dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+                                       drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
  
        mutex_unlock(&dev_priv->drrs.mutex);
  }
  void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits)
  {
+       struct intel_dp *intel_dp;
        struct drm_crtc *crtc;
        enum pipe pipe;
  
        cancel_delayed_work(&dev_priv->drrs.work);
  
        mutex_lock(&dev_priv->drrs.mutex);
-       if (!dev_priv->drrs.dp) {
+       intel_dp = dev_priv->drrs.dp;
+       if (!intel_dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }
  
-       crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+       crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
  
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
-                               dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+                                       drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
  
        /*
         * flush also means no more activity hence schedule downclock, if all
@@@ -8322,15 -8366,6 +8328,15 @@@ bool intel_dp_init(struct drm_i915_priv
        else
                intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
  
 +      if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
 +          (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
 +              intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3;
 +              intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3;
 +      } else {
 +              intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2;
 +              intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2;
 +      }
 +
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;
        intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
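The intel_dp.c hunks above replace the per-call platform if-ladders with voltage_max()/preemph_max() vfuncs chosen once at init. A simplified, hypothetical sketch of a consumer clamping a requested level (the real consumer is the link-training code, which is not part of this diff):

    static u8 clamp_voltage_swing(struct intel_dp *intel_dp, u8 requested)
    {
            u8 vmax = intel_dp->voltage_max(intel_dp);

            return min(requested, vmax);    /* never exceed the source limit */
    }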
index f7df7a5b7c136bb0ba55c444670f32220f662674,2e6c6375a23b42fdc978972928153032cb8cd273..8273f2e07427c4d14d3656f0fefd6a67ba407da6
@@@ -33,6 -33,7 +33,7 @@@
  #include "intel_connector.h"
  #include "intel_ddi.h"
  #include "intel_display_types.h"
+ #include "intel_hotplug.h"
  #include "intel_dp.h"
  #include "intel_dp_mst.h"
  #include "intel_dpio_phy.h"
@@@ -113,9 -114,7 +114,7 @@@ static int intel_dp_mst_compute_config(
        pipe_config->has_pch_encoder = false;
  
        if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-               pipe_config->has_audio =
-                       drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                                 connector->port);
+               pipe_config->has_audio = connector->port->has_audio;
        else
                pipe_config->has_audio =
                        intel_conn_state->force_audio == HDMI_AUDIO_ON;
@@@ -318,25 -317,6 +317,25 @@@ intel_dp_mst_atomic_check(struct drm_co
        return ret;
  }
  
 +static void clear_act_sent(struct intel_dp *intel_dp)
 +{
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 +
 +      intel_de_write(i915, intel_dp->regs.dp_tp_status,
 +                     DP_TP_STATUS_ACT_SENT);
 +}
 +
 +static void wait_for_act_sent(struct intel_dp *intel_dp)
 +{
 +      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 +
 +      if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status,
 +                                DP_TP_STATUS_ACT_SENT, 1))
 +              drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
 +
 +      drm_dp_check_act_status(&intel_dp->mst_mgr);
 +}
 +
  static void intel_mst_disable_dp(struct intel_atomic_state *state,
                                 struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
@@@ -390,8 -370,6 +389,8 @@@ static void intel_mst_post_disable_dp(s
  
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
  
 +      clear_act_sent(intel_dp);
 +
        val = intel_de_read(dev_priv,
                            TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
        val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
                       TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
                       val);
  
 -      if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
 -                                DP_TP_STATUS_ACT_SENT, 1))
 -              drm_err(&dev_priv->drm,
 -                      "Timed out waiting for ACT sent when disabling\n");
 -      drm_dp_check_act_status(&intel_dp->mst_mgr);
 +      wait_for_act_sent(intel_dp);
  
        drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
  
@@@ -470,6 -452,7 +469,6 @@@ static void intel_mst_pre_enable_dp(str
        struct intel_connector *connector =
                to_intel_connector(conn_state->connector);
        int ret;
 -      u32 temp;
        bool first_mst_stream;
  
        /* MST encoders are bound to a crtc, not to a connector,
                drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
  
        intel_dp->active_mst_links++;
 -      temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
 -      intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
  
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
  
@@@ -529,25 -514,19 +528,25 @@@ static void intel_mst_enable_dp(struct 
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 +      u32 val;
  
        drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
  
 +      clear_act_sent(intel_dp);
 +
        intel_ddi_enable_transcoder_func(encoder, pipe_config);
  
 +      val = intel_de_read(dev_priv,
 +                          TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
 +      val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
 +      intel_de_write(dev_priv,
 +                     TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder),
 +                     val);
 +
        drm_dbg_kms(&dev_priv->drm, "active links %d\n",
                    intel_dp->active_mst_links);
  
 -      if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
 -                                DP_TP_STATUS_ACT_SENT, 1))
 -              drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
 -
 -      drm_dp_check_act_status(&intel_dp->mst_mgr);
 +      wait_for_act_sent(intel_dp);
  
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
  
@@@ -795,8 -774,17 +794,17 @@@ err
        return NULL;
  }
  
+ static void
+ intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+ {
+       struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ 
+       intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
+ }
+ 
  static const struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = intel_dp_add_mst_connector,
+       .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
  };
  
  static struct intel_dp_mst_encoder *
index d794dd5f170c789f7535d80db27cef414a22220c,664f883541018c7bf76ca22a695b296bc946dfe0..2e94c1413c02ab7114ffe89abcd7698977fff212
@@@ -89,15 -89,6 +89,15 @@@ enum hpd_pin intel_hpd_pin_default(stru
  {
        enum phy phy = intel_port_to_phy(dev_priv, port);
  
 +      /*
 +       * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
 +       * based on the DDI rather than the PHY (i.e., the last two outputs
 +       * should be HPD_PORT_{D,E} rather than {C,D}).  Note that this differs
 +       * from the behavior of both TGL+TGP and RKL+CMP.
 +       */
 +      if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
 +              return HPD_PORT_A + port - PORT_A;
 +
        switch (phy) {
        case PHY_F:
                return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
@@@ -356,6 -347,24 +356,24 @@@ static void i915_digport_work_func(stru
        }
  }
  
+ /**
+  * intel_hpd_trigger_irq - trigger an hpd irq event for a port
+  * @dig_port: digital port
+  *
+  * Trigger an HPD interrupt event for the given port, emulating a short pulse
+  * generated by the sink, and schedule the dig port work to handle it.
+  */
+ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
+ {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ 
+       spin_lock_irq(&i915->irq_lock);
+       i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+       spin_unlock_irq(&i915->irq_lock);
+ 
+       queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+ }
+ 
  /*
   * Handle hotplug events outside the interrupt handler proper.
   */
index 48093f19ec2251ad88aefb014c729e2a02d70e12,abc67207f2f3e2a1d965e5e10fdc42604fa30a7a..777032d9697b578c2c5e18aa0e19d6950f8719fd
@@@ -1038,9 -1038,6 +1038,6 @@@ intel_tv_mode_to_mode(struct drm_displa
        /* TV has its own notion of sync and other mode flags, so clear them. */
        mode->flags = 0;
  
-       mode->vrefresh = 0;
-       mode->vrefresh = drm_mode_vrefresh(mode);
        snprintf(mode->name, sizeof(mode->name),
                 "%dx%d%c (%s)",
                 mode->hdisplay, mode->vdisplay,
@@@ -1161,7 -1158,7 +1158,7 @@@ intel_tv_get_config(struct intel_encode
  
        /* pixel counter doesn't work on i965gm TV output */
        if (IS_I965GM(dev_priv))
 -              adjusted_mode->private_flags |=
 +              pipe_config->mode_flags |=
                        I915_MODE_FLAG_USE_SCANLINE_COUNTER;
  }
  
@@@ -1331,7 -1328,7 +1328,7 @@@ intel_tv_compute_config(struct intel_en
  
        /* pixel counter doesn't work on i965gm TV output */
        if (IS_I965GM(dev_priv))
 -              adjusted_mode->private_flags |=
 +              pipe_config->mode_flags |=
                        I915_MODE_FLAG_USE_SCANLINE_COUNTER;
  
        return 0;
index 4d88faeb4d4ca3e272533cf9f15d50229ba01741,30c229fcb4046ddd17a6aa3d9986cf67147d81e4..5c13809dc3c879bd217d7592b896d1b7b0b841e6
@@@ -650,7 -650,7 +650,7 @@@ static void context_close(struct i915_g
         * context close.
         */
        if (!i915_gem_context_is_persistent(ctx) ||
 -          !i915_modparams.enable_hangcheck)
 +          !ctx->i915->params.enable_hangcheck)
                kill_context(ctx);
  
        i915_gem_context_put(ctx);
@@@ -667,7 -667,7 +667,7 @@@ static int __context_set_persistence(st
                 * reset] are allowed to survive past termination. We require
                 * hangcheck to ensure that the persistent requests are healthy.
                 */
 -              if (!i915_modparams.enable_hangcheck)
 +              if (!ctx->i915->params.enable_hangcheck)
                        return -EINVAL;
  
                i915_gem_context_set_persistence(ctx);
@@@ -1921,11 -1921,6 +1921,6 @@@ get_engines(struct i915_gem_context *ct
        }
  
        user = u64_to_user_ptr(args->value);
-       if (!access_ok(user, size)) {
-               err = -EFAULT;
-               goto err_free;
-       }
        if (put_user(0, &user->extensions)) {
                err = -EFAULT;
                goto err_free;
index 23db79b806db53c27277527828f5c6c761182f10,db8eb1c6afe9b055096203ea1100c46393672059..c38ab51e82f08496dfceeafccb7673265cc350bf
@@@ -45,6 -45,13 +45,6 @@@ struct eb_vma_array 
        struct eb_vma vma[];
  };
  
 -enum {
 -      FORCE_CPU_RELOC = 1,
 -      FORCE_GTT_RELOC,
 -      FORCE_GPU_RELOC,
 -#define DBG_FORCE_RELOC 0 /* choose one of the above! */
 -};
 -
  #define __EXEC_OBJECT_HAS_PIN         BIT(31)
  #define __EXEC_OBJECT_HAS_FENCE               BIT(30)
  #define __EXEC_OBJECT_NEEDS_MAP               BIT(29)
@@@ -253,6 -260,8 +253,6 @@@ struct i915_execbuffer 
         */
        struct reloc_cache {
                struct drm_mm_node node; /** temporary GTT binding */
 -              unsigned long vaddr; /** Current kmap address */
 -              unsigned long page; /** Currently mapped page index */
                unsigned int gen; /** Cached value of INTEL_GEN */
                bool use_64bit_reloc : 1;
                bool has_llc : 1;
@@@ -596,6 -605,23 +596,6 @@@ eb_add_vma(struct i915_execbuffer *eb
        }
  }
  
 -static inline int use_cpu_reloc(const struct reloc_cache *cache,
 -                              const struct drm_i915_gem_object *obj)
 -{
 -      if (!i915_gem_object_has_struct_page(obj))
 -              return false;
 -
 -      if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
 -              return true;
 -
 -      if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
 -              return false;
 -
 -      return (cache->has_llc ||
 -              obj->cache_dirty ||
 -              obj->cache_level != I915_CACHE_NONE);
 -}
 -
  static int eb_reserve_vma(const struct i915_execbuffer *eb,
                          struct eb_vma *ev,
                          u64 pin_flags)
@@@ -919,6 -945,8 +919,6 @@@ relocation_target(const struct drm_i915
  static void reloc_cache_init(struct reloc_cache *cache,
                             struct drm_i915_private *i915)
  {
 -      cache->page = -1;
 -      cache->vaddr = 0;
        /* Must be a variable in the struct to allow GCC to unroll. */
        cache->gen = INTEL_GEN(i915);
        cache->has_llc = HAS_LLC(i915);
        cache->target = NULL;
  }
  
 -static inline void *unmask_page(unsigned long p)
 -{
 -      return (void *)(uintptr_t)(p & PAGE_MASK);
 -}
 -
 -static inline unsigned int unmask_flags(unsigned long p)
 -{
 -      return p & ~PAGE_MASK;
 -}
 -
 -#define KMAP 0x4 /* after CLFLUSH_FLAGS */
 -
 -static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
 -{
 -      struct drm_i915_private *i915 =
 -              container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
 -      return &i915->ggtt;
 -}
 -
  #define RELOC_TAIL 4
  
  static int reloc_gpu_chain(struct reloc_cache *cache)
@@@ -1042,6 -1089,181 +1042,6 @@@ static int reloc_gpu_flush(struct reloc
        return err;
  }
  
 -static void reloc_cache_reset(struct reloc_cache *cache)
 -{
 -      void *vaddr;
 -
 -      if (!cache->vaddr)
 -              return;
 -
 -      vaddr = unmask_page(cache->vaddr);
 -      if (cache->vaddr & KMAP) {
 -              if (cache->vaddr & CLFLUSH_AFTER)
 -                      mb();
 -
 -              kunmap_atomic(vaddr);
 -              i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
 -      } else {
 -              struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 -
 -              intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 -              io_mapping_unmap_atomic((void __iomem *)vaddr);
 -
 -              if (drm_mm_node_allocated(&cache->node)) {
 -                      ggtt->vm.clear_range(&ggtt->vm,
 -                                           cache->node.start,
 -                                           cache->node.size);
 -                      mutex_lock(&ggtt->vm.mutex);
 -                      drm_mm_remove_node(&cache->node);
 -                      mutex_unlock(&ggtt->vm.mutex);
 -              } else {
 -                      i915_vma_unpin((struct i915_vma *)cache->node.mm);
 -              }
 -      }
 -
 -      cache->vaddr = 0;
 -      cache->page = -1;
 -}
 -
 -static void *reloc_kmap(struct drm_i915_gem_object *obj,
 -                      struct reloc_cache *cache,
 -                      unsigned long page)
 -{
 -      void *vaddr;
 -
 -      if (cache->vaddr) {
 -              kunmap_atomic(unmask_page(cache->vaddr));
 -      } else {
 -              unsigned int flushes;
 -              int err;
 -
 -              err = i915_gem_object_prepare_write(obj, &flushes);
 -              if (err)
 -                      return ERR_PTR(err);
 -
 -              BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
 -              BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
 -
 -              cache->vaddr = flushes | KMAP;
 -              cache->node.mm = (void *)obj;
 -              if (flushes)
 -                      mb();
 -      }
 -
 -      vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
 -      cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
 -      cache->page = page;
 -
 -      return vaddr;
 -}
 -
 -static void *reloc_iomap(struct drm_i915_gem_object *obj,
 -                       struct reloc_cache *cache,
 -                       unsigned long page)
 -{
 -      struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 -      unsigned long offset;
 -      void *vaddr;
 -
 -      if (cache->vaddr) {
 -              intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 -              io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 -      } else {
 -              struct i915_vma *vma;
 -              int err;
 -
 -              if (i915_gem_object_is_tiled(obj))
 -                      return ERR_PTR(-EINVAL);
 -
 -              if (use_cpu_reloc(cache, obj))
 -                      return NULL;
 -
 -              i915_gem_object_lock(obj);
 -              err = i915_gem_object_set_to_gtt_domain(obj, true);
 -              i915_gem_object_unlock(obj);
 -              if (err)
 -                      return ERR_PTR(err);
 -
 -              vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 -                                             PIN_MAPPABLE |
 -                                             PIN_NONBLOCK /* NOWARN */ |
 -                                             PIN_NOEVICT);
 -              if (IS_ERR(vma)) {
 -                      memset(&cache->node, 0, sizeof(cache->node));
 -                      mutex_lock(&ggtt->vm.mutex);
 -                      err = drm_mm_insert_node_in_range
 -                              (&ggtt->vm.mm, &cache->node,
 -                               PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 -                               0, ggtt->mappable_end,
 -                               DRM_MM_INSERT_LOW);
 -                      mutex_unlock(&ggtt->vm.mutex);
 -                      if (err) /* no inactive aperture space, use cpu reloc */
 -                              return NULL;
 -              } else {
 -                      cache->node.start = vma->node.start;
 -                      cache->node.mm = (void *)vma;
 -              }
 -      }
 -
 -      offset = cache->node.start;
 -      if (drm_mm_node_allocated(&cache->node)) {
 -              ggtt->vm.insert_page(&ggtt->vm,
 -                                   i915_gem_object_get_dma_address(obj, page),
 -                                   offset, I915_CACHE_NONE, 0);
 -      } else {
 -              offset += page << PAGE_SHIFT;
 -      }
 -
 -      vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
 -                                                       offset);
 -      cache->page = page;
 -      cache->vaddr = (unsigned long)vaddr;
 -
 -      return vaddr;
 -}
 -
 -static void *reloc_vaddr(struct drm_i915_gem_object *obj,
 -                       struct reloc_cache *cache,
 -                       unsigned long page)
 -{
 -      void *vaddr;
 -
 -      if (cache->page == page) {
 -              vaddr = unmask_page(cache->vaddr);
 -      } else {
 -              vaddr = NULL;
 -              if ((cache->vaddr & KMAP) == 0)
 -                      vaddr = reloc_iomap(obj, cache, page);
 -              if (!vaddr)
 -                      vaddr = reloc_kmap(obj, cache, page);
 -      }
 -
 -      return vaddr;
 -}
 -
 -static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
 -{
 -      if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
 -              if (flushes & CLFLUSH_BEFORE) {
 -                      clflushopt(addr);
 -                      mb();
 -              }
 -
 -              *addr = value;
 -
 -              /*
 -               * Writes to the same cacheline are serialised by the CPU
 -               * (including clflush). On the write path, we only require
 -               * that it hits memory in an orderly fashion and place
 -               * mb barriers at the start and end of the relocation phase
 -               * to ensure ordering of clflush wrt to the system.
 -               */
 -              if (flushes & CLFLUSH_AFTER)
 -                      clflushopt(addr);
 -      } else
 -              *addr = value;
 -}
 -
  static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
  {
        struct drm_i915_gem_object *obj = vma->obj;
@@@ -1207,6 -1429,17 +1207,6 @@@ static u32 *reloc_gpu(struct i915_execb
        return cmd;
  }
  
 -static inline bool use_reloc_gpu(struct i915_vma *vma)
 -{
 -      if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
 -              return true;
 -
 -      if (DBG_FORCE_RELOC)
 -              return false;
 -
 -      return !dma_resv_test_signaled_rcu(vma->resv, true);
 -}
 -
  static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
  {
        struct page *page;
        return addr + offset_in_page(offset);
  }
  
 -static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
 -                            struct i915_vma *vma,
 -                            u64 offset,
 -                            u64 target_addr)
 +static int __reloc_entry_gpu(struct i915_execbuffer *eb,
 +                           struct i915_vma *vma,
 +                           u64 offset,
 +                           u64 target_addr)
  {
        const unsigned int gen = eb->reloc_cache.gen;
        unsigned int len;
  
        batch = reloc_gpu(eb, vma, len);
        if (IS_ERR(batch))
 -              return false;
 +              return PTR_ERR(batch);
  
        addr = gen8_canonical_addr(vma->node.start + offset);
        if (gen >= 8) {
                *batch++ = target_addr;
        }
  
 -      return true;
 -}
 -
 -static bool reloc_entry_gpu(struct i915_execbuffer *eb,
 -                          struct i915_vma *vma,
 -                          u64 offset,
 -                          u64 target_addr)
 -{
 -      if (eb->reloc_cache.vaddr)
 -              return false;
 -
 -      if (!use_reloc_gpu(vma))
 -              return false;
 -
 -      return __reloc_entry_gpu(eb, vma, offset, target_addr);
 +      return 0;
  }
  
  static u64
 -relocate_entry(struct i915_vma *vma,
 +relocate_entry(struct i915_execbuffer *eb,
 +             struct i915_vma *vma,
               const struct drm_i915_gem_relocation_entry *reloc,
 -             struct i915_execbuffer *eb,
               const struct i915_vma *target)
  {
        u64 target_addr = relocation_target(reloc, target);
 -      u64 offset = reloc->offset;
 -
 -      if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
 -              bool wide = eb->reloc_cache.use_64bit_reloc;
 -              void *vaddr;
 -
 -repeat:
 -              vaddr = reloc_vaddr(vma->obj,
 -                                  &eb->reloc_cache,
 -                                  offset >> PAGE_SHIFT);
 -              if (IS_ERR(vaddr))
 -                      return PTR_ERR(vaddr);
 -
 -              GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
 -              clflush_write32(vaddr + offset_in_page(offset),
 -                              lower_32_bits(target_addr),
 -                              eb->reloc_cache.vaddr);
 -
 -              if (wide) {
 -                      offset += sizeof(u32);
 -                      target_addr >>= 32;
 -                      wide = false;
 -                      goto repeat;
 -              }
 -      }
 +      int err;
 +
 +      err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
 +      if (err)
 +              return err;
  
        return target->node.start | UPDATE;
  }
@@@ -1359,7 -1626,8 +1359,7 @@@ eb_relocate_entry(struct i915_execbuffe
                        err = i915_vma_bind(target->vma,
                                            target->vma->obj->cache_level,
                                            PIN_GLOBAL, NULL);
 -                      if (WARN_ONCE(err,
 -                                    "Unexpected failure to bind target VMA!"))
 +                      if (err)
                                return err;
                }
        }
         * If the relocation already has the right value in it, no
         * more work needs to be done.
         */
 -      if (!DBG_FORCE_RELOC &&
 -          gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
 +      if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
                return 0;
  
        /* Check that the relocation address is valid... */
        ev->flags &= ~EXEC_OBJECT_ASYNC;
  
        /* and update the user's relocation entry */
 -      return relocate_entry(ev->vma, reloc, eb, target->vma);
 +      return relocate_entry(eb, ev->vma, reloc, target->vma);
  }
  
  static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
                 * this is bad and so lockdep complains vehemently.
                 */
                copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
 -              if (unlikely(copied)) {
 -                      remain = -EFAULT;
 -                      goto out;
 -              }
 +              if (unlikely(copied))
 +                      return -EFAULT;
  
                remain -= count;
                do {
  
                        if (likely(offset == 0)) {
                        } else if ((s64)offset < 0) {
 -                              remain = (int)offset;
 -                              goto out;
 +                              return (int)offset;
                        } else {
                                /*
                                 * Note that reporting an error now
                } while (r++, --count);
                urelocs += ARRAY_SIZE(stack);
        } while (remain);
 -out:
 -      reloc_cache_reset(&eb->reloc_cache);
 -      return remain;
 +
 +      return 0;
  }
  
  static int eb_relocate(struct i915_execbuffer *eb)
@@@ -1638,8 -1911,8 +1638,8 @@@ static int i915_reset_gen7_sol_offsets(
        u32 *cs;
        int i;
  
 -      if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
 -              drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
 +      if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
 +              drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
  
@@@ -2386,7 -2659,7 +2386,7 @@@ i915_gem_do_execbuffer(struct drm_devic
        eb.i915 = i915;
        eb.file = file;
        eb.args = args;
 -      if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
 +      if (!(args->flags & I915_EXEC_NO_RELOC))
                args->flags |= __EXEC_HAS_RELOC;
  
        eb.exec = exec;
@@@ -2782,7 -3055,8 +2782,8 @@@ i915_gem_execbuffer2_ioctl(struct drm_d
                 * And this range already got effectively checked earlier
                 * when we did the "copy_from_user()" above.
                 */
-               if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+               if (!user_write_access_begin(user_exec_list,
+                                            count * sizeof(*user_exec_list)))
                        goto end;
  
                for (i = 0; i < args->buffer_count; i++) {
                                        end_user);
                }
  end_user:
-               user_access_end();
+               user_write_access_end();
  end:;
        }
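For reference, a minimal sketch of the split user-access API adopted above (uptr/field/value are hypothetical names; the pattern is a write-only window with unsafe_put_user() inside and a single failure label):

	if (!user_write_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_put_user(value, &uptr->field, efault);
	user_write_access_end();
	return 0;

efault:
	user_write_access_end();
	return -EFAULT;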
  
index 9d306dc9849d9516cb316e67b04d687aabb60e5d,fe45bd4d63a5771bd87fde323be5eb81a8869768..fe27c5b344e3fe9c64ba448d31a655cae2ab1bc7
@@@ -93,7 -93,7 +93,7 @@@ i915_gem_mmap_ioctl(struct drm_device *
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
  
-               if (down_write_killable(&mm->mmap_sem)) {
+               if (mmap_write_lock_killable(mm)) {
                        addr = -EINTR;
                        goto err;
                }
                                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                else
                        addr = -ENOMEM;
-               up_write(&mm->mmap_sem);
+               mmap_write_unlock(mm);
                if (IS_ERR_VALUE(addr))
                        goto err;
        }
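This hunk is one instance of the tree-wide mmap locking API conversion picked up by the merge; the mapping is mechanical (a comment-table sketch, not driver code):

	/*
	 *   down_read(&mm->mmap_sem)            ->  mmap_read_lock(mm)
	 *   up_read(&mm->mmap_sem)              ->  mmap_read_unlock(mm)
	 *   down_write_killable(&mm->mmap_sem)  ->  mmap_write_lock_killable(mm)
	 *   up_write(&mm->mmap_sem)             ->  mmap_write_unlock(mm)
	 */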
@@@ -216,12 -216,12 +216,12 @@@ static vm_fault_t i915_error_to_vmf_fau
        case -ENXIO: /* unable to access backing store (on device) */
                return VM_FAULT_SIGBUS;
  
 -      case -ENOSPC: /* shmemfs allocation failure */
        case -ENOMEM: /* our allocation failure */
                return VM_FAULT_OOM;
  
        case 0:
        case -EAGAIN:
 +      case -ENOSPC: /* transient failure to evict? */
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
index f4277afb89eb8f92c42672ca6bcae47f961f635e,7fe9831aa9bab9b7591b6c6855553ee67848fcda..28147aab47b9ae60e8cef05378524b6d73833193
@@@ -10,8 -10,6 +10,6 @@@
  
  #include <drm/drm.h> /* for drm_legacy.h! */
  #include <drm/drm_cache.h>
- #include <drm/drm_legacy.h> /* for drm_pci.h! */
- #include <drm/drm_pci.h>
  
  #include "gt/intel_gt.h"
  #include "i915_drv.h"
@@@ -29,7 -27,7 +27,7 @@@ static int i915_gem_object_get_pages_ph
        void *dst;
        int i;
  
 -      if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
 +      if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;
  
        /*
@@@ -142,7 -140,6 +140,7 @@@ static void phys_release(struct drm_i91
  }
  
  static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 +      .name = "i915_gem_object_phys",
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
  
index 2adc0ea429fb308f320ff4416b389c673360c09e,c31a6744daee9176d3345d74b3ed7f3d39a2772f..9c53eb88340095d552503f8eee245bf4769749be
@@@ -200,10 -200,10 +200,10 @@@ i915_mmu_notifier_find(struct i915_mm_s
        if (IS_ERR(mn))
                err = PTR_ERR(mn);
  
-       down_write(&mm->mm->mmap_sem);
+       mmap_write_lock(mm->mm);
        mutex_lock(&mm->i915->mm_lock);
        if (mm->mn == NULL && !err) {
-               /* Protected by mmap_sem (write-lock) */
+               /* Protected by mmap_lock (write-lock) */
                err = __mmu_notifier_register(&mn->mn, mm->mm);
                if (!err) {
                        /* Protected by mm_lock */
                err = 0;
        }
        mutex_unlock(&mm->i915->mm_lock);
-       up_write(&mm->mm->mmap_sem);
+       mmap_write_unlock(mm->mm);
  
        if (mn && !IS_ERR(mn))
                kfree(mn);
@@@ -235,7 -235,7 +235,7 @@@ i915_gem_userptr_init__mmu_notifier(str
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
  
 -      if (WARN_ON(obj->userptr.mm == NULL))
 +      if (GEM_WARN_ON(!obj->userptr.mm))
                return -EINVAL;
  
        mn = i915_mmu_notifier_find(obj->userptr.mm);
@@@ -468,10 -468,10 +468,10 @@@ __i915_gem_userptr_get_pages_worker(str
                if (mmget_not_zero(mm)) {
                        while (pinned < npages) {
                                if (!locked) {
-                                       down_read(&mm->mmap_sem);
+                                       mmap_read_lock(mm);
                                        locked = 1;
                                }
-                               ret = get_user_pages_remote
+                               ret = pin_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                pinned += ret;
                        }
                        if (locked)
-                               up_read(&mm->mmap_sem);
+                               mmap_read_unlock(mm);
                        mmput(mm);
                }
        }
        }
        mutex_unlock(&obj->mm.lock);
  
-       release_pages(pvec, pinned);
+       unpin_user_pages(pvec, pinned);
        kvfree(pvec);
  
        i915_gem_object_put(obj);
@@@ -522,8 -522,8 +522,8 @@@ __i915_gem_userptr_get_pages_schedule(s
  
        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
-        * to the user pages requires mmap_sem, and we have
-        * a strict lock ordering of mmap_sem, struct_mutex -
+        * to the user pages requires mmap_lock, and we have
+        * a strict lock ordering of mmap_lock, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
@@@ -564,6 -564,7 +564,7 @@@ static int i915_gem_userptr_get_pages(s
        struct sg_table *pages;
        bool active;
        int pinned;
+       unsigned int gup_flags = 0;
  
        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
                                      GFP_KERNEL |
                                      __GFP_NORETRY |
                                      __GFP_NOWARN);
-               if (pvec) /* defer to worker if malloc fails */
-                       pinned = __get_user_pages_fast(obj->userptr.ptr,
-                                                      num_pages,
-                                                      !i915_gem_object_is_readonly(obj),
-                                                      pvec);
+               /*
+                * Using __get_user_pages_fast() with a read-only
+                * access is questionable. A read-only page may be
+                * COW-broken, and then this might end up giving
+                * the wrong side of the COW..
+                *
+                * We may or may not care.
+                */
+               if (pvec) {
+                       /* defer to worker if malloc fails */
+                       if (!i915_gem_object_is_readonly(obj))
+                               gup_flags |= FOLL_WRITE;
+                       pinned = pin_user_pages_fast_only(obj->userptr.ptr,
+                                                         num_pages, gup_flags,
+                                                         pvec);
+               }
        }
  
        active = false;
                __i915_gem_userptr_set_active(obj, true);
  
        if (IS_ERR(pages))
-               release_pages(pvec, pinned);
+               unpin_user_pages(pvec, pinned);
        kvfree(pvec);
  
        return PTR_ERR_OR_ZERO(pages);
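The gup conversion in this file follows the FOLL_PIN accounting rule: pages taken with the pin_user_pages*() family must be released with unpin_user_page*(), never put_page() or release_pages(). A rough mapping (the bool write argument of __get_user_pages_fast() becomes FOLL_WRITE in gup_flags):

	/*
	 *   get_user_pages_remote(...)    ->  pin_user_pages_remote(...)
	 *   __get_user_pages_fast(...)    ->  pin_user_pages_fast_only(...)
	 *   release_pages(pvec, pinned)   ->  unpin_user_pages(pvec, pinned)
	 *   put_page(page)                ->  unpin_user_page(page)
	 */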
@@@ -675,7 -687,7 +687,7 @@@ i915_gem_userptr_put_pages(struct drm_i
                }
  
                mark_page_accessed(page);
-               put_page(page);
+               unpin_user_page(page);
        }
        obj->mm.dirty = false;
  
@@@ -700,7 -712,6 +712,7 @@@ i915_gem_userptr_dmabuf_export(struct d
  }
  
  static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 +      .name = "i915_gem_object_userptr",
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE |
                 I915_GEM_OBJECT_NO_MMAP |
index 776a73a19503793fe6faec549c28ead8b6ae4389,a1696e9ce4b6c07c69d20b4dc65e140923bd443e..7ba16ddfe75f3b745bd646469149ceeed3849945
@@@ -199,10 -199,8 +199,10 @@@ static void emulate_monitor_status_chan
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
  
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
 -          IS_COFFEELAKE(dev_priv)) {
 +      if (IS_SKYLAKE(dev_priv) ||
 +          IS_KABYLAKE(dev_priv) ||
 +          IS_COFFEELAKE(dev_priv) ||
 +          IS_COMETLAKE(dev_priv)) {
                vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
                                SDE_PORTE_HOTPLUG_SPT);
                vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
                                SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
                                SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
                                SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
-               vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
-                               LCPLL_PLL_ENABLE |
-                               LCPLL_PLL_LOCK;
-               vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+               /*
+                * Only one pipe is enabled in the current vGPU display, and
+                * PIPE_A is tied to TRANSCODER_A in HW, so it's safe to
+                * assume PIPE_A and TRANSCODER_A can be enabled. PORT_x
+                * depends on the input of setup_virtual_dp_monitor; we can
+                * bind DPLL0 to any PORT_x, so we fix on DPLL0 here.
+                * Set up DPLL0: DP link clk 1620 MHz, non-SSC, DP mode.
+                */
+               vgpu_vreg_t(vgpu, DPLL_CTRL1) =
+                       DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
+               vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
+                       DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
+               vgpu_vreg_t(vgpu, LCPLL1_CTL) =
+                       LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
+               vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
+               /*
+                * Golden M/N are calculated based on:
+                *   24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+                *   DP link clk 1620 MHz and non-constant_n.
+                * TODO: calculate DP link symbol clk and stream clk m/n.
+                */
+               vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+               vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+               vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+               vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+               vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
        }
  
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+                       ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
                vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
        }
  
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+                       ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
        }
  
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+                       ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
        }
  
 -      if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
 -           IS_COFFEELAKE(dev_priv)) &&
 +      if ((IS_SKYLAKE(dev_priv) ||
 +           IS_KABYLAKE(dev_priv) ||
 +           IS_COFFEELAKE(dev_priv) ||
 +           IS_COMETLAKE(dev_priv)) &&
                        intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
        }
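A quick sanity check of the golden data M/N programmed above (my arithmetic, not part of the patch), using the standard DP data M/N ratio bpp * pixel_clock / (8 * nlanes * link_clock):

	/*
	 * (24 * 154000) / (8 * 4 * 162000) = 3696000 / 5184000 ~= 0.71296
	 * 0x5b425e / 0x800000 = 5980766 / 8388608 ~= 0.71296, with TU size 64
	 * from (63 << TU_SIZE_SHIFT). The link M/N pair is left to the TODO.
	 */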
@@@ -463,10 -498,8 +502,10 @@@ void intel_vgpu_emulate_hotplug(struct 
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
  
        /* TODO: add more platforms support */
 -      if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
 -          IS_COFFEELAKE(i915)) {
 +      if (IS_SKYLAKE(i915) ||
 +          IS_KABYLAKE(i915) ||
 +          IS_COFFEELAKE(i915) ||
 +          IS_COMETLAKE(i915)) {
                if (connected) {
                        vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
                                SFUSE_STRAP_DDID_DETECTED;
@@@ -494,10 -527,8 +533,10 @@@ void intel_vgpu_clean_display(struct in
  {
        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
  
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
 -          IS_COFFEELAKE(dev_priv))
 +      if (IS_SKYLAKE(dev_priv) ||
 +          IS_KABYLAKE(dev_priv) ||
 +          IS_COFFEELAKE(dev_priv) ||
 +          IS_COMETLAKE(dev_priv))
                clean_virtual_dp_monitor(vgpu, PORT_D);
        else
                clean_virtual_dp_monitor(vgpu, PORT_B);
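The same four-platform test now appears in several hunks of this file; purely as an illustration (not part of the patch), it could be captured once in a local helper:

	/* Illustrative only; vgpu_has_spt_style_display() is a made-up name. */
	static inline bool vgpu_has_spt_style_display(struct drm_i915_private *i915)
	{
		return IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
		       IS_COFFEELAKE(i915) || IS_COMETLAKE(i915);
	}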
@@@ -520,10 -551,8 +559,10 @@@ int intel_vgpu_init_display(struct inte
  
        intel_vgpu_init_i2c_edid(vgpu);
  
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
 -          IS_COFFEELAKE(dev_priv))
 +      if (IS_SKYLAKE(dev_priv) ||
 +          IS_KABYLAKE(dev_priv) ||
 +          IS_COFFEELAKE(dev_priv) ||
 +          IS_COMETLAKE(dev_priv))
                return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
                                                resolution);
        else
index f5dc52a80fe5090e674de44e9665471fd37266c5,0fb1df71c637c088ad8292275ed6388dd7f85f6c..3c3b9842bbbdcf9cd7a726c8ce2a650a0018df4b
@@@ -348,7 -348,7 +348,7 @@@ static int copy_workload_to_ring_buffer
        u32 *cs;
        int err;
  
 -      if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
 +      if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
                intel_vgpu_restore_inhibit_context(vgpu, req);
  
        /*
@@@ -416,7 -416,11 +416,11 @@@ static void set_context_ppgtt_from_shad
                for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
                        struct i915_page_directory * const pd =
                                i915_pd_entry(ppgtt->pd, i);
+                       /*
+                        * Skip for now: the current i915 ppgtt allocator
+                        * won't allocate a top-level pdp for non-4-level
+                        * tables; this doesn't impact the shadow ppgtt.
+                        */
+                       if (!pd)
+                               break;
                        px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                }
        }
@@@ -505,18 -509,26 +509,18 @@@ static int prepare_shadow_batch_buffer(
                        bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
                                + bb->bb_offset;
  
 -              if (bb->ppgtt) {
 -                      /* for non-priv bb, scan&shadow is only for
 -                       * debugging purpose, so the content of shadow bb
 -                       * is the same as original bb. Therefore,
 -                       * here, rather than switch to shadow bb's gma
 -                       * address, we directly use original batch buffer's
 -                       * gma address, and send original bb to hardware
 -                       * directly
 -                       */
 -                      if (bb->clflush & CLFLUSH_AFTER) {
 -                              drm_clflush_virt_range(bb->va,
 -                                              bb->obj->base.size);
 -                              bb->clflush &= ~CLFLUSH_AFTER;
 -                      }
 -                      i915_gem_object_finish_access(bb->obj);
 -                      bb->accessing = false;
 -
 -              } else {
 +              /*
 +               * For non-priv bb, scan&shadow is only for
 +               * debugging purpose, so the content of shadow bb
 +               * is the same as original bb. Therefore,
 +               * here, rather than switch to shadow bb's gma
 +               * address, we directly use original batch buffer's
 +               * gma address, and send original bb to hardware
 +               * directly
 +               */
 +              if (!bb->ppgtt) {
                        bb->vma = i915_gem_object_ggtt_pin(bb->obj,
 -                                      NULL, 0, 0, 0);
 +                                                         NULL, 0, 0, 0);
                        if (IS_ERR(bb->vma)) {
                                ret = PTR_ERR(bb->vma);
                                goto err;
                        if (gmadr_bytes == 8)
                                bb->bb_start_cmd_va[2] = 0;
  
 -                      /* No one is going to touch shadow bb from now on. */
 -                      if (bb->clflush & CLFLUSH_AFTER) {
 -                              drm_clflush_virt_range(bb->va,
 -                                              bb->obj->base.size);
 -                              bb->clflush &= ~CLFLUSH_AFTER;
 -                      }
 -
 -                      ret = i915_gem_object_set_to_gtt_domain(bb->obj,
 -                                                              false);
 -                      if (ret)
 -                              goto err;
 -
                        ret = i915_vma_move_to_active(bb->vma,
                                                      workload->req,
                                                      0);
                        if (ret)
                                goto err;
 -
 -                      i915_gem_object_finish_access(bb->obj);
 -                      bb->accessing = false;
                }
 +
 +              /* No one is going to touch shadow bb from now on. */
 +              i915_gem_object_flush_map(bb->obj);
        }
        return 0;
  err:
@@@ -606,6 -630,9 +610,6 @@@ static void release_shadow_batch_buffer
  
        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                if (bb->obj) {
 -                      if (bb->accessing)
 -                              i915_gem_object_finish_access(bb->obj);
 -
                        if (bb->va && !IS_ERR(bb->va))
                                i915_gem_object_unpin_map(bb->obj);
  
@@@ -912,7 -939,7 +916,7 @@@ static void update_guest_context(struc
        context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
  
 -      if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
 +      if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
                context_page_num = 19;
  
        context_base = (void *) ctx->lrc_reg_state -
index 242f59910c1901454d9430c29134f407edbcc80e,bca036ac662129414d38168f17fd85db68b684e8..8594a8ef08ce9ddef07038ff81f6c4c656512973
@@@ -64,7 -64,7 +64,7 @@@ static int i915_capabilities(struct seq
        intel_driver_caps_print(&i915->caps, &p);
  
        kernel_param_lock(THIS_MODULE);
 -      i915_params_dump(&i915_modparams, &p);
 +      i915_params_dump(&i915->params, &p);
        kernel_param_unlock(THIS_MODULE);
  
        return 0;
@@@ -1898,7 -1898,7 +1898,7 @@@ static const struct i915_debugfs_files 
  #endif
  };
  
- int i915_debugfs_register(struct drm_i915_private *dev_priv)
+ void i915_debugfs_register(struct drm_i915_private *dev_priv)
  {
        struct drm_minor *minor = dev_priv->drm.primary;
        int i;
                                    i915_debugfs_files[i].fops);
        }
  
-       return drm_debugfs_create_files(i915_debugfs_list,
-                                       I915_DEBUGFS_ENTRIES,
-                                       minor->debugfs_root, minor);
+       drm_debugfs_create_files(i915_debugfs_list,
+                                I915_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
  }
index da991d1967a2a96d545b13bb17e0cd97d21fd969,34ee12f3f02d465d4ad7703548080fb97ba8b3a2..67102dc26fcec17180eb34095242c71b5ec5210f
@@@ -43,6 -43,7 +43,7 @@@
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_ioctl.h>
  #include <drm/drm_irq.h>
+ #include <drm/drm_managed.h>
  #include <drm/drm_probe_helper.h>
  
  #include "display/intel_acpi.h"
@@@ -500,8 -501,6 +501,8 @@@ static void i915_driver_late_release(st
  
        cpu_latency_qos_remove_request(&dev_priv->sb_qos);
        mutex_destroy(&dev_priv->sb_lock);
 +
 +      i915_params_free(&dev_priv->params);
  }
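For orientation, the per-device parameter lifecycle these hunks implement, condensed into one sketch (call sites taken from the surrounding diff):

	i915_params_copy(&i915->params, &i915_modparams); /* probe: snapshot modparams */
	/* ... runtime reads use i915->params.<field> instead of i915_modparams ... */
	i915_params_free(&i915->params);                  /* late release, as above */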
  
  /**
@@@ -907,24 -906,15 +908,18 @@@ i915_driver_create(struct pci_dev *pdev
                (struct intel_device_info *)ent->driver_data;
        struct intel_device_info *device_info;
        struct drm_i915_private *i915;
-       int err;
  
-       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
-       if (!i915)
-               return ERR_PTR(-ENOMEM);
-       err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
-       if (err) {
-               kfree(i915);
-               return ERR_PTR(err);
-       }
+       i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+                                 struct drm_i915_private, drm);
+       if (IS_ERR(i915))
+               return i915;
  
        i915->drm.pdev = pdev;
        pci_set_drvdata(pdev, i915);
  
 +      /* Device parameters start as a copy of module parameters. */
 +      i915_params_copy(&i915->params, &i915_modparams);
 +
        /* Setup the write-once "constant" device info */
        device_info = mkwrite_device_info(i915);
        memcpy(device_info, match_info, sizeof(*device_info));
        return i915;
  }
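A minimal sketch of the managed allocation pattern adopted here, with a hypothetical driver struct (my_driver and my_drm_driver are placeholders): the drm_device is embedded, and teardown happens automatically on the final drm_dev_put(), which is why i915_driver_destroy() disappears below.

	struct my_driver {
		struct drm_device drm;  /* must be the embedded member named below */
		/* ... driver-private state ... */
	};

	struct my_driver *priv =
		devm_drm_dev_alloc(&pdev->dev, &my_drm_driver, struct my_driver, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);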
  
- static void i915_driver_destroy(struct drm_i915_private *i915)
- {
-       struct pci_dev *pdev = i915->drm.pdev;
-       drm_dev_fini(&i915->drm);
-       kfree(i915);
-       /* And make sure we never chase our dangling pointer from pci_dev */
-       pci_set_drvdata(pdev, NULL);
- }
  /**
   * i915_driver_probe - setup chip and create an initial config
   * @pdev: PCI device
@@@ -969,7 -948,7 +953,7 @@@ int i915_driver_probe(struct pci_dev *p
                return PTR_ERR(i915);
  
        /* Disable nuclear pageflip by default on pre-ILK */
 -      if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
 +      if (!i915->params.nuclear_pageflip && match_info->gen < 5)
                i915->drm.driver_features &= ~DRIVER_ATOMIC;
  
        /*
  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
        if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
                if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
 -                  i915_modparams.fake_lmem_start) {
 +                  i915->params.fake_lmem_start) {
                        mkwrite_device_info(i915)->memory_regions =
                                REGION_SMEM | REGION_LMEM | REGION_STOLEN;
                        mkwrite_device_info(i915)->is_dgfx = true;
  
        i915_welcome_messages(i915);
  
+       i915->do_release = true;
        return 0;
  
  out_cleanup_irq:
@@@ -1046,7 -1027,6 +1032,6 @@@ out_pci_disable
        pci_disable_device(pdev);
  out_fini:
        i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
-       i915_driver_destroy(i915);
        return ret;
  }
  
@@@ -1086,6 -1066,9 +1071,9 @@@ static void i915_driver_release(struct 
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
  
+       if (!dev_priv->do_release)
+               return;
+ 
        disable_rpm_wakeref_asserts(rpm);
  
        i915_gem_driver_release(dev_priv);
        intel_runtime_pm_driver_release(rpm);
  
        i915_driver_late_release(dev_priv);
-       i915_driver_destroy(dev_priv);
  }
  
  static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
index 2697960f15a9d3d2335d76e1fcbefcfa7561cc71,adb9bf34cf97a3bb21690c8ec73bbeb382a0f7b3..9aad3ec979bdb167fc9562402d0af342cf5faa63
@@@ -273,7 -273,6 +273,7 @@@ struct drm_i915_display_funcs 
        void (*set_cdclk)(struct drm_i915_private *dev_priv,
                          const struct intel_cdclk_config *cdclk_config,
                          enum pipe pipe);
 +      int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
        int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
@@@ -827,9 -826,9 +827,12 @@@ struct i915_selftest_stash 
  struct drm_i915_private {
        struct drm_device drm;
  
+       /* FIXME: Device release actions should all be moved to drmm_ */
+       bool do_release;
+ 
 +      /* i915 device parameters */
 +      struct i915_params params;
 +
        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
                struct intel_global_obj obj;
        } cdclk;
  
 +      struct {
 +              /* The current hardware dbuf configuration */
 +              u8 enabled_slices;
 +
 +              struct intel_global_obj obj;
 +      } dbuf;
 +
        /**
         * wq - Driver workqueue for GEM.
         *
                 * Set during HW readout of watermarks/DDB.  Some platforms
                 * need to know when we're still using BIOS-provided values
                 * (which we don't fully trust).
 +               *
 +               * FIXME get rid of this.
                 */
                bool distrust_bios_wm;
        } wm;
  
 -      u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
 -
        struct dram_info {
                bool valid;
                bool is_16gb_dimm;
@@@ -1264,11 -1256,6 +1267,11 @@@ static inline struct drm_i915_private *
             (engine__); \
             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
  
 +#define for_each_uabi_class_engine(engine__, class__, i915__) \
 +      for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
 +           (engine__) && (engine__)->uabi_class == (class__); \
 +           (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
 +
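Illustrative use of the new iterator (assuming engine and i915 are in scope; I915_ENGINE_CLASS_VIDEO is the uapi class id):

	struct intel_engine_cs *engine;
	unsigned int nvcs = 0;

	for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_VIDEO, i915)
		nvcs++;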
  #define I915_GTT_OFFSET_NONE ((u32)-1)
  
  /*
@@@ -1419,12 -1406,10 +1422,12 @@@ IS_SUBPLATFORM(const struct drm_i915_pr
  #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
  #define IS_GEMINILAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
  #define IS_COFFEELAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 +#define IS_COMETLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
  #define IS_CANNONLAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
  #define IS_ICELAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_ICELAKE)
  #define IS_ELKHARTLAKE(dev_priv)      IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
  #define IS_TIGERLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
 +#define IS_ROCKETLAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
  #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
  #define IS_BDW_ULT(dev_priv) \
                                 INTEL_INFO(dev_priv)->gt == 2)
  #define IS_CFL_GT3(dev_priv)  (IS_COFFEELAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 3)
 +
 +#define IS_CML_ULT(dev_priv) \
 +      IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
 +#define IS_CML_ULX(dev_priv) \
 +      IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
 +#define IS_CML_GT2(dev_priv)  (IS_COMETLAKE(dev_priv) && \
 +                               INTEL_INFO(dev_priv)->gt == 2)
 +
  #define IS_CNL_WITH_PORT_F(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
  #define IS_ICL_WITH_PORT_F(dev_priv) \
  #define IS_TGL_REVID(p, since, until) \
        (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
  
 +#define RKL_REVID_A0          0x0
 +#define RKL_REVID_B0          0x1
 +#define RKL_REVID_C0          0x4
 +
 +#define IS_RKL_REVID(p, since, until) \
 +      (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
 +
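A hypothetical workaround guard using the new helpers (wa_foo() is a placeholder, not a real function):

	if (IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_B0))
		wa_foo(dev_priv);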
  #define IS_LP(dev_priv)       (INTEL_INFO(dev_priv)->is_lp)
  #define IS_GEN9_LP(dev_priv)  (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
  #define IS_GEN9_BC(dev_priv)  (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
  #define HAS_DDI(dev_priv)              (INTEL_INFO(dev_priv)->display.has_ddi)
  #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
  #define HAS_PSR(dev_priv)              (INTEL_INFO(dev_priv)->display.has_psr)
 +#define HAS_PSR_HW_TRACKING(dev_priv) \
 +      (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
  #define HAS_TRANSCODER(dev_priv, trans)        ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
  
  #define HAS_RC6(dev_priv)              (INTEL_INFO(dev_priv)->has_rc6)
  #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
  
  /* Only valid when HAS_DISPLAY() is true */
 -#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
 +#define INTEL_DISPLAY_ENABLED(dev_priv) \
 +      (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
  
  static inline bool intel_vtd_active(void)
  {
index 498d8c982540e4226ff2f093496b263073ca7759,eb0b5be7c35d33305fbb490aaf0aa2c1e83b0fce..e5fdf17cd9cddfd5bb07d772df6bb177486bf296
@@@ -536,7 -536,6 +536,7 @@@ static const struct intel_device_info v
        .display.has_ddi = 1, \
        .has_fpga_dbg = 1, \
        .display.has_psr = 1, \
 +      .display.has_psr_hw_tracking = 1, \
        .display.has_dp_mst = 1, \
        .has_rc6p = 0 /* RC6p removed by HSW */, \
        HSW_PIPE_OFFSETS, \
@@@ -691,7 -690,6 +691,7 @@@ static const struct intel_device_info s
        .display.has_fbc = 1, \
        .display.has_hdcp = 1, \
        .display.has_psr = 1, \
 +      .display.has_psr_hw_tracking = 1, \
        .has_runtime_pm = 1, \
        .display.has_csr = 1, \
        .has_rc6 = 1, \
@@@ -768,20 -766,6 +768,20 @@@ static const struct intel_device_info c
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
  };
  
 +#define CML_PLATFORM \
 +      GEN9_FEATURES, \
 +      PLATFORM(INTEL_COMETLAKE)
 +
 +static const struct intel_device_info cml_gt1_info = {
 +      CML_PLATFORM,
 +      .gt = 1,
 +};
 +
 +static const struct intel_device_info cml_gt2_info = {
 +      CML_PLATFORM,
 +      .gt = 2,
 +};
 +
  #define GEN10_FEATURES \
        GEN9_FEATURES, \
        GEN(10), \
@@@ -804,7 -788,6 +804,7 @@@ static const struct intel_device_info c
  #define GEN11_FEATURES \
        GEN10_FEATURES, \
        GEN11_DEFAULT_PAGE_SIZES, \
 +      .abox_mask = BIT(0), \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
                BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
@@@ -848,7 -831,6 +848,7 @@@ static const struct intel_device_info e
  #define GEN12_FEATURES \
        GEN11_FEATURES, \
        GEN(12), \
 +      .abox_mask = GENMASK(2, 1), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
@@@ -881,19 -863,6 +881,19 @@@ static const struct intel_device_info t
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
  };
  
 +static const struct intel_device_info rkl_info = {
 +      GEN12_FEATURES,
 +      PLATFORM(INTEL_ROCKETLAKE),
 +      .abox_mask = BIT(0),
 +      .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
 +      .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
 +              BIT(TRANSCODER_C),
 +      .require_force_probe = 1,
 +      .display.has_psr_hw_tracking = 0,
 +      .engine_mask =
 +              BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
 +};
 +
  #define GEN12_DGFX_FEATURES \
        GEN12_FEATURES, \
        .is_dgfx = 1
@@@ -964,15 -933,14 +964,15 @@@ static const struct pci_device_id pciid
        INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
        INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
        INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
 -      INTEL_CML_GT1_IDS(&cfl_gt1_info),
 -      INTEL_CML_GT2_IDS(&cfl_gt2_info),
 -      INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
 -      INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
 +      INTEL_CML_GT1_IDS(&cml_gt1_info),
 +      INTEL_CML_GT2_IDS(&cml_gt2_info),
 +      INTEL_CML_U_GT1_IDS(&cml_gt1_info),
 +      INTEL_CML_U_GT2_IDS(&cml_gt2_info),
        INTEL_CNL_IDS(&cnl_info),
        INTEL_ICL_11_IDS(&icl_info),
        INTEL_EHL_IDS(&ehl_info),
        INTEL_TGL_12_IDS(&tgl_info),
 +      INTEL_RKL_IDS(&rkl_info),
        {0, 0, 0}
  };
  MODULE_DEVICE_TABLE(pci, pciidlist);
@@@ -987,8 -955,6 +987,6 @@@ static void i915_pci_remove(struct pci_
  
        i915_driver_remove(i915);
        pci_set_drvdata(pdev, NULL);
-       drm_dev_put(&i915->drm);
  }
  
  /* is device_id present in comma separated list of ids */
index 9a8fdd3ac6bd0dcbba310a0815974ed6716fbc32,e75c528ebbe006f5a2712721666222a319e7a108..c1ebda9b5627b2695df6b94e2b0809b802d30021
@@@ -25,10 -25,6 +25,6 @@@ static int copy_query_item(void *query_
                           query_sz))
                return -EFAULT;
  
-       if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
-                      total_length))
-               return -EFAULT;
        return 0;
  }
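The access_ok() pre-check can go because the plain uaccess helpers used throughout the rest of this patch, unlike their double-underscore variants, validate the user range themselves. A minimal sketch of the resulting idiom (hypothetical read_user_u32(), assuming <linux/uaccess.h>):

    static int read_user_u32(u32 __user *ptr, u32 *out)
    {
            /* get_user() performs the access_ok() range check internally */
            if (get_user(*out, ptr))
                    return -EFAULT;
            return 0;
    }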
  
@@@ -72,20 -68,20 +68,20 @@@ static int query_topology_info(struct d
        topo.eu_offset = slice_length + subslice_length;
        topo.eu_stride = sseu->eu_stride;
  
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
                           &topo, sizeof(topo)))
                return -EFAULT;
  
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
                           &sseu->slice_mask, slice_length))
                return -EFAULT;
  
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
                                           sizeof(topo) + slice_length),
                           sseu->subslice_mask, subslice_length))
                return -EFAULT;
  
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
                                           sizeof(topo) +
                                           slice_length + subslice_length),
                           sseu->eu_mask, eu_length))
@@@ -113,7 -109,8 +109,7 @@@ query_engine_info(struct drm_i915_priva
        for_each_uabi_engine(engine, i915)
                num_uabi_engines++;
  
 -      len = sizeof(struct drm_i915_query_engine_info) +
 -            num_uabi_engines * sizeof(struct drm_i915_engine_info);
 +      len = struct_size(query_ptr, engines, num_uabi_engines);
  
        ret = copy_query_item(&query, sizeof(query), len, query_item);
        if (ret != 0)
                info.engine.engine_instance = engine->uabi_instance;
                info.capabilities = engine->uabi_capabilities;
  
-               if (__copy_to_user(info_ptr, &info, sizeof(info)))
+               if (copy_to_user(info_ptr, &info, sizeof(info)))
                        return -EFAULT;
  
                query.num_engines++;
                info_ptr++;
        }
  
-       if (__copy_to_user(query_ptr, &query, sizeof(query)))
+       if (copy_to_user(query_ptr, &query, sizeof(query)))
                return -EFAULT;
  
        return len;
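struct_size() (<linux/overflow.h>) computes sizeof(*ptr) plus the flexible-array payload and saturates to SIZE_MAX on overflow instead of wrapping, so an absurd engine count cannot yield a short length. A self-contained sketch with hypothetical types:

    struct example_item {
            u64 value;
    };

    struct example_query {
            u32 num_items;
            struct example_item items[]; /* flexible array member */
    };

    static size_t example_query_len(const struct example_query *q, u32 n)
    {
            /* sizeof(*q) + n * sizeof(q->items[0]), saturating on overflow */
            return struct_size(q, items, n);
    }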
@@@ -157,10 -154,6 +153,6 @@@ static int can_copy_perf_config_registe
        if (user_n_regs < kernel_n_regs)
                return -EINVAL;
  
-       if (!access_ok(u64_to_user_ptr(user_regs_ptr),
-                      2 * sizeof(u32) * kernel_n_regs))
-               return -EFAULT;
        return 0;
  }
  
@@@ -169,6 -162,7 +161,7 @@@ static int copy_perf_config_registers_o
                                                u64 user_regs_ptr,
                                                u32 *user_n_regs)
  {
+       u32 __user *p = u64_to_user_ptr(user_regs_ptr);
        u32 r;
  
        if (*user_n_regs == 0) {
  
        *user_n_regs = kernel_n_regs;
  
-       for (r = 0; r < kernel_n_regs; r++) {
-               u32 __user *user_reg_ptr =
-                       u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
-               u32 __user *user_val_ptr =
-                       u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
-                                       sizeof(u32));
-               int ret;
-               ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
-                                user_reg_ptr);
-               if (ret)
-                       return -EFAULT;
+       if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
+               return -EFAULT;
  
-               ret = __put_user(kernel_regs[r].value, user_val_ptr);
-               if (ret)
-                       return -EFAULT;
+       for (r = 0; r < kernel_n_regs; r++, p += 2) {
+               unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+                               p, Efault);
+               unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
        }
+       user_write_access_end();
        return 0;
+ Efault:
+       user_write_access_end();
+       return -EFAULT;
  }
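The rewritten loop above batches the user writes under a single user_write_access_begin() (one access_ok() plus one SMAP/PAN open window) and lets unsafe_put_user() jump to a shared unlock label on fault. A self-contained sketch of the same pattern (hypothetical write_pairs(), assuming <linux/uaccess.h>):

    static int write_pairs(u32 __user *p, const u32 *vals, u32 n)
    {
            u32 i;

            if (!user_write_access_begin(p, 2 * sizeof(u32) * n))
                    return -EFAULT;
            for (i = 0; i < n; i++, p += 2) {
                    unsafe_put_user(vals[2 * i], p, efault);
                    unsafe_put_user(vals[2 * i + 1], p + 1, efault);
            }
            user_write_access_end();
            return 0;
    efault:
            user_write_access_end();
            return -EFAULT;
    }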
  
  static int query_perf_config_data(struct drm_i915_private *i915,
                return -EINVAL;
        }
  
-       if (!access_ok(user_query_config_ptr, total_size))
-               return -EFAULT;
-       if (__get_user(flags, &user_query_config_ptr->flags))
+       if (get_user(flags, &user_query_config_ptr->flags))
                return -EFAULT;
  
        if (flags != 0)
                BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
  
                memset(&uuid, 0, sizeof(uuid));
-               if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+               if (copy_from_user(uuid, user_query_config_ptr->uuid,
                                     sizeof(user_query_config_ptr->uuid)))
                        return -EFAULT;
  
                }
                rcu_read_unlock();
        } else {
-               if (__get_user(config_id, &user_query_config_ptr->config))
+               if (get_user(config_id, &user_query_config_ptr->config))
                        return -EFAULT;
  
                oa_config = i915_perf_get_oa_config(perf, config_id);
        if (!oa_config)
                return -ENOENT;
  
-       if (__copy_from_user(&user_config, user_config_ptr,
-                            sizeof(user_config))) {
+       if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
                ret = -EFAULT;
                goto out;
        }
  
        memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
  
-       if (__copy_to_user(user_config_ptr, &user_config,
-                          sizeof(user_config))) {
+       if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
                ret = -EFAULT;
                goto out;
        }
index 19e1fed198c3eed3fac9d3b233b192f2071982a9,06cd1d28a176e526dcda0d41b8ae791cf3d55989..f09120cac89aa4c309f8f0b33b4270f365be8258
@@@ -186,7 -186,7 +186,7 @@@ typedef struct 
  
  #define INVALID_MMIO_REG _MMIO(0)
  
- static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
+ static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg)
  {
        return reg.reg;
  }
@@@ -1869,11 -1869,9 +1869,11 @@@ static inline bool i915_mmio_reg_valid(
  #define _ICL_COMBOPHY_A                       0x162000
  #define _ICL_COMBOPHY_B                       0x6C000
  #define _EHL_COMBOPHY_C                       0x160000
 +#define _RKL_COMBOPHY_D                       0x161000
  #define _ICL_COMBOPHY(phy)            _PICK(phy, _ICL_COMBOPHY_A, \
                                              _ICL_COMBOPHY_B, \
 -                                            _EHL_COMBOPHY_C)
 +                                            _EHL_COMBOPHY_C, \
 +                                            _RKL_COMBOPHY_D)
  
  /* CNL/ICL Port CL_DW registers */
  #define _ICL_PORT_CL_DW(dw, phy)      (_ICL_COMBOPHY(phy) + \
  #define LM_FIFO_WATERMARK   0x0000001F
  #define MI_ARB_STATE  _MMIO(0x20e4) /* 915+ only */
  
 -#define MBUS_ABOX_CTL                 _MMIO(0x45038)
 -#define MBUS_ABOX1_CTL                        _MMIO(0x45048)
 -#define MBUS_ABOX2_CTL                        _MMIO(0x4504C)
 +#define _MBUS_ABOX0_CTL                       0x45038
 +#define _MBUS_ABOX1_CTL                       0x45048
 +#define _MBUS_ABOX2_CTL                       0x4504C
 +#define MBUS_ABOX_CTL(x)              _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \
 +                                                  _MBUS_ABOX1_CTL, \
 +                                                  _MBUS_ABOX2_CTL))
  #define MBUS_ABOX_BW_CREDIT_MASK      (3 << 20)
  #define MBUS_ABOX_BW_CREDIT(x)                ((x) << 20)
  #define MBUS_ABOX_B_CREDIT_MASK               (0xF << 16)
  
  /* Clocking configuration register */
  #define CLKCFG                        _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
 -#define CLKCFG_FSB_400                                        (5 << 0)        /* hrawclk 100 */
 +#define CLKCFG_FSB_400                                        (0 << 0)        /* hrawclk 100 */
 +#define CLKCFG_FSB_400_ALT                            (5 << 0)        /* hrawclk 100 */
  #define CLKCFG_FSB_533                                        (1 << 0)        /* hrawclk 133 */
  #define CLKCFG_FSB_667                                        (3 << 0)        /* hrawclk 166 */
  #define CLKCFG_FSB_800                                        (2 << 0)        /* hrawclk 200 */
  #define CLKCFG_FSB_1067                                       (6 << 0)        /* hrawclk 266 */
  #define CLKCFG_FSB_1067_ALT                           (0 << 0)        /* hrawclk 266 */
  #define CLKCFG_FSB_1333                                       (7 << 0)        /* hrawclk 333 */
 -/*
 - * Note that on at least on ELK the below value is reported for both
 - * 333 and 400 MHz BIOS FSB setting, but given that the gmch datasheet
 - * lists only 200/266/333 MHz FSB as supported let's decode it as 333 MHz.
 - */
  #define CLKCFG_FSB_1333_ALT                           (4 << 0)        /* hrawclk 333 */
 +#define CLKCFG_FSB_1600_ALT                           (6 << 0)        /* hrawclk 400 */
  #define CLKCFG_FSB_MASK                                       (7 << 0)
  #define CLKCFG_MEM_533                                        (1 << 4)
  #define CLKCFG_MEM_667                                        (2 << 4)
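Note that several FSB field encodings now overlap: the new CLKCFG_FSB_400 (0 << 0) aliases CLKCFG_FSB_1067_ALT, and CLKCFG_FSB_1600_ALT (6 << 0) aliases CLKCFG_FSB_1067, so the decode has to be chosen per platform. A hedged sketch of one platform's table (hypothetical, not the driver's actual decoder):

    static unsigned int example_fsb_mhz(u32 clkcfg)
    {
            switch (clkcfg & CLKCFG_FSB_MASK) {
            case CLKCFG_FSB_400:    /* (0 << 0) here; 1067_ALT elsewhere */
                    return 400;
            case CLKCFG_FSB_533:
                    return 533;
            case CLKCFG_FSB_667:
                    return 667;
            default:
                    return 0;
            }
    }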
@@@ -4514,39 -4512,25 +4514,39 @@@ enum 
  #define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16) /* Reserved in ICL+ */
  #define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
  
 -#define _PSR2_CTL_A                   0x60900
 -#define _PSR2_CTL_EDP                 0x6f900
 -#define EDP_PSR2_CTL(tran)            _MMIO_TRANS2(tran, _PSR2_CTL_A)
 -#define   EDP_PSR2_ENABLE             (1 << 31)
 -#define   EDP_SU_TRACK_ENABLE         (1 << 30)
 -#define   EDP_Y_COORDINATE_VALID      (1 << 26) /* GLK and CNL+ */
 -#define   EDP_Y_COORDINATE_ENABLE     (1 << 25) /* GLK and CNL+ */
 -#define   EDP_MAX_SU_DISABLE_TIME(t)  ((t) << 20)
 -#define   EDP_MAX_SU_DISABLE_TIME_MASK        (0x1f << 20)
 -#define   EDP_PSR2_TP2_TIME_500us     (0 << 8)
 -#define   EDP_PSR2_TP2_TIME_100us     (1 << 8)
 -#define   EDP_PSR2_TP2_TIME_2500us    (2 << 8)
 -#define   EDP_PSR2_TP2_TIME_50us      (3 << 8)
 -#define   EDP_PSR2_TP2_TIME_MASK      (3 << 8)
 -#define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
 -#define   EDP_PSR2_FRAME_BEFORE_SU_MASK       (0xf << 4)
 -#define   EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
 -#define   EDP_PSR2_IDLE_FRAME_MASK    0xf
 -#define   EDP_PSR2_IDLE_FRAME_SHIFT   0
 +#define _PSR2_CTL_A                           0x60900
 +#define _PSR2_CTL_EDP                         0x6f900
 +#define EDP_PSR2_CTL(tran)                    _MMIO_TRANS2(tran, _PSR2_CTL_A)
 +#define   EDP_PSR2_ENABLE                     (1 << 31)
 +#define   EDP_SU_TRACK_ENABLE                 (1 << 30)
 +#define   TGL_EDP_PSR2_BLOCK_COUNT_NUM_2      (0 << 28)
 +#define   TGL_EDP_PSR2_BLOCK_COUNT_NUM_3      (1 << 28)
 +#define   EDP_Y_COORDINATE_VALID              (1 << 26) /* GLK and CNL+ */
 +#define   EDP_Y_COORDINATE_ENABLE             (1 << 25) /* GLK and CNL+ */
 +#define   EDP_MAX_SU_DISABLE_TIME(t)          ((t) << 20)
 +#define   EDP_MAX_SU_DISABLE_TIME_MASK                (0x1f << 20)
 +#define   EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES   8
 +#define   EDP_PSR2_IO_BUFFER_WAKE(lines)      ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
 +#define   EDP_PSR2_IO_BUFFER_WAKE_MASK                (3 << 13)
 +#define   TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES       5
 +#define   TGL_EDP_PSR2_IO_BUFFER_WAKE(lines)  (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)
 +#define   TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK    (7 << 13)
 +#define   EDP_PSR2_FAST_WAKE_MAX_LINES                8
 +#define   EDP_PSR2_FAST_WAKE(lines)           ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
 +#define   EDP_PSR2_FAST_WAKE_MASK             (3 << 11)
 +#define   TGL_EDP_PSR2_FAST_WAKE_MIN_LINES    5
 +#define   TGL_EDP_PSR2_FAST_WAKE(lines)               (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10)
 +#define   TGL_EDP_PSR2_FAST_WAKE_MASK         (7 << 10)
 +#define   EDP_PSR2_TP2_TIME_500us             (0 << 8)
 +#define   EDP_PSR2_TP2_TIME_100us             (1 << 8)
 +#define   EDP_PSR2_TP2_TIME_2500us            (2 << 8)
 +#define   EDP_PSR2_TP2_TIME_50us              (3 << 8)
 +#define   EDP_PSR2_TP2_TIME_MASK              (3 << 8)
 +#define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT      4
 +#define   EDP_PSR2_FRAME_BEFORE_SU_MASK               (0xf << 4)
 +#define   EDP_PSR2_FRAME_BEFORE_SU(a)         ((a) << 4)
 +#define   EDP_PSR2_IDLE_FRAME_MASK            0xf
 +#define   EDP_PSR2_IDLE_FRAME_SHIFT           0
  
  #define _PSR_EVENT_TRANS_A                    0x60848
  #define _PSR_EVENT_TRANS_B                    0x61848
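Worked example of the wake-line encodings above: pre-TGL IO_BUFFER_WAKE stores (8 - lines) in a 2-bit field at bit 13, so 7 wake lines encode as 1; TGL+ stores (lines - 5) in a 3-bit field at the same bit, so 7 lines encode as 2. FAST_WAKE follows the same scheme, but its TGL+ field moves down to bit 10 to make room for the extra bit.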
  #define _PLANE_CUS_CTL_1_A                    0x701c8
  #define _PLANE_CUS_CTL_2_A                    0x702c8
  #define  PLANE_CUS_ENABLE                     (1 << 31)
 +#define  PLANE_CUS_PLANE_4_RKL                        (0 << 30)
 +#define  PLANE_CUS_PLANE_5_RKL                        (1 << 30)
  #define  PLANE_CUS_PLANE_6                    (0 << 30)
  #define  PLANE_CUS_PLANE_7                    (1 << 30)
  #define  PLANE_CUS_HPHASE_SIGN_NEGATIVE               (1 << 19)
  #define   PLANE_COLOR_INPUT_CSC_ENABLE                (1 << 20) /* ICL+ */
  #define   PLANE_COLOR_PIPE_CSC_ENABLE         (1 << 23) /* Pre-ICL */
  #define   PLANE_COLOR_CSC_MODE_BYPASS                 (0 << 17)
 -#define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709               (1 << 17)
 +#define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601               (1 << 17)
  #define   PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709               (2 << 17)
  #define   PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020     (3 << 17)
  #define   PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020      (4 << 17)
         GEN11_PIPE_PLANE7_FAULT | \
         GEN11_PIPE_PLANE6_FAULT | \
         GEN11_PIPE_PLANE5_FAULT)
 +#define RKL_DE_PIPE_IRQ_FAULT_ERRORS \
 +      (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
 +       GEN11_PIPE_PLANE5_FAULT)
  
  #define GEN8_DE_PORT_ISR _MMIO(0x44440)
  #define GEN8_DE_PORT_IMR _MMIO(0x44444)
  #define  WAIT_FOR_PCH_RESET_ACK               (1 << 1)
  #define  WAIT_FOR_PCH_FLR_ACK         (1 << 0)
  
 -#define BW_BUDDY1_CTL                 _MMIO(0x45140)
 -#define BW_BUDDY2_CTL                 _MMIO(0x45150)
 +#define _BW_BUDDY0_CTL                        0x45130
 +#define _BW_BUDDY1_CTL                        0x45140
 +#define BW_BUDDY_CTL(x)                       _MMIO(_PICK_EVEN(x, \
 +                                                       _BW_BUDDY0_CTL, \
 +                                                       _BW_BUDDY1_CTL))
  #define   BW_BUDDY_DISABLE            REG_BIT(31)
  #define   BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
 +#define   BW_BUDDY_TLB_REQ_TIMER(x)   REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
  
 -#define BW_BUDDY1_PAGE_MASK           _MMIO(0x45144)
 -#define BW_BUDDY2_PAGE_MASK           _MMIO(0x45154)
 +#define _BW_BUDDY0_PAGE_MASK          0x45134
 +#define _BW_BUDDY1_PAGE_MASK          0x45144
 +#define BW_BUDDY_PAGE_MASK(x)         _MMIO(_PICK_EVEN(x, \
 +                                                       _BW_BUDDY0_PAGE_MASK, \
 +                                                       _BW_BUDDY1_PAGE_MASK))
  
  #define HSW_NDE_RSTWRN_OPT    _MMIO(0x46408)
  #define  RESET_PCH_HANDSHAKE_ENABLE   (1 << 4)
  #define   PER_PIXEL_ALPHA_BYPASS_EN           (1 << 7)
  
  #define FF_MODE2                      _MMIO(0x6604)
 +#define   FF_MODE2_GS_TIMER_MASK      REG_GENMASK(31, 24)
 +#define   FF_MODE2_GS_TIMER_224               REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
  #define   FF_MODE2_TDS_TIMER_MASK     REG_GENMASK(23, 16)
  #define   FF_MODE2_TDS_TIMER_128      REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
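For reference, the two indexing helpers behind the new parametrized register macros are defined near the top of i915_reg.h, essentially as (paraphrased):

    #define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
    #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

_PICK_EVEN fits evenly spaced instances such as BW_BUDDY (0x10 apart), while _PICK takes an explicit table and so also handles the irregular MBUS_ABOX spacing (0x45038/0x45048/0x4504C).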
  
index 31ccd0559c55fcc6baf389df82e8d1da3b3653e6,9cb2d7548daadff98b1da0d466c06f314e9f2694..153ca9e65382ebb8690b058a8263d23080f2227f
@@@ -116,9 -116,6 +116,9 @@@ track_intel_runtime_pm_wakeref(struct i
  static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                             depot_stack_handle_t stack)
  {
 +      struct drm_i915_private *i915 = container_of(rpm,
 +                                                   struct drm_i915_private,
 +                                                   runtime_pm);
        unsigned long flags, n;
        bool found = false;
  
        }
        spin_unlock_irqrestore(&rpm->debug.lock, flags);
  
 -      if (WARN(!found,
 -               "Unmatched wakeref (tracking %lu), count %u\n",
 -               rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
 +      if (drm_WARN(&i915->drm, !found,
 +                   "Unmatched wakeref (tracking %lu), count %u\n",
 +                   rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
                char *buf;
  
                buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
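drm_WARN() and friends take the drm_device as their first argument and tag the warning with that specific device, which is why every converted function gains a container_of() lookup from the embedded runtime_pm member back to drm_i915_private. A hypothetical helper (not part of this patch) would collapse the repetition:

    static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
    {
            return container_of(rpm, struct drm_i915_private, runtime_pm);
    }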
@@@ -358,14 -355,10 +358,14 @@@ intel_runtime_pm_release(struct intel_r
  static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
                                              bool wakelock)
  {
 +      struct drm_i915_private *i915 = container_of(rpm,
 +                                                   struct drm_i915_private,
 +                                                   runtime_pm);
        int ret;
  
        ret = pm_runtime_get_sync(rpm->kdev);
 -      WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 +      drm_WARN_ONCE(&i915->drm, ret < 0,
 +                    "pm_runtime_get_sync() failed: %d\n", ret);
  
        intel_runtime_pm_acquire(rpm, wakelock);
  
@@@ -546,9 -539,6 +546,9 @@@ void intel_runtime_pm_put(struct intel_
   */
  void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
  {
 +      struct drm_i915_private *i915 = container_of(rpm,
 +                                                   struct drm_i915_private,
 +                                                   runtime_pm);
        struct device *kdev = rpm->kdev;
  
        /*
         * because the HDA driver may require us to enable the audio power
         * domain during system suspend.
         */
-       dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
+       dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
  
        pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
        pm_runtime_mark_last_busy(kdev);
  
                pm_runtime_dont_use_autosuspend(kdev);
                ret = pm_runtime_get_sync(kdev);
 -              WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 +              drm_WARN(&i915->drm, ret < 0,
 +                       "pm_runtime_get_sync() failed: %d\n", ret);
        } else {
                pm_runtime_use_autosuspend(kdev);
        }
  
  void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
  {
 +      struct drm_i915_private *i915 = container_of(rpm,
 +                                                   struct drm_i915_private,
 +                                                   runtime_pm);
        struct device *kdev = rpm->kdev;
  
        /* Transfer rpm ownership back to core */
 -      WARN(pm_runtime_get_sync(kdev) < 0,
 -           "Failed to pass rpm ownership back to core\n");
 +      drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
 +               "Failed to pass rpm ownership back to core\n");
  
        pm_runtime_dont_use_autosuspend(kdev);
  
  
  void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
  {
 +      struct drm_i915_private *i915 = container_of(rpm,
 +                                                   struct drm_i915_private,
 +                                                   runtime_pm);
        int count = atomic_read(&rpm->wakeref_count);
  
 -      WARN(count,
 -           "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
 -           intel_rpm_raw_wakeref_count(count),
 -           intel_rpm_wakelock_count(count));
 +      drm_WARN(&i915->drm, count,
 +               "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
 +               intel_rpm_raw_wakeref_count(count),
 +               intel_rpm_wakelock_count(count));
  
        untrack_all_intel_runtime_pm_wakerefs(rpm);
  }