Merge tag 'v3.13-rc3' into drm-intel-next-queued
author Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 9 Dec 2013 08:17:02 +0000 (09:17 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 9 Dec 2013 08:19:14 +0000 (09:19 +0100)
Linux 3.13-rc3

I need a backmerge for two reasons:
- For merging the ppgtt patches from Ben I need to pull in the bdw
  support.
- We now have duplicated calls to intel_uncore_forcewake_reset in the
  setup code due to 2 different patches merged into -next and 3.13.
  The conflict is silent so I need the merge to be able to apply
  Deepak's fixup patch.

Conflicts:
drivers/gpu/drm/i915/intel_display.c

Trivial conflict, it doesn't even show up in the merge diff.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
1  2 
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c

index 64ed8f4d991f92b5bca571b78f2fd43923dba615,ccdbecca070d2340919d5499f80824ddb84db4ac..780f815b6c9f228b17d8cbd11bb93f2ac91fefaf
@@@ -89,18 -89,6 +89,18 @@@ enum port 
  };
  #define port_name(p) ((p) + 'A')
  
 +#define I915_NUM_PHYS_VLV 1
 +
 +enum dpio_channel {
 +      DPIO_CH0,
 +      DPIO_CH1
 +};
 +
 +enum dpio_phy {
 +      DPIO_PHY0,
 +      DPIO_PHY1
 +};
 +
  enum intel_display_power_domain {
        POWER_DOMAIN_PIPE_A,
        POWER_DOMAIN_PIPE_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
        POWER_DOMAIN_VGA,
 +      POWER_DOMAIN_AUDIO,
        POWER_DOMAIN_INIT,
  
        POWER_DOMAIN_NUM,
@@@ -364,7 -351,6 +364,7 @@@ struct drm_i915_error_state 
        enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
  };
  
 +struct intel_connector;
  struct intel_crtc_config;
  struct intel_crtc;
  struct intel_limit;
@@@ -427,20 -413,11 +427,20 @@@ struct drm_i915_display_funcs 
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
 +
 +      int (*setup_backlight)(struct intel_connector *connector);
 +      uint32_t (*get_backlight)(struct intel_connector *connector);
 +      void (*set_backlight)(struct intel_connector *connector,
 +                            uint32_t level);
 +      void (*disable_backlight)(struct intel_connector *connector);
 +      void (*enable_backlight)(struct intel_connector *connector);
  };
  
  struct intel_uncore_funcs {
 -      void (*force_wake_get)(struct drm_i915_private *dev_priv);
 -      void (*force_wake_put)(struct drm_i915_private *dev_priv);
 +      void (*force_wake_get)(struct drm_i915_private *dev_priv,
 +                                                      int fw_engine);
 +      void (*force_wake_put)(struct drm_i915_private *dev_priv,
 +                                                      int fw_engine);
  
        uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@@ -465,9 -442,6 +465,9 @@@ struct intel_uncore 
        unsigned fifo_count;
        unsigned forcewake_count;
  
 +      unsigned fw_rendercount;
 +      unsigned fw_mediacount;
 +
        struct delayed_work force_wake_work;
  };
  
@@@ -734,6 -708,7 +734,6 @@@ enum intel_sbi_destination 
  #define QUIRK_PIPEA_FORCE (1<<0)
  #define QUIRK_LVDS_SSC_DISABLE (1<<1)
  #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 -#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
  
  struct intel_fbdev;
  struct intel_fbc_work;
@@@ -786,6 -761,8 +786,6 @@@ struct i915_suspend_saved_registers 
        u32 saveBLC_PWM_CTL;
        u32 saveBLC_PWM_CTL2;
        u32 saveBLC_HIST_CTL_B;
 -      u32 saveBLC_PWM_CTL_B;
 -      u32 saveBLC_PWM_CTL2_B;
        u32 saveBLC_CPU_PWM_CTL;
        u32 saveBLC_CPU_PWM_CTL2;
        u32 saveFPB0;
@@@ -955,29 -932,21 +955,29 @@@ struct intel_ilk_power_mgmt 
  
  /* Power well structure for haswell */
  struct i915_power_well {
 +      const char *name;
 +      bool always_on;
        /* power well enable/disable usage count */
        int count;
 +      unsigned long domains;
 +      void *data;
 +      void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
 +                  bool enable);
 +      bool (*is_enabled)(struct drm_device *dev,
 +                         struct i915_power_well *power_well);
  };
  
 -#define I915_MAX_POWER_WELLS 1
 -
  struct i915_power_domains {
        /*
         * Power wells needed for initialization at driver init and suspend
         * time are on. They are kept on until after the first modeset.
         */
        bool init_power_on;
 +      int power_well_count;
  
        struct mutex lock;
 -      struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
 +      int domain_use_count[POWER_DOMAIN_NUM];
 +      struct i915_power_well *power_wells;
  };
  
  struct i915_dri1_state {
@@@ -1108,30 -1077,34 +1108,30 @@@ struct i915_gpu_error 
        unsigned long missed_irq_rings;
  
        /**
 -       * State variable and reset counter controlling the reset flow
 +       * State variable controlling the reset flow and count
         *
 -       * Upper bits are for the reset counter.  This counter is used by the
 -       * wait_seqno code to race-free noticed that a reset event happened and
 -       * that it needs to restart the entire ioctl (since most likely the
 -       * seqno it waited for won't ever signal anytime soon).
 +       * This is a counter which gets incremented when reset is triggered,
 +       * and again when reset has been handled. So odd values (lowest bit set)
 +       * means that reset is in progress and even values that
 +       * (reset_counter >> 1):th reset was successfully completed.
 +       *
 +       * If reset is not completed succesfully, the I915_WEDGE bit is
 +       * set meaning that hardware is terminally sour and there is no
 +       * recovery. All waiters on the reset_queue will be woken when
 +       * that happens.
 +       *
 +       * This counter is used by the wait_seqno code to notice that reset
 +       * event happened and it needs to restart the entire ioctl (since most
 +       * likely the seqno it waited for won't ever signal anytime soon).
         *
         * This is important for lock-free wait paths, where no contended lock
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
 -       *
 -       * Lowest bit controls the reset state machine: Set means a reset is in
 -       * progress. This state will (presuming we don't have any bugs) decay
 -       * into either unset (successful reset) or the special WEDGED value (hw
 -       * terminally sour). All waiters on the reset_queue will be woken when
 -       * that happens.
         */
        atomic_t reset_counter;
  
 -      /**
 -       * Special values/flags for reset_counter
 -       *
 -       * Note that the code relies on
 -       *      I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
 -       * being true.
 -       */
  #define I915_RESET_IN_PROGRESS_FLAG   1
 -#define I915_WEDGED                   0xffffffff
 +#define I915_WEDGED                   (1 << 31)
  
        /**
         * Waitqueue to signal when the reset has completed. Used by clients
@@@ -1395,8 -1368,13 +1395,8 @@@ typedef struct drm_i915_private 
        struct intel_overlay *overlay;
        unsigned int sprite_scaling_enabled;
  
 -      /* backlight */
 -      struct {
 -              int level;
 -              bool enabled;
 -              spinlock_t lock; /* bl registers and the above bl fields */
 -              struct backlight_device *device;
 -      } backlight;
 +      /* backlight registers and fields in struct intel_panel */
 +      spinlock_t backlight_lock;
  
        /* LVDS info */
        bool no_aux_handshake;
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        struct intel_ddi_plls ddi_plls;
 +      int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
  
        /* Reclocking support */
        bool render_reclock_avail;
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
  
 -      bool hw_contexts_disabled;
        uint32_t hw_context_size;
        struct list_head context_list;
  
@@@ -1777,13 -1755,8 +1777,13 @@@ struct drm_i915_file_private 
  #define IS_MOBILE(dev)                (INTEL_INFO(dev)->is_mobile)
  #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0xFF00) == 0x0C00)
 -#define IS_ULT(dev)           (IS_HASWELL(dev) && \
 +#define IS_BDW_ULT(dev)               (IS_BROADWELL(dev) && \
 +                               (((dev)->pdev->device & 0xf) == 0x2  || \
 +                               ((dev)->pdev->device & 0xf) == 0x6 || \
 +                               ((dev)->pdev->device & 0xf) == 0xe))
 +#define IS_HSW_ULT(dev)               (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0xFF00) == 0x0A00)
 +#define IS_ULT(dev)           (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
  #define IS_HSW_GT3(dev)               (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
  #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
  #define HAS_IPS(dev)          (IS_ULT(dev) || IS_BROADWELL(dev))
  
  #define HAS_DDI(dev)          (INTEL_INFO(dev)->has_ddi)
 -#define HAS_POWER_WELL(dev)   (IS_HASWELL(dev) || IS_BROADWELL(dev))
  #define HAS_FPGA_DBG_UNCLAIMED(dev)   (INTEL_INFO(dev)->has_fpga_dbg)
  #define HAS_PSR(dev)          (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ #define HAS_PC8(dev)          (IS_HASWELL(dev)) /* XXX HSW:ULX */
  
  #define INTEL_PCH_DEVICE_ID_MASK              0xff00
  #define INTEL_PCH_IBX_DEVICE_ID_TYPE          0x3b00
@@@ -1933,6 -1908,7 +1934,6 @@@ extern void intel_pm_init(struct drm_de
  extern void intel_uncore_sanitize(struct drm_device *dev);
  extern void intel_uncore_early_sanitize(struct drm_device *dev);
  extern void intel_uncore_init(struct drm_device *dev);
 -extern void intel_uncore_clear_errors(struct drm_device *dev);
  extern void intel_uncore_check_errors(struct drm_device *dev);
  extern void intel_uncore_fini(struct drm_device *dev);
  
@@@ -2084,17 -2060,12 +2085,17 @@@ int __must_check i915_gem_check_wedge(s
  static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
  {
        return unlikely(atomic_read(&error->reset_counter)
 -                      & I915_RESET_IN_PROGRESS_FLAG);
 +                      & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
  }
  
  static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
  {
 -      return atomic_read(&error->reset_counter) == I915_WEDGED;
 +      return atomic_read(&error->reset_counter) & I915_WEDGED;
 +}
 +
 +static inline u32 i915_reset_count(struct i915_gpu_error *error)
 +{
 +      return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
  }
  
  void i915_gem_reset(struct drm_device *dev);
@@@ -2206,7 -2177,7 +2207,7 @@@ i915_gem_obj_ggtt_pin(struct drm_i915_g
  }
  
  /* i915_gem_context.c */
 -void i915_gem_context_init(struct drm_device *dev);
 +int __must_check i915_gem_context_init(struct drm_device *dev);
  void i915_gem_context_fini(struct drm_device *dev);
  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
  int i915_switch_context(struct intel_ring_buffer *ring,
@@@ -2424,8 -2395,6 +2425,8 @@@ extern int intel_enable_rc6(const struc
  extern bool i915_semaphore_is_enabled(struct drm_device *dev);
  int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 +int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
 +                             struct drm_file *file);
  
  /* overlay */
  extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@@ -2441,8 -2410,8 +2442,8 @@@ extern void intel_display_print_error_s
   * must be set to prevent GT core from power down and stale values being
   * returned.
   */
 -void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 -void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
 +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
  
  int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
  int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@@ -2457,8 -2426,6 +2458,8 @@@ u32 vlv_cck_read(struct drm_i915_privat
  void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
  void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 +u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
 +void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
  void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@@ -2468,27 -2435,8 +2469,27 @@@ u32 intel_sbi_read(struct drm_i915_priv
  void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                     enum intel_sbi_destination destination);
  
 -int vlv_gpu_freq(int ddr_freq, int val);
 -int vlv_freq_opcode(int ddr_freq, int val);
 +int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
 +int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 +
 +void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
 +void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 +
 +#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
 +      (((reg) >= 0x2000 && (reg) < 0x4000) ||\
 +      ((reg) >= 0x5000 && (reg) < 0x8000) ||\
 +      ((reg) >= 0xB000 && (reg) < 0x12000) ||\
 +      ((reg) >= 0x2E000 && (reg) < 0x30000))
 +
 +#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
 +      (((reg) >= 0x12000 && (reg) < 0x14000) ||\
 +      ((reg) >= 0x22000 && (reg) < 0x24000) ||\
 +      ((reg) >= 0x30000 && (reg) < 0x40000))
 +
 +#define FORCEWAKE_RENDER      (1 << 0)
 +#define FORCEWAKE_MEDIA               (1 << 1)
 +#define FORCEWAKE_ALL         (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
 +
  
  #define I915_READ8(reg)               dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
  #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
index cc0a63349a9c1fb7f77307eec322e4567233021a,330077bcd0bddb22ecf056766dd88cd7ed70bfaf..86dc6ecd357fdc81b4a6dee856a4f0c9e3315912
@@@ -73,7 -73,7 +73,7 @@@ static const u32 hsw_ddi_translations_h
  };
  
  static const u32 bdw_ddi_translations_edp[] = {
 -      0x00FFFFFF, 0x00000012,         /* DP parameters */
 +      0x00FFFFFF, 0x00000012,         /* eDP parameters */
        0x00EBAFFF, 0x00020011,
        0x00C71FFF, 0x0006000F,
        0x00FFFFFF, 0x00020011,
@@@ -713,6 -713,8 +713,6 @@@ bool intel_ddi_pll_mode_set(struct drm_
        uint32_t reg, val;
        int clock = intel_crtc->config.port_clock;
  
 -      /* TODO: reuse PLLs when possible (compare values) */
 -
        intel_ddi_put_crtc_pll(crtc);
  
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
        } else if (type == INTEL_OUTPUT_HDMI) {
                unsigned p, n2, r2;
  
 -              if (plls->wrpll1_refcount == 0) {
 +              intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 +
 +              val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
 +                    WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 +                    WRPLL_DIVIDER_POST(p);
 +
 +              if (val == I915_READ(WRPLL_CTL1)) {
 +                      DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
 +                                    pipe_name(pipe));
 +                      reg = WRPLL_CTL1;
 +              } else if (val == I915_READ(WRPLL_CTL2)) {
 +                      DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
 +                                    pipe_name(pipe));
 +                      reg = WRPLL_CTL2;
 +              } else if (plls->wrpll1_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
                                      pipe_name(pipe));
 -                      plls->wrpll1_refcount++;
                        reg = WRPLL_CTL1;
 -                      intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
                } else if (plls->wrpll2_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
                                      pipe_name(pipe));
 -                      plls->wrpll2_refcount++;
                        reg = WRPLL_CTL2;
 -                      intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
                } else {
                        DRM_ERROR("No WRPLLs available!\n");
                        return false;
                }
  
 -              WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
 -                   "WRPLL already enabled\n");
 -
 -              intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 -
 -              val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
 -                    WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 -                    WRPLL_DIVIDER_POST(p);
 +              if (reg == WRPLL_CTL1) {
 +                      plls->wrpll1_refcount++;
 +                      intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
 +              } else {
 +                      plls->wrpll2_refcount++;
 +                      intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
 +              }
  
        } else if (type == INTEL_OUTPUT_ANALOG) {
                if (plls->spll_refcount == 0) {
@@@ -1413,6 -1406,26 +1413,26 @@@ void intel_ddi_get_config(struct intel_
        default:
                break;
        }
+       if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+           pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+               /*
+                * This is a big fat ugly hack.
+                *
+                * Some machines in UEFI boot mode provide us a VBT that has 18
+                * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+                * unknown we fail to light up. Yet the same BIOS boots up with
+                * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+                * max, not what it tells us to use.
+                *
+                * Note: This will still be broken if the eDP panel is not lit
+                * up by the BIOS, and thus we can't get the mode at module
+                * load.
+                */
+               DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+                             pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+               dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+       }
  }
  
  static void intel_ddi_destroy(struct drm_encoder *encoder)
index 1f7af63b31df17788a15020c66507e6770a0b06e,7ec8b488bb1d30b6b950a23eecc833b574ab4db0..596ad09f0e5157b83ae1bf026cf26d82fed848c5
@@@ -329,8 -329,6 +329,8 @@@ static void vlv_clock(int refclk, intel
  {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
 +      if (WARN_ON(clock->n == 0 || clock->p == 0))
 +              return;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  }
@@@ -432,8 -430,6 +432,8 @@@ static void pineview_clock(int refclk, 
  {
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
 +      if (WARN_ON(clock->n == 0 || clock->p == 0))
 +              return;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  }
@@@ -447,8 -443,6 +447,8 @@@ static void i9xx_clock(int refclk, inte
  {
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
 +      if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
 +              return;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  }
@@@ -754,10 -748,10 +754,10 @@@ enum transcoder intel_pipe_to_cpu_trans
        return intel_crtc->config.cpu_transcoder;
  }
  
 -static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
 +static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 frame, frame_reg = PIPEFRAME(pipe);
 +      u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
  
        frame = I915_READ(frame_reg);
  
@@@ -778,8 -772,8 +778,8 @@@ void intel_wait_for_vblank(struct drm_d
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = PIPESTAT(pipe);
  
 -      if (INTEL_INFO(dev)->gen >= 5) {
 -              ironlake_wait_for_vblank(dev, pipe);
 +      if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 +              g4x_wait_for_vblank(dev, pipe);
                return;
        }
  
@@@ -1367,7 -1361,6 +1367,7 @@@ static void intel_init_dpio(struct drm_
        if (!IS_VALLEYVIEW(dev))
                return;
  
 +      DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
@@@ -1501,25 -1494,18 +1501,25 @@@ static void vlv_disable_pll(struct drm_
        POSTING_READ(DPLL(pipe));
  }
  
 -void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
 +void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 +              struct intel_digital_port *dport)
  {
        u32 port_mask;
  
 -      if (!port)
 +      switch (dport->port) {
 +      case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
 -      else
 +              break;
 +      case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
 +              break;
 +      default:
 +              BUG();
 +      }
  
        if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
                WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
 -                   'B' + port, I915_READ(DPLL(0)));
 +                   port_name(dport->port), I915_READ(DPLL(0)));
  }
  
  /**
@@@ -2247,12 -2233,7 +2247,12 @@@ void intel_display_handle_reset(struct 
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
                mutex_lock(&crtc->mutex);
 -              if (intel_crtc->active)
 +              /*
 +               * FIXME: Once we have proper support for primary planes (and
 +               * disabling them without disabling the entire crtc) allow again
 +               * a NULL crtc->fb.
 +               */
 +              if (intel_crtc->active && crtc->fb)
                        dev_priv->display.update_plane(crtc, crtc->fb,
                                                       crtc->x, crtc->y);
                mutex_unlock(&crtc->mutex);
@@@ -3929,174 -3910,6 +3929,174 @@@ static void i9xx_pfit_enable(struct int
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
  }
  
 +int valleyview_get_vco(struct drm_i915_private *dev_priv)
 +{
 +      int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 +
 +      /* Obtain SKU information */
 +      mutex_lock(&dev_priv->dpio_lock);
 +      hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
 +              CCK_FUSE_HPLL_FREQ_MASK;
 +      mutex_unlock(&dev_priv->dpio_lock);
 +
 +      return vco_freq[hpll_freq];
 +}
 +
 +/* Adjust CDclk dividers to allow high res or save power if possible */
 +static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 val, cmd;
 +
 +      if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
 +              cmd = 2;
 +      else if (cdclk == 266)
 +              cmd = 1;
 +      else
 +              cmd = 0;
 +
 +      mutex_lock(&dev_priv->rps.hw_lock);
 +      val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
 +      val &= ~DSPFREQGUAR_MASK;
 +      val |= (cmd << DSPFREQGUAR_SHIFT);
 +      vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
 +      if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
 +                    DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
 +                   50)) {
 +              DRM_ERROR("timed out waiting for CDclk change\n");
 +      }
 +      mutex_unlock(&dev_priv->rps.hw_lock);
 +
 +      if (cdclk == 400) {
 +              u32 divider, vco;
 +
 +              vco = valleyview_get_vco(dev_priv);
 +              divider = ((vco << 1) / cdclk) - 1;
 +
 +              mutex_lock(&dev_priv->dpio_lock);
 +              /* adjust cdclk divider */
 +              val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
 +              val &= ~0xf;
 +              val |= divider;
 +              vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
 +              mutex_unlock(&dev_priv->dpio_lock);
 +      }
 +
 +      mutex_lock(&dev_priv->dpio_lock);
 +      /* adjust self-refresh exit latency value */
 +      val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
 +      val &= ~0x7f;
 +
 +      /*
 +       * For high bandwidth configs, we set a higher latency in the bunit
 +       * so that the core display fetch happens in time to avoid underruns.
 +       */
 +      if (cdclk == 400)
 +              val |= 4500 / 250; /* 4.5 usec */
 +      else
 +              val |= 3000 / 250; /* 3.0 usec */
 +      vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
 +      mutex_unlock(&dev_priv->dpio_lock);
 +
 +      /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
 +      intel_i2c_reset(dev);
 +}
 +
 +static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
 +{
 +      int cur_cdclk, vco;
 +      int divider;
 +
 +      vco = valleyview_get_vco(dev_priv);
 +
 +      mutex_lock(&dev_priv->dpio_lock);
 +      divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
 +      mutex_unlock(&dev_priv->dpio_lock);
 +
 +      divider &= 0xf;
 +
 +      cur_cdclk = (vco << 1) / (divider + 1);
 +
 +      return cur_cdclk;
 +}
 +
 +static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
 +                               int max_pixclk)
 +{
 +      int cur_cdclk;
 +
 +      cur_cdclk = valleyview_cur_cdclk(dev_priv);
 +
 +      /*
 +       * Really only a few cases to deal with, as only 4 CDclks are supported:
 +       *   200MHz
 +       *   267MHz
 +       *   320MHz
 +       *   400MHz
 +       * So we check to see whether we're above 90% of the lower bin and
 +       * adjust if needed.
 +       */
 +      if (max_pixclk > 288000) {
 +              return 400;
 +      } else if (max_pixclk > 240000) {
 +              return 320;
 +      } else
 +              return 266;
 +      /* Looks like the 200MHz CDclk freq doesn't work on some configs */
 +}
 +
 +static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
 +                               unsigned modeset_pipes,
 +                               struct intel_crtc_config *pipe_config)
 +{
 +      struct drm_device *dev = dev_priv->dev;
 +      struct intel_crtc *intel_crtc;
 +      int max_pixclk = 0;
 +
 +      list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
 +                          base.head) {
 +              if (modeset_pipes & (1 << intel_crtc->pipe))
 +                      max_pixclk = max(max_pixclk,
 +                                       pipe_config->adjusted_mode.crtc_clock);
 +              else if (intel_crtc->base.enabled)
 +                      max_pixclk = max(max_pixclk,
 +                                       intel_crtc->config.adjusted_mode.crtc_clock);
 +      }
 +
 +      return max_pixclk;
 +}
 +
 +static void valleyview_modeset_global_pipes(struct drm_device *dev,
 +                                          unsigned *prepare_pipes,
 +                                          unsigned modeset_pipes,
 +                                          struct intel_crtc_config *pipe_config)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc;
 +      int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
 +                                             pipe_config);
 +      int cur_cdclk = valleyview_cur_cdclk(dev_priv);
 +
 +      if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
 +              return;
 +
 +      list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
 +                          base.head)
 +              if (intel_crtc->base.enabled)
 +                      *prepare_pipes |= (1 << intel_crtc->pipe);
 +}
 +
 +static void valleyview_modeset_global_resources(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
 +      int cur_cdclk = valleyview_cur_cdclk(dev_priv);
 +      int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 +
 +      if (req_cdclk != cur_cdclk)
 +              valleyview_set_cdclk(dev, req_cdclk);
 +}
 +
  static void valleyview_crtc_enable(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
@@@ -4821,24 -4634,24 +4821,24 @@@ static void vlv_pllb_recal_opamp(struc
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
 -      reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
 +      reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  
 -      reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
 +      reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x8cffffff;
        reg_val = 0x8c000000;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 +      vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  
 -      reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
 +      reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  
 -      reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
 +      reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 +      vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  }
  
  static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@@ -4907,15 -4720,15 +4907,15 @@@ static void vlv_update_pll(struct intel
                vlv_pllb_recal_opamp(dev_priv, pipe);
  
        /* Set up Tx target for periodic Rcomp update */
 -      vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  
        /* Disable target IRef on PLL */
 -      reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
 +      reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  
        /* Disable fast lock */
 -      vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
 +      vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  
        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  
        mdiv |= DPIO_ENABLE_CALIBRATION;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  
        /* Set HBR and RBR LPF coefficients */
        if (crtc->config.port_clock == 162000 ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
 -              vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
 +              vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
 -              vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
 +              vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);
  
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
                /* Use SSC source */
                if (!pipe)
 -                      vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 +                      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
 -                      vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 +                      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (!pipe)
 -                      vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 +                      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
 -                      vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 +                      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }
  
 -      coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
 +      coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
                coreclk |= 0x01000000;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  
        /* Enable DPIO clock input */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
@@@ -5448,7 -5261,7 +5448,7 @@@ static void vlv_crtc_clock_get(struct i
        int refclk = 100000;
  
        mutex_lock(&dev_priv->dpio_lock);
 -      mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
 +      mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->dpio_lock);
  
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@@ -6490,7 -6303,7 +6490,7 @@@ static void assert_can_disable_lcpll(st
        uint32_t val;
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
 -              WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
 +              WARN(crtc->active, "CRTC for pipe %c enabled\n",
                     pipe_name(crtc->pipe));
  
        WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@@ -6589,7 -6402,7 +6589,7 @@@ static void hsw_restore_lcpll(struct dr
  
        /* Make sure we're not on PC8 state before disabling PC8, otherwise
         * we'll hang the machine! */
 -      dev_priv->uncore.funcs.force_wake_get(dev_priv);
 +      dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
                        DRM_ERROR("Switching back to LCPLL failed\n");
        }
  
 -      dev_priv->uncore.funcs.force_wake_put(dev_priv);
 +      dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  void hsw_enable_pc8_work(struct work_struct *__work)
@@@ -6705,6 -6518,9 +6705,9 @@@ static void __hsw_disable_package_c8(st
  
  void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
  {
+       if (!HAS_PC8(dev_priv->dev))
+               return;
        mutex_lock(&dev_priv->pc8.lock);
        __hsw_enable_package_c8(dev_priv);
        mutex_unlock(&dev_priv->pc8.lock);
  
  void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
  {
+       if (!HAS_PC8(dev_priv->dev))
+               return;
        mutex_lock(&dev_priv->pc8.lock);
        __hsw_disable_package_c8(dev_priv);
        mutex_unlock(&dev_priv->pc8.lock);
@@@ -6749,6 -6568,9 +6755,9 @@@ static void hsw_update_package_c8(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool allow;
  
+       if (!HAS_PC8(dev_priv->dev))
+               return;
        if (!i915_enable_pc8)
                return;
  
@@@ -6772,6 -6594,9 +6781,9 @@@ done
  
  static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
  {
+       if (!HAS_PC8(dev_priv->dev))
+               return;
        mutex_lock(&dev_priv->pc8.lock);
        if (!dev_priv->pc8.gpu_idle) {
                dev_priv->pc8.gpu_idle = true;
  
  static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
  {
+       if (!HAS_PC8(dev_priv->dev))
+               return;
        mutex_lock(&dev_priv->pc8.lock);
        if (dev_priv->pc8.gpu_idle) {
                dev_priv->pc8.gpu_idle = false;
@@@ -7375,7 -7203,9 +7390,9 @@@ static void i9xx_update_cursor(struct d
                intel_crtc->cursor_visible = visible;
        }
        /* and commit changes on next vblank */
+       POSTING_READ(CURCNTR(pipe));
        I915_WRITE(CURBASE(pipe), base);
+       POSTING_READ(CURBASE(pipe));
  }
  
  static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
                intel_crtc->cursor_visible = visible;
        }
        /* and commit changes on next vblank */
+       POSTING_READ(CURCNTR_IVB(pipe));
        I915_WRITE(CURBASE_IVB(pipe), base);
+       POSTING_READ(CURBASE_IVB(pipe));
  }
  
  /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@@ -9439,8 -9271,7 +9458,7 @@@ check_crtc_state(struct drm_device *dev
                        enum pipe pipe;
                        if (encoder->base.crtc != &crtc->base)
                                continue;
-                       if (encoder->get_config &&
-                           encoder->get_hw_state(encoder, &pipe))
+                       if (encoder->get_hw_state(encoder, &pipe))
                                encoder->get_config(encoder, &pipe_config);
                }
  
@@@ -9570,21 -9401,6 +9588,21 @@@ static int __intel_set_mode(struct drm_
                                       "[modeset]");
        }
  
 +      /*
 +       * See if the config requires any additional preparation, e.g.
 +       * to adjust global state with pipes off.  We need to do this
 +       * here so we can get the modeset_pipe updated config for the new
 +       * mode set on this crtc.  For other crtcs we need to use the
 +       * adjusted_mode bits in the crtc directly.
 +       */
 +      if (IS_VALLEYVIEW(dev)) {
 +              valleyview_modeset_global_pipes(dev, &prepare_pipes,
 +                                              modeset_pipes, pipe_config);
 +
 +              /* may have added more to prepare_pipes than we should */
 +              prepare_pipes &= ~disable_pipes;
 +      }
 +
        for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
                intel_crtc_disable(&intel_crtc->base);
  
@@@ -10109,13 -9925,10 +10127,13 @@@ static void intel_crtc_init(struct drm_
                intel_crtc->lut_b[i] = i;
        }
  
 -      /* Swap pipes & planes for FBC on pre-965 */
 +      /*
 +       * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
 +       * are hooked to plane B. Hence we want plane A feeding pipe B.
 +       */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
 -      if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 +      if (IS_MOBILE(dev) && INTEL_INFO(dev)->gen < 4) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
                intel_crtc->plane = !pipe;
        }
@@@ -10599,11 -10412,8 +10617,11 @@@ static void intel_init_display(struct d
                }
        } else if (IS_G4X(dev)) {
                dev_priv->display.write_eld = g4x_write_eld;
 -      } else if (IS_VALLEYVIEW(dev))
 +      } else if (IS_VALLEYVIEW(dev)) {
 +              dev_priv->display.modeset_global_resources =
 +                      valleyview_modeset_global_resources;
                dev_priv->display.write_eld = ironlake_write_eld;
 +      }
  
        /* Default just returns -ENODEV to indicate unsupported */
        dev_priv->display.queue_flip = intel_default_queue_flip;
                dev_priv->display.queue_flip = intel_gen7_queue_flip;
                break;
        }
 +
 +      intel_panel_init_backlight_funcs(dev);
  }
  
  /*
@@@ -10668,6 -10476,17 +10686,6 @@@ static void quirk_invert_brightness(str
        DRM_INFO("applying inverted panel brightness quirk\n");
  }
  
 -/*
 - * Some machines (Dell XPS13) suffer broken backlight controls if
 - * BLM_PCH_PWM_ENABLE is set.
 - */
 -static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
 -      DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
 -}
 -
  struct intel_quirk {
        int device;
        int subsystem_vendor;
@@@ -10727,6 -10546,11 +10745,6 @@@ static struct intel_quirk intel_quirks[
         * seem to use inverted backlight PWM.
         */
        { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
 -
 -      /* Dell XPS13 HD Sandy Bridge */
 -      { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
 -      /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
 -      { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
  };
  
  static void intel_init_quirks(struct drm_device *dev)
@@@ -11046,7 -10870,7 +11064,7 @@@ void i915_redisable_vga(struct drm_devi
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
 -      if (HAS_POWER_WELL(dev) &&
 +      if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
                return;
  
@@@ -11107,8 -10931,7 +11125,7 @@@ static void intel_modeset_readout_hw_st
                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
-                       if (encoder->get_config)
-                               encoder->get_config(encoder, &crtc->config);
+                       encoder->get_config(encoder, &crtc->config);
                } else {
                        encoder->base.crtc = NULL;
                }
@@@ -11268,11 -11091,12 +11285,11 @@@ void intel_modeset_cleanup(struct drm_d
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
  
 -      /* destroy backlight, if any, before the connectors */
 -      intel_panel_destroy_backlight(dev);
 -
 -      /* destroy the sysfs files before encoders/connectors */
 -      list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 +      /* destroy the backlight and sysfs files before encoders/connectors */
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +              intel_panel_destroy_backlight(connector);
                drm_sysfs_connector_remove(connector);
 +      }
  
        drm_mode_config_cleanup(dev);
  
@@@ -11326,7 -11150,6 +11343,7 @@@ struct intel_display_error_state 
        } cursor[I915_MAX_PIPES];
  
        struct intel_pipe_error_state {
 +              bool power_domain_on;
                u32 source;
        } pipe[I915_MAX_PIPES];
  
        } plane[I915_MAX_PIPES];
  
        struct intel_transcoder_error_state {
 +              bool power_domain_on;
                enum transcoder cpu_transcoder;
  
                u32 conf;
@@@ -11375,13 -11197,11 +11392,13 @@@ intel_display_capture_error_state(struc
        if (error == NULL)
                return NULL;
  
 -      if (HAS_POWER_WELL(dev))
 +      if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
  
        for_each_pipe(i) {
 -              if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
 +              error->pipe[i].power_domain_on =
 +                      intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
 +              if (!error->pipe[i].power_domain_on)
                        continue;
  
                if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
        for (i = 0; i < error->num_transcoders; i++) {
                enum transcoder cpu_transcoder = transcoders[i];
  
 -              if (!intel_display_power_enabled(dev,
 -                              POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
 +              error->transcoder[i].power_domain_on =
 +                      intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
 +              if (!error->transcoder[i].power_domain_on)
                        continue;
  
                error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@@ -11449,13 -11268,11 +11466,13 @@@ intel_display_print_error_state(struct 
                return;
  
        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
 -      if (HAS_POWER_WELL(dev))
 +      if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(i) {
                err_printf(m, "Pipe [%d]:\n", i);
 +              err_printf(m, "  Power: %s\n",
 +                         error->pipe[i].power_domain_on ? "on" : "off");
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
  
                err_printf(m, "Plane [%d]:\n", i);
        for (i = 0; i < error->num_transcoders; i++) {
                err_printf(m, "CPU transcoder: %c\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
 +              err_printf(m, "  Power: %s\n",
 +                         error->transcoder[i].power_domain_on ? "on" : "off");
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
index d45b311c1ab6f926bef77df109c1ac6a7f4b60a9,0b2e842fef0151070b09af535d54fdcee2215602..f3b17b11b2701b839f3b0ba71bab19889900b157
@@@ -142,7 -142,7 +142,7 @@@ intel_dp_max_data_rate(int max_link_clo
        return (max_link_clock * max_lanes * 8) / 10;
  }
  
 -static int
 +static enum drm_mode_status
  intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
  {
@@@ -404,7 -404,7 +404,7 @@@ intel_dp_aux_ch(struct intel_dp *intel_
        int i, ret, recv_bytes;
        uint32_t status;
        int try, precharge, clock = 0;
 -      bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
 +      bool has_aux_irq = true;
        uint32_t timeout;
  
        /* dp aux is extremely sensitive to irq latency, hence request the
@@@ -1037,8 -1037,6 +1037,8 @@@ static void ironlake_wait_panel_status(
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
        }
 +
 +      DRM_DEBUG_KMS("Wait complete\n");
  }
  
  static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
@@@ -1776,7 -1774,7 +1776,7 @@@ static void intel_disable_dp(struct int
         * ensure that we have vdd while we switch off the panel. */
        ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
-       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        ironlake_edp_panel_off(intel_dp);
  
        /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
@@@ -1847,23 -1845,23 +1847,23 @@@ static void vlv_pre_enable_dp(struct in
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 -      int port = vlv_dport_to_channel(dport);
 +      enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        struct edp_power_seq power_seq;
        u32 val;
  
        mutex_lock(&dev_priv->dpio_lock);
  
 -      val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 +      val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
 -      vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
  
        mutex_unlock(&dev_priv->dpio_lock);
  
  
        intel_enable_dp(encoder);
  
 -      vlv_wait_port_ready(dev_priv, port);
 +      vlv_wait_port_ready(dev_priv, dport);
  }
  
  static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
 -      int port = vlv_dport_to_channel(dport);
 +      enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
  
        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);
  
        /* Fix up inter-pair skew failure */
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
  }
  
@@@ -1943,6 -1941,18 +1943,6 @@@ intel_dp_get_link_status(struct intel_d
                                              DP_LINK_STATUS_SIZE);
  }
  
 -#if 0
 -static char   *voltage_names[] = {
 -      "0.4V", "0.6V", "0.8V", "1.2V"
 -};
 -static char   *pre_emph_names[] = {
 -      "0dB", "3.5dB", "6dB", "9.5dB"
 -};
 -static char   *link_train_names[] = {
 -      "pattern 1", "pattern 2", "idle", "off"
 -};
 -#endif
 -
  /*
   * These are source-specific values; current Intel hardware supports
   * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
@@@ -2040,7 -2050,7 +2040,7 @@@ static uint32_t intel_vlv_signal_levels
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
 -      int port = vlv_dport_to_channel(dport);
 +      enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
  
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        }
  
        mutex_lock(&dev_priv->dpio_lock);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
 -      vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
 +      vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
 +      vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);
  
        return 0;
index 853d13ea055f15dd205f726c30d4e6b444d1ff51,6d69a9bad86545c6a8cfc8e8ff86480d462c2132..3da259e280bad48696d0bbc905a2835c258d1b38
@@@ -64,7 -64,7 +64,7 @@@ struct opregion_header 
        u8 driver_ver[16];
        u32 mboxes;
        u8 reserved[164];
 -} __attribute__((packed));
 +} __packed;
  
  /* OpRegion mailbox #1: public ACPI methods */
  struct opregion_acpi {
@@@ -86,7 -86,7 +86,7 @@@
        u32 cnot;       /* current OS notification */
        u32 nrdy;       /* driver status */
        u8 rsvd2[60];
 -} __attribute__((packed));
 +} __packed;
  
  /* OpRegion mailbox #2: SWSCI */
  struct opregion_swsci {
@@@ -94,7 -94,7 +94,7 @@@
        u32 parm;       /* command parameters */
        u32 dslp;       /* driver sleep time-out */
        u8 rsvd[244];
 -} __attribute__((packed));
 +} __packed;
  
  /* OpRegion mailbox #3: ASLE */
  struct opregion_asle {
        u32 srot;       /* supported rotation angles */
        u32 iuer;       /* IUER events */
        u8 rsvd[86];
 -} __attribute__((packed));
 +} __packed;
  
  /* Driver readiness indicator */
  #define ASLE_ARDY_READY               (1 << 0)
@@@ -396,10 -396,13 +396,10 @@@ int intel_opregion_notify_adapter(struc
  static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_encoder *encoder;
        struct drm_connector *connector;
 -      struct intel_connector *intel_connector = NULL;
 -      struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
 +      struct intel_connector *intel_connector;
 +      struct intel_panel *panel;
        struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 -      u32 ret = 0;
 -      bool found = false;
  
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
  
                return ASLC_BACKLIGHT_FAILED;
  
        mutex_lock(&dev->mode_config.mutex);
 +
        /*
 -       * Could match the OpRegion connector here instead, but we'd also need
 -       * to verify the connector could handle a backlight call.
 +       * Update backlight on all connectors that support backlight (usually
 +       * only one).
         */
 -      list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
 -              if (encoder->crtc == crtc) {
 -                      found = true;
 -                      break;
 -              }
 -
 -      if (!found) {
 -              ret = ASLC_BACKLIGHT_FAILED;
 -              goto out;
 -      }
 -
 -      list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 -              if (connector->encoder == encoder)
 -                      intel_connector = to_intel_connector(connector);
 -
 -      if (!intel_connector) {
 -              ret = ASLC_BACKLIGHT_FAILED;
 -              goto out;
 -      }
 -
        DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
 -      intel_panel_set_backlight(intel_connector, bclp, 255);
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +              intel_connector = to_intel_connector(connector);
 +              panel = &intel_connector->panel;
 +              if (panel->backlight.present)
 +                      intel_panel_set_backlight(intel_connector, bclp, 255);
 +      }
        iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
  
 -out:
        mutex_unlock(&dev->mode_config.mutex);
  
 -      return ret;
 +
 +      return 0;
  }
  
  static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@@ -621,7 -638,7 +621,7 @@@ static void intel_didl_outputs(struct d
        u32 temp;
        int i = 0;
  
-       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+       handle = ACPI_HANDLE(&dev->pdev->dev);
        if (!handle || acpi_bus_get_device(handle, &acpi_dev))
                return;
  
index ac4a74a41344cf8f09747952c64a95dbc1c21afc,caf2ee4e5441426527234453dbb43ed4c4c7273f..41b6e080e3622a0636a67d95b4d21f027a767ab7
@@@ -191,11 -191,7 +191,11 @@@ static void sandybridge_blit_fbc_update
        u32 blt_ecoskpd;
  
        /* Make sure blitter notifies FBC of writes */
 -      gen6_gt_force_wake_get(dev_priv);
 +
 +      /* Blitter is part of Media powerwell on VLV. No impact of
 +       * this param in other platforms for now */
 +      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
 +
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
 -      gen6_gt_force_wake_put(dev_priv);
 +
 +      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
  }
  
  static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, ala X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
 -      dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
 +      dpfc_ctl |= DPFC_CTL_FENCE_EN;
 +      if (IS_GEN5(dev))
 +              dpfc_ctl |= obj->fence_reg;
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
  
        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
@@@ -302,7 -295,7 +302,7 @@@ static void gen7_enable_fbc(struct drm_
  
        sandybridge_blit_fbc_update(dev);
  
 -      DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 +      DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
  }
  
  bool intel_fbc_enabled(struct drm_device *dev)
@@@ -537,10 -530,10 +537,10 @@@ void intel_update_fbc(struct drm_devic
                        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
        }
 -      if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
 -          intel_crtc->plane != 0) {
 +      if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
 +          intel_crtc->plane != PLANE_A) {
                if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
 -                      DRM_DEBUG_KMS("plane not 0, disabling compression\n");
 +                      DRM_DEBUG_KMS("plane not A, disabling compression\n");
                goto out_disable;
        }
  
@@@ -1632,7 -1625,7 +1632,7 @@@ static void i9xx_update_wm(struct drm_c
                        &to_intel_crtc(enabled)->config.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->htotal;
-               int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+               int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
                int pixel_size = enabled->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;
@@@ -3436,19 -3429,26 +3436,19 @@@ static void ironlake_disable_drps(struc
   * ourselves, instead of doing a rmw cycle (which might result in us clearing
   * all limits and the gpu stuck at whatever frequency it is at atm).
   */
 -static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 +static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
  {
        u32 limits;
  
 -      limits = 0;
 -
 -      if (*val >= dev_priv->rps.max_delay)
 -              *val = dev_priv->rps.max_delay;
 -      limits |= dev_priv->rps.max_delay << 24;
 -
        /* Only set the down limit when we've reached the lowest level to avoid
         * getting more interrupts, otherwise leave this clear. This prevents a
         * race in the hw when coming out of rc6: There's a tiny window where
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
 -      if (*val <= dev_priv->rps.min_delay) {
 -              *val = dev_priv->rps.min_delay;
 +      limits = dev_priv->rps.max_delay << 24;
 +      if (val <= dev_priv->rps.min_delay)
                limits |= dev_priv->rps.min_delay << 16;
 -      }
  
        return limits;
  }
@@@ -3548,6 -3548,7 +3548,6 @@@ static void gen6_set_rps_thresholds(str
  void gen6_set_rps(struct drm_device *dev, u8 val)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 limits = gen6_rps_limits(dev_priv, &val);
  
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
        /* Make sure we continue to get interrupts
         * until we hit the minimum or maximum frequencies.
         */
 -      I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 +      I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 +                 gen6_rps_limits(dev_priv, val));
  
        POSTING_READ(GEN6_RPNSWREQ);
  
@@@ -3606,18 -3606,48 +3606,18 @@@ void gen6_rps_boost(struct drm_i915_pri
        mutex_unlock(&dev_priv->rps.hw_lock);
  }
  
 -/*
 - * Wait until the previous freq change has completed,
 - * or the timeout elapsed, and then update our notion
 - * of the current GPU frequency.
 - */
 -static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
 -{
 -      u32 pval;
 -
 -      WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 -
 -      if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
 -              DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
 -
 -      pval >>= 8;
 -
 -      if (pval != dev_priv->rps.cur_delay)
 -              DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
 -                               vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
 -                               dev_priv->rps.cur_delay,
 -                               vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
 -
 -      dev_priv->rps.cur_delay = pval;
 -}
 -
  void valleyview_set_rps(struct drm_device *dev, u8 val)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      gen6_rps_limits(dev_priv, &val);
 -
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
        WARN_ON(val < dev_priv->rps.min_delay);
  
 -      vlv_update_rps_cur_delay(dev_priv);
 -
        DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv->mem_freq,
 -                                    dev_priv->rps.cur_delay),
 +                       vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay,
 -                       vlv_gpu_freq(dev_priv->mem_freq, val), val);
 +                       vlv_gpu_freq(dev_priv, val), val);
  
        if (val == dev_priv->rps.cur_delay)
                return;
  
        dev_priv->rps.cur_delay = val;
  
 -      trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
 +      trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
  }
  
  static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@@ -3744,7 -3774,7 +3744,7 @@@ static void gen8_enable_rps(struct drm_
  
        /* 1c & 1d: Get forcewake during program sequence. Although the driver
         * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
 -      gen6_gt_force_wake_get(dev_priv);
 +      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        /* 2a: Disable RC states. */
        I915_WRITE(GEN6_RC_CONTROL, 0);
  
        gen6_enable_rps_interrupts(dev);
  
 -      gen6_gt_force_wake_put(dev_priv);
 +      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  static void gen6_enable_rps(struct drm_device *dev)
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
  
 -      gen6_gt_force_wake_get(dev_priv);
 +      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
  
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-       if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+       if (IS_IVYBRIDGE(dev))
                I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
        else
                I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
                        DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
        }
  
 -      gen6_gt_force_wake_put(dev_priv);
 +      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  void gen6_update_ring_freq(struct drm_device *dev)
@@@ -4085,8 -4115,7 +4085,8 @@@ static void valleyview_enable_rps(struc
  
        valleyview_setup_pctx(dev);
  
 -      gen6_gt_force_wake_get(dev_priv);
 +      /* If VLV, Forcewake all wells, else re-direct to regular path */
 +      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
        for_each_ring(ring, dev_priv, i)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
  
 -      I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
 +      I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
  
        /* allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 -              rc6_mode = GEN7_RC_CTL_TO_MODE;
 +              rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
  
        intel_print_rc6_info(dev, rc6_mode);
  
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
  
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 -      switch ((val >> 6) & 3) {
 -      case 0:
 -      case 1:
 -              dev_priv->mem_freq = 800;
 -              break;
 -      case 2:
 -              dev_priv->mem_freq = 1066;
 -              break;
 -      case 3:
 -              dev_priv->mem_freq = 1333;
 -              break;
 -      }
 -      DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
  
        dev_priv->rps.cur_delay = (val >> 8) & 0xff;
        DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv->mem_freq,
 -                                    dev_priv->rps.cur_delay),
 +                       vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay);
  
        dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
        dev_priv->rps.hw_max = dev_priv->rps.max_delay;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv->mem_freq,
 -                                    dev_priv->rps.max_delay),
 +                       vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
                         dev_priv->rps.max_delay);
  
        dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv->mem_freq,
 -                                    dev_priv->rps.rpe_delay),
 +                       vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
  
        dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv->mem_freq,
 -                                    dev_priv->rps.min_delay),
 +                       vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
                         dev_priv->rps.min_delay);
  
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv->mem_freq,
 -                                    dev_priv->rps.rpe_delay),
 +                       vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
  
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
  
        gen6_enable_rps_interrupts(dev);
  
 -      gen6_gt_force_wake_put(dev_priv);
 +      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  void ironlake_teardown_rc6(struct drm_device *dev)
@@@ -5415,26 -5462,6 +5415,26 @@@ static void ivybridge_init_clock_gating
  static void valleyview_init_clock_gating(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 val;
 +
 +      mutex_lock(&dev_priv->rps.hw_lock);
 +      val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 +      mutex_unlock(&dev_priv->rps.hw_lock);
 +      switch ((val >> 6) & 3) {
 +      case 0:
 +              dev_priv->mem_freq = 800;
 +              break;
 +      case 1:
 +              dev_priv->mem_freq = 1066;
 +              break;
 +      case 2:
 +              dev_priv->mem_freq = 1333;
 +              break;
 +      case 3:
 +              dev_priv->mem_freq = 1333;
 +              break;
 +      }
 +      DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
  
@@@ -5614,78 -5641,49 +5614,78 @@@ void intel_suspend_hw(struct drm_devic
                lpt_suspend_hw(dev);
  }
  
 -static bool is_always_on_power_domain(struct drm_device *dev,
 -                                    enum intel_display_power_domain domain)
 -{
 -      unsigned long always_on_domains;
 +#define for_each_power_well(i, power_well, domain_mask, power_domains)        \
 +      for (i = 0;                                                     \
 +           i < (power_domains)->power_well_count &&                   \
 +               ((power_well) = &(power_domains)->power_wells[i]);     \
 +           i++)                                                       \
 +              if ((power_well)->domains & (domain_mask))
  
 -      BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
 -
 -      if (IS_BROADWELL(dev)) {
 -              always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
 -      } else if (IS_HASWELL(dev)) {
 -              always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
 -      } else {
 -              WARN_ON(1);
 -              return true;
 -      }
 -
 -      return BIT(domain) & always_on_domains;
 -}
 +#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
 +      for (i = (power_domains)->power_well_count - 1;                  \
 +           i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
 +           i--)                                                        \
 +              if ((power_well)->domains & (domain_mask))
  
  /**
   * We should only use the power well if we explicitly asked the hardware to
   * enable it, so check if it's enabled and also check if we've requested it to
   * be enabled.
   */
 +static bool hsw_power_well_enabled(struct drm_device *dev,
 +                                 struct i915_power_well *power_well)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      return I915_READ(HSW_PWR_WELL_DRIVER) ==
 +                   (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 +}
 +
 +bool intel_display_power_enabled_sw(struct drm_device *dev,
 +                                  enum intel_display_power_domain domain)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct i915_power_domains *power_domains;
 +
 +      power_domains = &dev_priv->power_domains;
 +
 +      return power_domains->domain_use_count[domain];
 +}
 +
  bool intel_display_power_enabled(struct drm_device *dev,
                                 enum intel_display_power_domain domain)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct i915_power_domains *power_domains;
 +      struct i915_power_well *power_well;
 +      bool is_enabled;
 +      int i;
  
 -      if (!HAS_POWER_WELL(dev))
 -              return true;
 +      power_domains = &dev_priv->power_domains;
  
 -      if (is_always_on_power_domain(dev, domain))
 -              return true;
 +      is_enabled = true;
  
 -      return I915_READ(HSW_PWR_WELL_DRIVER) ==
 -                   (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 +      mutex_lock(&power_domains->lock);
 +      for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 +              if (power_well->always_on)
 +                      continue;
 +
 +              if (!power_well->is_enabled(dev, power_well)) {
 +                      is_enabled = false;
 +                      break;
 +              }
 +      }
 +      mutex_unlock(&power_domains->lock);
 +
 +      return is_enabled;
  }
  
 -static void __intel_set_power_well(struct drm_device *dev, bool enable)
 +static void hsw_set_power_well(struct drm_device *dev,
 +                             struct i915_power_well *power_well, bool enable)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool is_enabled, enable_requested;
 +      unsigned long irqflags;
        uint32_t tmp;
  
        tmp = I915_READ(HSW_PWR_WELL_DRIVER);
                                      HSW_PWR_WELL_STATE_ENABLED), 20))
                                DRM_ERROR("Timeout enabling power well\n");
                }
 +
 +              if (IS_BROADWELL(dev)) {
 +                      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +                      I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
 +                                 dev_priv->de_irq_mask[PIPE_B]);
 +                      I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
 +                                 ~dev_priv->de_irq_mask[PIPE_B] |
 +                                 GEN8_PIPE_VBLANK);
 +                      I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
 +                                 dev_priv->de_irq_mask[PIPE_C]);
 +                      I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
 +                                 ~dev_priv->de_irq_mask[PIPE_C] |
 +                                 GEN8_PIPE_VBLANK);
 +                      POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
 +                      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +              }
        } else {
                if (enable_requested) {
 -                      unsigned long irqflags;
                        enum pipe p;
  
                        I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
  static void __intel_power_well_get(struct drm_device *dev,
                                   struct i915_power_well *power_well)
  {
 -      if (!power_well->count++)
 -              __intel_set_power_well(dev, true);
 +      if (!power_well->count++ && power_well->set)
 +              power_well->set(dev, power_well, true);
  }
  
  static void __intel_power_well_put(struct drm_device *dev,
                                   struct i915_power_well *power_well)
  {
        WARN_ON(!power_well->count);
 -      if (!--power_well->count && i915_disable_power_well)
 -              __intel_set_power_well(dev, false);
 +
 +      if (!--power_well->count && power_well->set && i915_disable_power_well)
 +              power_well->set(dev, power_well, false);
  }
  
  void intel_display_power_get(struct drm_device *dev,
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
 -
 -      if (!HAS_POWER_WELL(dev))
 -              return;
 -
 -      if (is_always_on_power_domain(dev, domain))
 -              return;
 +      struct i915_power_well *power_well;
 +      int i;
  
        power_domains = &dev_priv->power_domains;
  
        mutex_lock(&power_domains->lock);
 -      __intel_power_well_get(dev, &power_domains->power_wells[0]);
 +
 +      for_each_power_well(i, power_well, BIT(domain), power_domains)
 +              __intel_power_well_get(dev, power_well);
 +
 +      power_domains->domain_use_count[domain]++;
 +
        mutex_unlock(&power_domains->lock);
  }
  
@@@ -5785,19 -5766,17 +5785,19 @@@ void intel_display_power_put(struct drm
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
 -
 -      if (!HAS_POWER_WELL(dev))
 -              return;
 -
 -      if (is_always_on_power_domain(dev, domain))
 -              return;
 +      struct i915_power_well *power_well;
 +      int i;
  
        power_domains = &dev_priv->power_domains;
  
        mutex_lock(&power_domains->lock);
 -      __intel_power_well_put(dev, &power_domains->power_wells[0]);
 +
 +      WARN_ON(!power_domains->domain_use_count[domain]);
 +      power_domains->domain_use_count[domain]--;
 +
 +      for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
 +              __intel_power_well_put(dev, power_well);
 +
        mutex_unlock(&power_domains->lock);
  }
  
@@@ -5813,7 -5792,10 +5813,7 @@@ void i915_request_power_well(void
  
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
 -
 -      mutex_lock(&hsw_pwr->lock);
 -      __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
 -      mutex_unlock(&hsw_pwr->lock);
 +      intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
  }
  EXPORT_SYMBOL_GPL(i915_request_power_well);
  
@@@ -5827,71 -5809,24 +5827,71 @@@ void i915_release_power_well(void
  
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
 -
 -      mutex_lock(&hsw_pwr->lock);
 -      __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
 -      mutex_unlock(&hsw_pwr->lock);
 +      intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
  }
  EXPORT_SYMBOL_GPL(i915_release_power_well);
  
 +static struct i915_power_well i9xx_always_on_power_well[] = {
 +      {
 +              .name = "always-on",
 +              .always_on = 1,
 +              .domains = POWER_DOMAIN_MASK,
 +      },
 +};
 +
 +static struct i915_power_well hsw_power_wells[] = {
 +      {
 +              .name = "always-on",
 +              .always_on = 1,
 +              .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
 +      },
 +      {
 +              .name = "display",
 +              .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
 +              .is_enabled = hsw_power_well_enabled,
 +              .set = hsw_set_power_well,
 +      },
 +};
 +
 +static struct i915_power_well bdw_power_wells[] = {
 +      {
 +              .name = "always-on",
 +              .always_on = 1,
 +              .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
 +      },
 +      {
 +              .name = "display",
 +              .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
 +              .is_enabled = hsw_power_well_enabled,
 +              .set = hsw_set_power_well,
 +      },
 +};
 +
 +#define set_power_wells(power_domains, __power_wells) ({              \
 +      (power_domains)->power_wells = (__power_wells);                 \
 +      (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
 +})
 +
  int intel_power_domains_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
 -      struct i915_power_well *power_well;
  
        mutex_init(&power_domains->lock);
 -      hsw_pwr = power_domains;
  
 -      power_well = &power_domains->power_wells[0];
 -      power_well->count = 0;
 +      /*
 +       * The enabling order will be from lower to higher indexed wells,
 +       * the disabling order is reversed.
 +       */
 +      if (IS_HASWELL(dev)) {
 +              set_power_wells(power_domains, hsw_power_wells);
 +              hsw_pwr = power_domains;
 +      } else if (IS_BROADWELL(dev)) {
 +              set_power_wells(power_domains, bdw_power_wells);
 +              hsw_pwr = power_domains;
 +      } else {
 +              set_power_wells(power_domains, i9xx_always_on_power_well);
 +      }
  
        return 0;
  }
@@@ -5906,13 -5841,15 +5906,13 @@@ static void intel_power_domains_resume(
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
 -
 -      if (!HAS_POWER_WELL(dev))
 -              return;
 +      int i;
  
        mutex_lock(&power_domains->lock);
 -
 -      power_well = &power_domains->power_wells[0];
 -      __intel_set_power_well(dev, power_well->count > 0);
 -
 +      for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
 +              if (power_well->set)
 +                      power_well->set(dev, power_well, power_well->count > 0);
 +      }
        mutex_unlock(&power_domains->lock);
  }
  
@@@ -5926,13 -5863,13 +5926,13 @@@ void intel_power_domains_init_hw(struc
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      if (!HAS_POWER_WELL(dev))
 -              return;
 -
        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev, true);
        intel_power_domains_resume(dev);
  
 +      if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
 +              return;
 +
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
        if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@@ -5956,23 -5893,25 +5956,23 @@@ void intel_init_pm(struct drm_device *d
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (I915_HAS_FBC(dev)) {
 -              if (HAS_PCH_SPLIT(dev)) {
 +              if (INTEL_INFO(dev)->gen >= 7) {
                        dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 -                      if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 -                              dev_priv->display.enable_fbc =
 -                                      gen7_enable_fbc;
 -                      else
 -                              dev_priv->display.enable_fbc =
 -                                      ironlake_enable_fbc;
 +                      dev_priv->display.enable_fbc = gen7_enable_fbc;
 +                      dev_priv->display.disable_fbc = ironlake_disable_fbc;
 +              } else if (INTEL_INFO(dev)->gen >= 5) {
 +                      dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 +                      dev_priv->display.enable_fbc = ironlake_enable_fbc;
                        dev_priv->display.disable_fbc = ironlake_disable_fbc;
                } else if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
 -              } else if (IS_CRESTLINE(dev)) {
 +              } else {
                        dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
                        dev_priv->display.enable_fbc = i8xx_enable_fbc;
                        dev_priv->display.disable_fbc = i8xx_disable_fbc;
                }
 -              /* 855GM needs testing */
        }
  
        /* For cxsr */
@@@ -6135,48 -6074,59 +6135,48 @@@ int sandybridge_pcode_write(struct drm_
        return 0;
  }
  
 -int vlv_gpu_freq(int ddr_freq, int val)
 +int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
  {
 -      int mult, base;
 +      int div;
  
 -      switch (ddr_freq) {
 +      /* 4 x czclk */
 +      switch (dev_priv->mem_freq) {
        case 800:
 -              mult = 20;
 -              base = 120;
 +              div = 10;
                break;
        case 1066:
 -              mult = 22;
 -              base = 133;
 +              div = 12;
                break;
        case 1333:
 -              mult = 21;
 -              base = 125;
 +              div = 16;
                break;
        default:
                return -1;
        }
  
 -      return ((val - 0xbd) * mult) + base;
 +      return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
  }
  
 -int vlv_freq_opcode(int ddr_freq, int val)
 +int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
  {
 -      int mult, base;
 +      int mul;
  
 -      switch (ddr_freq) {
 +      /* 4 x czclk */
 +      switch (dev_priv->mem_freq) {
        case 800:
 -              mult = 20;
 -              base = 120;
 +              mul = 10;
                break;
        case 1066:
 -              mult = 22;
 -              base = 133;
 +              mul = 12;
                break;
        case 1333:
 -              mult = 21;
 -              base = 125;
 +              mul = 16;
                break;
        default:
                return -1;
        }
  
 -      val /= mult;
 -      val -= base / mult;
 -      val += 0xbd;
 -
 -      if (val > 0xea)
 -              val = 0xea;
 -
 -      return val;
 +      return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
  }
  
  void intel_pm_init(struct drm_device *dev)
index b737a32dd399beb6cea86ef3635aa07d1f78d133,0b02078a0b848c4127385b1d3b5d3ab6be755c80..feb2d669254458d5a38e5c07381cf1f6aba0b619
@@@ -64,8 -64,7 +64,8 @@@ static void __gen6_gt_force_wake_reset(
        __raw_posting_read(dev_priv, ECOBUS);
  }
  
 -static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 +static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
 +                                                      int fw_engine)
  {
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
@@@ -90,8 -89,7 +90,8 @@@ static void __gen6_gt_force_wake_mt_res
        __raw_posting_read(dev_priv, ECOBUS);
  }
  
 -static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 +static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
 +                                                      int fw_engine)
  {
        u32 forcewake_ack;
  
@@@ -123,12 -121,12 +123,12 @@@ static void gen6_gt_check_fifodbg(struc
        u32 gtfifodbg;
  
        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
 -      if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
 -           "MMIO read or write has been dropped %x\n", gtfifodbg))
 -              __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
 +      if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
 +              __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
  }
  
 -static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 +static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
 +                                                      int fw_engine)
  {
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        gen6_gt_check_fifodbg(dev_priv);
  }
  
 -static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 +static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
 +                                                      int fw_engine)
  {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@@ -150,19 -147,12 +150,19 @@@ static int __gen6_gt_wait_for_fifo(stru
  {
        int ret = 0;
  
 +      /* On VLV, FIFO will be shared by both SW and HW.
 +       * So, we need to read the FREE_ENTRIES everytime */
 +      if (IS_VALLEYVIEW(dev_priv->dev))
 +              dev_priv->uncore.fifo_count =
 +                      __raw_i915_read32(dev_priv, GTFIFOCTL) &
 +                                              GT_FIFO_FREE_ENTRIES_MASK;
 +
        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
 -              u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 +              u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
 -                      fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 +                      fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
@@@ -181,112 -171,38 +181,112 @@@ static void vlv_force_wake_reset(struc
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
  }
  
/*
 * Wake the requested VLV engine(s).  For each engine selected in
 * @fw_engine: wait for any stale kernel forcewake ack to clear, request
 * forcewake through the masked-bit register, then wait for the hardware
 * to ack the new request.  Timeouts are reported but not fatal.
 */
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		/* A still-set ack means an earlier request has not been
		 * dropped yet; let it clear before re-requesting. */
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);

}
  
/*
 * Drop the kernel forcewake bit for each engine selected in @fw_engine.
 * Unlike the get path, no ack is awaited here; the trailing FIFO-debug
 * check also acts as the posting read that flushes the writes.
 */
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));


	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);

}
 +
 +void vlv_force_wake_get(struct drm_i915_private *dev_priv,
 +                                              int fw_engine)
 +{
 +      unsigned long irqflags;
 +
 +      spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 +      if (FORCEWAKE_RENDER & fw_engine) {
 +              if (dev_priv->uncore.fw_rendercount++ == 0)
 +                      dev_priv->uncore.funcs.force_wake_get(dev_priv,
 +                                                      FORCEWAKE_RENDER);
 +      }
 +      if (FORCEWAKE_MEDIA & fw_engine) {
 +              if (dev_priv->uncore.fw_mediacount++ == 0)
 +                      dev_priv->uncore.funcs.force_wake_get(dev_priv,
 +                                                      FORCEWAKE_MEDIA);
 +      }
 +
 +      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 +}
 +
 +void vlv_force_wake_put(struct drm_i915_private *dev_priv,
 +                                              int fw_engine)
 +{
 +      unsigned long irqflags;
 +
 +      spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 +
 +      if (FORCEWAKE_RENDER & fw_engine) {
 +              WARN_ON(dev_priv->uncore.fw_rendercount == 0);
 +              if (--dev_priv->uncore.fw_rendercount == 0)
 +                      dev_priv->uncore.funcs.force_wake_put(dev_priv,
 +                                                      FORCEWAKE_RENDER);
 +      }
 +
 +      if (FORCEWAKE_MEDIA & fw_engine) {
 +              WARN_ON(dev_priv->uncore.fw_mediacount == 0);
 +              if (--dev_priv->uncore.fw_mediacount == 0)
 +                      dev_priv->uncore.funcs.force_wake_put(dev_priv,
 +                                                      FORCEWAKE_MEDIA);
 +      }
 +
 +      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
  static void gen6_force_wake_work(struct work_struct *work)
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0)
 -              dev_priv->uncore.funcs.force_wake_put(dev_priv);
 +              dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
+ static void intel_uncore_forcewake_reset(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (IS_VALLEYVIEW(dev)) {
+               vlv_force_wake_reset(dev_priv);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               __gen6_gt_force_wake_reset(dev_priv);
+               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+                       __gen6_gt_force_wake_mt_reset(dev_priv);
+       }
+ }
  void intel_uncore_early_sanitize(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
- }
  
- static void intel_uncore_forcewake_reset(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (IS_VALLEYVIEW(dev)) {
-               vlv_force_wake_reset(dev_priv);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               __gen6_gt_force_wake_reset(dev_priv);
-               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-                       __gen6_gt_force_wake_mt_reset(dev_priv);
-       }
+       intel_uncore_forcewake_reset(dev);
  }
  
  void intel_uncore_sanitize(struct drm_device *dev)
   * be called at the beginning of the sequence followed by a call to
   * gen6_gt_force_wake_put() at the end of the sequence.
   */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	/* No callback installed means this platform needs no forcewake. */
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	/* Redirect to VLV specific routine, which keeps separate
	 * per-engine (render/media) reference counts. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	/* Only wake the GT on the first reference; nested gets just
	 * bump the count under the uncore spinlock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
  
  /*
   * see gen6_gt_force_wake_get()
   */
 -void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
  {
        unsigned long irqflags;
  
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
  
 +      /* Redirect to VLV specific routine */
 +      if (IS_VALLEYVIEW(dev_priv->dev))
 +              return vlv_force_wake_put(dev_priv, fw_engine);
 +
 +
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
@@@ -470,51 -379,16 +472,51 @@@ gen6_read##x(struct drm_i915_private *d
        REG_READ_HEADER(x); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                if (dev_priv->uncore.forcewake_count == 0) \
 -                      dev_priv->uncore.funcs.force_wake_get(dev_priv); \
 +                      dev_priv->uncore.funcs.force_wake_get(dev_priv, \
 +                                                      FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
                if (dev_priv->uncore.forcewake_count == 0) \
 -                      dev_priv->uncore.funcs.force_wake_put(dev_priv); \
 +                      dev_priv->uncore.funcs.force_wake_put(dev_priv, \
 +                                                      FORCEWAKE_ALL); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        REG_READ_FOOTER; \
  }
  
/*
 * Generate vlv_read{8,16,32,64}(): MMIO register reads for Valleyview.
 *
 * The register offset is classified into the render or media forcewake
 * range; the matching per-engine refcount is bumped and, on its 0 -> 1
 * transition, the forcewake callback wakes that engine for the duration
 * of the raw read (dropped again if the count returns to zero).  Offsets
 * outside both ranges are read directly with no forcewake handling.
 *
 * NOTE(review): the counter updates appear to rely on uncore.lock being
 * held by REG_READ_HEADER, mirroring vlv_force_wake_get/put -- confirm
 * against the REG_READ_HEADER definition.
 */
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	unsigned *fwcount; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {	\
		fwengine = FORCEWAKE_RENDER;		\
		fwcount = &dev_priv->uncore.fw_rendercount;    \
	}						\
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {	\
		fwengine = FORCEWAKE_MEDIA;		\
		fwcount = &dev_priv->uncore.fw_mediacount;     \
	}  \
	if (fwengine != 0) {		\
		if ((*fwcount)++ == 0) \
			(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
								fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (--(*fwcount) == 0) \
			(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
							fwengine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}


__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
  __gen6_read(8)
  __gen6_read(16)
  __gen6_read(32)
@@@ -528,7 -402,6 +530,7 @@@ __gen4_read(16
  __gen4_read(32)
  __gen4_read(64)
  
 +#undef __vlv_read
  #undef __gen6_read
  #undef __gen5_read
  #undef __gen4_read
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
  
/* Counterpart to REG_WRITE_HEADER: drop uncore.lock and restore irqs. */
#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

/*
 * Generate gen4_write{8,16,32,64}(): a plain raw MMIO write under the
 * uncore lock, with none of the forcewake/FIFO handling the gen6+
 * write variants need.
 */
#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}
  
  #define __gen5_write(x) \
@@@ -557,7 -427,7 +559,7 @@@ gen5_write##x(struct drm_i915_private *
        REG_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
 -      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 +      REG_WRITE_FOOTER; \
  }
  
  #define __gen6_write(x) \
@@@ -572,7 -442,7 +574,7 @@@ gen6_write##x(struct drm_i915_private *
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
 -      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 +      REG_WRITE_FOOTER; \
  }
  
  #define __hsw_write(x) \
@@@ -589,7 -459,7 +591,7 @@@ hsw_write##x(struct drm_i915_private *d
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_check(dev_priv, reg); \
 -      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 +      REG_WRITE_FOOTER; \
  }
  
  static const u32 gen8_shadowed_regs[] = {
@@@ -619,15 -489,13 +621,15 @@@ gen8_write##x(struct drm_i915_private *
        bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
        if (__needs_put) { \
 -              dev_priv->uncore.funcs.force_wake_get(dev_priv); \
 +              dev_priv->uncore.funcs.force_wake_get(dev_priv, \
 +                                                      FORCEWAKE_ALL); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (__needs_put) { \
 -              dev_priv->uncore.funcs.force_wake_put(dev_priv); \
 +              dev_priv->uncore.funcs.force_wake_put(dev_priv, \
 +                                                      FORCEWAKE_ALL); \
        } \
 -      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 +      REG_WRITE_FOOTER; \
  }
  
  __gen8_write(8)
@@@ -656,7 -524,6 +658,7 @@@ __gen4_write(64
  #undef __gen6_write
  #undef __gen5_write
  #undef __gen4_write
 +#undef REG_WRITE_FOOTER
  #undef REG_WRITE_HEADER
  
  void intel_uncore_init(struct drm_device *dev)
                          gen6_force_wake_work);
  
        if (IS_VALLEYVIEW(dev)) {
 -              dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
 -              dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
 +              dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
 +              dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
 -              __gen6_gt_force_wake_mt_get(dev_priv);
 +              __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
 -              __gen6_gt_force_wake_mt_put(dev_priv);
 +              __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
  
                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
                }
 -              dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
 -              dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
 -              dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
 -              dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
 +
 +              if (IS_VALLEYVIEW(dev)) {
 +                      dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
 +                      dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
 +                      dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
 +                      dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
 +              } else {
 +                      dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
 +                      dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
 +                      dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
 +                      dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
 +              }
                break;
        case 5:
                dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
@@@ -828,43 -687,6 +830,43 @@@ int i915_reg_read_ioctl(struct drm_devi
        return 0;
  }
  
 +int i915_get_reset_stats_ioctl(struct drm_device *dev,
 +                             void *data, struct drm_file *file)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_reset_stats *args = data;
 +      struct i915_ctx_hang_stats *hs;
 +      int ret;
 +
 +      if (args->flags || args->pad)
 +              return -EINVAL;
 +
 +      if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      ret = mutex_lock_interruptible(&dev->struct_mutex);
 +      if (ret)
 +              return ret;
 +
 +      hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
 +      if (IS_ERR(hs)) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return PTR_ERR(hs);
 +      }
 +
 +      if (capable(CAP_SYS_ADMIN))
 +              args->reset_count = i915_reset_count(&dev_priv->gpu_error);
 +      else
 +              args->reset_count = 0;
 +
 +      args->batch_active = hs->batch_active;
 +      args->batch_pending = hs->batch_pending;
 +
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return 0;
 +}
 +
  static int i965_reset_complete(struct drm_device *dev)
  {
        u8 gdrst;
@@@ -948,12 -770,12 +950,12 @@@ static int gen6_do_reset(struct drm_dev
  
        /* If reset with a user forcewake, try to restore, otherwise turn it off */
        if (dev_priv->uncore.forcewake_count)
 -              dev_priv->uncore.funcs.force_wake_get(dev_priv);
 +              dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        else
 -              dev_priv->uncore.funcs.force_wake_put(dev_priv);
 +              dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
  
        /* Restore fifo count */
 -      dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
 +      dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
  
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
  int intel_gpu_reset(struct drm_device *dev)
  {
        switch (INTEL_INFO(dev)->gen) {
 +      case 8:
        case 7:
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
        }
  }
  
 -void intel_uncore_clear_errors(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      /* XXX needs spinlock around caller's grouping */
 -      if (HAS_FPGA_DBG_UNCLAIMED(dev))
 -              __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 -}
 -
  void intel_uncore_check_errors(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;