/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
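
/*
 * Illustration only (not code from this file): a caller would OR these
 * flags into a mask describing the deepest state the GPU may enter; a
 * hypothetical policy such as
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * would permit RC6 and deep RC6 while keeping the deepest RC6pp state off.
 */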
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumed by
 * those transfers.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc parameter.
 */
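
/*
 * For example (illustration only): booting with "i915.enable_fbc=1" on the
 * kernel command line forces FBC on, "i915.enable_fbc=0" forces it off, and
 * a negative value leaves the per-chip default in place, matching how
 * i915.enable_fbc is tested in intel_update_fbc() below.
 */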
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for the compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
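
/*
 * Worked example for the pitch computation above (hypothetical numbers):
 * if dev_priv->fbc.size / FBC_LL_SIZE yielded 8192 and the framebuffer
 * stride were 7680 bytes, the smaller stride would win, and the value
 * programmed on non-GEN2 hardware would be (7680 / 64) - 1 = 119.
 */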
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. This parameter has
	 * no impact on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
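
/*
 * Note on the write sequence above: ECOSKPD appears to follow the masked
 * register convention, where the bits at GEN6_BLITTER_LOCK_SHIFT act as a
 * write-enable mask for the corresponding low bits. The first write unmasks
 * FBC_NOTIFY, the second sets it, and the third re-masks it; the
 * POSTING_READ flushes the writes before force-wake is released.
 */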
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}
	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}
	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0 &&
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}
	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}
	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i845_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	return wm_size;
}
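
/*
 * Worked example for intel_calculate_wm() (hypothetical numbers): with
 * clock_in_khz = 100000 (a 100 MHz pixel clock), pixel_size = 4 and
 * latency_ns = 5000, the FIFO drains (100000 / 1000) * 4 * 5000 / 1000 =
 * 2000 bytes during one latency window. With a 64-byte cacheline that is
 * DIV_ROUND_UP(2000, 64) = 32 entries, so fifo_size = 96 with
 * guard_size = 2 yields a watermark level of 96 - (32 + 2) = 62.
 */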
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
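
/*
 * Worked example for the small buffer method above (hypothetical numbers):
 * clock = 100000 kHz and pixel_size = 4 give
 * (100000 * 4 / 1000) * display_latency_ns / 1000 = 400 * 5000 / 1000 =
 * 2000 bytes drained during the latency window, before the optional
 * tlb_miss adjustment and the conversion to cachelines.
 */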
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
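
/*
 * Worked example (hypothetical numbers, assuming DRAIN_LATENCY_PRECISION_32
 * evaluates to 32): clock = 100000 kHz and pixel_size = 4 give
 * entries = 100 * 4 = 400 > 256, so the 32x precision multiplier is chosen
 * and the plane drain latency becomes (64 * 32 * 4) / (100 * 4) =
 * 8192 / 400 = 20 after integer division.
 */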
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
							either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) is_power_of_2(mask)
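
/*
 * single_plane_enabled() is true only when exactly one bit is set in the
 * mask, e.g. single_plane_enabled(1 << PIPE_B) is true while
 * single_plane_enabled((1 << PIPE_A) | (1 << PIPE_B)) and
 * single_plane_enabled(0) are both false.
 */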
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct intel_framebuffer *fb;

		fb = to_intel_framebuffer(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (fb->obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
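
/*
 * The two methods above compute, with latency in 0.1us units:
 *
 *   method1: bytes drained during the latency window
 *            (pixel_rate * bytes_per_pixel * latency), in 64-byte
 *            blocks, plus 2 guard blocks;
 *   method2: the number of lines starting within the window (rounded
 *            up), times the line payload horiz_pixels * bytes_per_pixel,
 *            again in 64-byte blocks plus 2.
 *
 * Worked example (hypothetical numbers): pixel_rate = 100000 kHz, 4 bpp
 * and latency = 50 (i.e. 5 us) give
 * method1 = DIV_ROUND_UP_ULL(100000 * 4 * 50, 64 * 10000) + 2 = 34.
 */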
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
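
/*
 * Worked example (hypothetical numbers): crtc_htotal = 2200 and
 * crtc_clock = 148500 kHz give
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 in units of 1/8 us,
 * i.e. roughly 14.8 us per scanline.
 */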
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
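
/*
 * Unit note: wm[0] is read in 0.1us steps (e.g. the ILK wm[0] = 7 above
 * means 700 ns), while the hardware stores the WM1+ entries in 0.5us
 * units; intel_print_wm_latency() below therefore scales WM1+ values by
 * 5 before printing them in microseconds.
 */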
2052 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2054 /* ILK sprite LP0 latency is 1300 ns */
2055 if (INTEL_INFO(dev)->gen == 5)
2059 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2061 /* ILK cursor LP0 latency is 1300 ns */
2062 if (INTEL_INFO(dev)->gen == 5)
2065 /* WaDoubleCursorLP3Latency:ivb */
2066 if (IS_IVYBRIDGE(dev))
2070 static int ilk_wm_max_level(const struct drm_device *dev)
2072 /* how many WM levels are we expecting */
2073 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2075 else if (INTEL_INFO(dev)->gen >= 6)
2081 static void intel_print_wm_latency(struct drm_device *dev,
2083 const uint16_t wm[5])
2085 int level, max_level = ilk_wm_max_level(dev);
2087 for (level = 0; level <= max_level; level++) {
2088 unsigned int latency = wm[level];
2091 DRM_ERROR("%s WM%d latency not provided\n",
2096 /* WM1+ latency values in 0.5us units */
2100 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2101 name, level, wm[level],
2102 latency / 10, latency % 10);
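/*
 * Unit sketch (assumption, mirroring the "0.5us units" comments above):
 * raw WM1+ latencies are scaled from 0.5 us units to the 0.1 us units
 * that the message formats, so a raw value of 12 prints as "6.0 usec".
 */
static inline unsigned int example_latency_tenths_us(int level,
						     unsigned int raw)
{
	return level > 0 ? raw * 5 : raw;
}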
2106 static void ilk_setup_wm_latency(struct drm_device *dev)
2108 struct drm_i915_private *dev_priv = dev->dev_private;
2110 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2112 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2113 sizeof(dev_priv->wm.pri_latency));
2114 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2115 sizeof(dev_priv->wm.pri_latency));
2117 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2118 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2120 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2121 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2122 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2125 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2126 struct ilk_pipe_wm_parameters *p)
2128 struct drm_device *dev = crtc->dev;
2129 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2130 enum pipe pipe = intel_crtc->pipe;
2131 struct drm_plane *plane;
2133 if (!intel_crtc_active(crtc))
2137 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2138 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2139 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2140 p->cur.bytes_per_pixel = 4;
2141 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2142 p->cur.horiz_pixels = intel_crtc->cursor_width;
2143 /* TODO: for now, assume primary and cursor planes are always enabled. */
2144 p->pri.enabled = true;
2145 p->cur.enabled = true;
2147 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2148 struct intel_plane *intel_plane = to_intel_plane(plane);
2150 if (intel_plane->pipe == pipe) {
2151 p->spr = intel_plane->wm;
2157 static void ilk_compute_wm_config(struct drm_device *dev,
2158 struct intel_wm_config *config)
2160 struct intel_crtc *intel_crtc;
2162 /* Compute the currently _active_ config */
2163 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2164 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2166 if (!wm->pipe_enabled)
2169 config->sprites_enabled |= wm->sprites_enabled;
2170 config->sprites_scaled |= wm->sprites_scaled;
2171 config->num_pipes_active++;
2175 /* Compute new watermarks for the pipe */
2176 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2177 const struct ilk_pipe_wm_parameters *params,
2178 struct intel_pipe_wm *pipe_wm)
2180 struct drm_device *dev = crtc->dev;
2181 const struct drm_i915_private *dev_priv = dev->dev_private;
2182 int level, max_level = ilk_wm_max_level(dev);
2183 /* LP0 watermark maximums depend on this pipe alone */
2184 struct intel_wm_config config = {
2185 .num_pipes_active = 1,
2186 .sprites_enabled = params->spr.enabled,
2187 .sprites_scaled = params->spr.scaled,
2189 struct ilk_wm_maximums max;
2191 /* LP0 watermarks always use 1/2 DDB partitioning */
2192 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2194 pipe_wm->pipe_enabled = params->active;
2195 pipe_wm->sprites_enabled = params->spr.enabled;
2196 pipe_wm->sprites_scaled = params->spr.scaled;
2198 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2199 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2202 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2203 if (params->spr.scaled)
2206 for (level = 0; level <= max_level; level++)
2207 ilk_compute_wm_level(dev_priv, level, params,
2208 &pipe_wm->wm[level]);
2210 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2211 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2213 /* At least LP0 must be valid */
2214 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
2218 * Merge the watermarks from all active pipes for a specific level.
2220 static void ilk_merge_wm_level(struct drm_device *dev,
2222 struct intel_wm_level *ret_wm)
2224 const struct intel_crtc *intel_crtc;
2226 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2227 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2228 const struct intel_wm_level *wm = &active->wm[level];
2230 if (!active->pipe_enabled)
2236 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2237 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2238 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2239 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2242 ret_wm->enable = true;
2246 * Merge all low power watermarks for all active pipes.
2248 static void ilk_wm_merge(struct drm_device *dev,
2249 const struct intel_wm_config *config,
2250 const struct ilk_wm_maximums *max,
2251 struct intel_pipe_wm *merged)
2253 int level, max_level = ilk_wm_max_level(dev);
2255 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2256 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2257 config->num_pipes_active > 1)
2260 /* ILK: FBC WM must be disabled always */
2261 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2263 /* merge each WM1+ level */
2264 for (level = 1; level <= max_level; level++) {
2265 struct intel_wm_level *wm = &merged->wm[level];
2267 ilk_merge_wm_level(dev, level, wm);
2269 if (!ilk_validate_wm_level(level, max, wm))
2273 * The spec says it is preferred to disable
2274 * FBC WMs instead of disabling a WM level.
2276 if (wm->fbc_val > max->fbc) {
2277 merged->fbc_wm_enabled = false;
2282 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2284 * FIXME this is racy. FBC might get enabled later.
2285 * What we should check here is whether FBC can be
2286 * enabled sometime later.
2288 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2289 for (level = 2; level <= max_level; level++) {
2290 struct intel_wm_level *wm = &merged->wm[level];
2297 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2299 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2300 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
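/*
 * Usage sketch: with five WM levels (HSW/BDW) and the deepest level
 * enabled, the LP1/LP2/LP3 registers map to levels 1/3/4; otherwise they
 * map to levels 1/2/3.
 */
static void example_lp_mapping(const struct intel_pipe_wm *pipe_wm)
{
	int wm_lp;

	for (wm_lp = 1; wm_lp <= 3; wm_lp++)
		DRM_DEBUG_KMS("LP%d -> WM level %d\n", wm_lp,
			      ilk_wm_lp_to_level(wm_lp, pipe_wm));
}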
2303 /* The value we need to program into the WM_LPx latency field */
2304 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2306 struct drm_i915_private *dev_priv = dev->dev_private;
2308 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2311 return dev_priv->wm.pri_latency[level];
2314 static void ilk_compute_wm_results(struct drm_device *dev,
2315 const struct intel_pipe_wm *merged,
2316 enum intel_ddb_partitioning partitioning,
2317 struct ilk_wm_values *results)
2319 struct intel_crtc *intel_crtc;
2322 results->enable_fbc_wm = merged->fbc_wm_enabled;
2323 results->partitioning = partitioning;
2325 /* LP1+ register values */
2326 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2327 const struct intel_wm_level *r;
2329 level = ilk_wm_lp_to_level(wm_lp, merged);
2331 r = &merged->wm[level];
2335 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2336 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2337 (r->pri_val << WM1_LP_SR_SHIFT) |
2340 if (INTEL_INFO(dev)->gen >= 8)
2341 results->wm_lp[wm_lp - 1] |=
2342 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2344 results->wm_lp[wm_lp - 1] |=
2345 r->fbc_val << WM1_LP_FBC_SHIFT;
2347 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2348 WARN_ON(wm_lp != 1);
2349 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2351 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2354 /* LP0 register values */
2355 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2356 enum pipe pipe = intel_crtc->pipe;
2357 const struct intel_wm_level *r =
2358 &intel_crtc->wm.active.wm[0];
2360 if (WARN_ON(!r->enable))
2363 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2365 results->wm_pipe[pipe] =
2366 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2367 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2372 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2373 * case both are at the same level. Prefer r1 in case they're the same. */
2374 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2375 struct intel_pipe_wm *r1,
2376 struct intel_pipe_wm *r2)
2378 int level, max_level = ilk_wm_max_level(dev);
2379 int level1 = 0, level2 = 0;
2381 for (level = 1; level <= max_level; level++) {
2382 if (r1->wm[level].enable)
2384 if (r2->wm[level].enable)
2388 if (level1 == level2) {
2389 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2393 } else if (level1 > level2) {
2400 /* dirty bits used to track which watermarks need changes */
2401 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2402 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2403 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2404 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2405 #define WM_DIRTY_FBC (1 << 24)
2406 #define WM_DIRTY_DDB (1 << 25)
2408 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2409 const struct ilk_wm_values *old,
2410 const struct ilk_wm_values *new)
2412 unsigned int dirty = 0;
2416 for_each_pipe(pipe) {
2417 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2418 dirty |= WM_DIRTY_LINETIME(pipe);
2419 /* Must disable LP1+ watermarks too */
2420 dirty |= WM_DIRTY_LP_ALL;
2423 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2424 dirty |= WM_DIRTY_PIPE(pipe);
2425 /* Must disable LP1+ watermarks too */
2426 dirty |= WM_DIRTY_LP_ALL;
2430 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2431 dirty |= WM_DIRTY_FBC;
2432 /* Must disable LP1+ watermarks too */
2433 dirty |= WM_DIRTY_LP_ALL;
2436 if (old->partitioning != new->partitioning) {
2437 dirty |= WM_DIRTY_DDB;
2438 /* Must disable LP1+ watermarks too */
2439 dirty |= WM_DIRTY_LP_ALL;
2442 /* LP1+ watermarks already deemed dirty, no need to continue */
2443 if (dirty & WM_DIRTY_LP_ALL)
2446 /* Find the lowest numbered LP1+ watermark in need of an update... */
2447 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2448 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2449 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2453 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2454 for (; wm_lp <= 3; wm_lp++)
2455 dirty |= WM_DIRTY_LP(wm_lp);
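/*
 * Usage sketch (hypothetical helper): any pipe, linetime, FBC or DDB
 * change sets WM_DIRTY_LP_ALL, and a change in a lower LP register also
 * dirties the higher-numbered ones, so checking the LP bits of the mask
 * is enough to know whether any LP1+ register will be rewritten.
 */
static bool example_needs_lp_rewrite(struct drm_device *dev,
				     const struct ilk_wm_values *old,
				     const struct ilk_wm_values *new)
{
	return ilk_compute_wm_dirty(dev, old, new) & WM_DIRTY_LP_ALL;
}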
2460 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2463 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2464 bool changed = false;
2466 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2467 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2468 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2471 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2472 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2473 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2476 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2477 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2478 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2483 * Don't touch WM1S_LP_EN here.
2484 * Doing so could cause underruns.
2491 * The spec says we shouldn't write when we don't need to, because every write
2492 * causes WMs to be re-evaluated, expending some power.
2494 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2495 struct ilk_wm_values *results)
2497 struct drm_device *dev = dev_priv->dev;
2498 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2502 dirty = ilk_compute_wm_dirty(dev, previous, results);
2506 _ilk_disable_lp_wm(dev_priv, dirty);
2508 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2509 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2510 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2511 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2512 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2513 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2515 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2516 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2517 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2518 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2519 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2520 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2522 if (dirty & WM_DIRTY_DDB) {
2523 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2524 val = I915_READ(WM_MISC);
2525 if (results->partitioning == INTEL_DDB_PART_1_2)
2526 val &= ~WM_MISC_DATA_PARTITION_5_6;
2528 val |= WM_MISC_DATA_PARTITION_5_6;
2529 I915_WRITE(WM_MISC, val);
2531 val = I915_READ(DISP_ARB_CTL2);
2532 if (results->partitioning == INTEL_DDB_PART_1_2)
2533 val &= ~DISP_DATA_PARTITION_5_6;
2535 val |= DISP_DATA_PARTITION_5_6;
2536 I915_WRITE(DISP_ARB_CTL2, val);
2540 if (dirty & WM_DIRTY_FBC) {
2541 val = I915_READ(DISP_ARB_CTL);
2542 if (results->enable_fbc_wm)
2543 val &= ~DISP_FBC_WM_DIS;
2545 val |= DISP_FBC_WM_DIS;
2546 I915_WRITE(DISP_ARB_CTL, val);
2549 if (dirty & WM_DIRTY_LP(1) &&
2550 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2551 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2553 if (INTEL_INFO(dev)->gen >= 7) {
2554 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2555 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2556 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2557 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2560 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2561 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2562 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2563 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2564 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2565 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2567 dev_priv->wm.hw = *results;
2570 static bool ilk_disable_lp_wm(struct drm_device *dev)
2572 struct drm_i915_private *dev_priv = dev->dev_private;
2574 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2577 static void ilk_update_wm(struct drm_crtc *crtc)
2579 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2580 struct drm_device *dev = crtc->dev;
2581 struct drm_i915_private *dev_priv = dev->dev_private;
2582 struct ilk_wm_maximums max;
2583 struct ilk_pipe_wm_parameters params = {};
2584 struct ilk_wm_values results = {};
2585 enum intel_ddb_partitioning partitioning;
2586 struct intel_pipe_wm pipe_wm = {};
2587 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2588 struct intel_wm_config config = {};
2590 ilk_compute_wm_parameters(crtc, ¶ms);
2592 intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm);
2594 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2597 intel_crtc->wm.active = pipe_wm;
2599 ilk_compute_wm_config(dev, &config);
2601 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2602 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2604 /* 5/6 split only in single pipe config on IVB+ */
2605 if (INTEL_INFO(dev)->gen >= 7 &&
2606 config.num_pipes_active == 1 && config.sprites_enabled) {
2607 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2608 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2610 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2612 best_lp_wm = &lp_wm_1_2;
2615 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2616 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2618 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2620 ilk_write_wm_values(dev_priv, &results);
2623 static void ilk_update_sprite_wm(struct drm_plane *plane,
2624 struct drm_crtc *crtc,
2625 uint32_t sprite_width, int pixel_size,
2626 bool enabled, bool scaled)
2628 struct drm_device *dev = plane->dev;
2629 struct intel_plane *intel_plane = to_intel_plane(plane);
2631 intel_plane->wm.enabled = enabled;
2632 intel_plane->wm.scaled = scaled;
2633 intel_plane->wm.horiz_pixels = sprite_width;
2634 intel_plane->wm.bytes_per_pixel = pixel_size;
2637 * IVB workaround: must disable low power watermarks for at least
2638 * one frame before enabling scaling. LP watermarks can be re-enabled
2639 * when scaling is disabled.
2641 * WaCxSRDisabledForSpriteScaling:ivb
2643 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2644 intel_wait_for_vblank(dev, intel_plane->pipe);
2646 ilk_update_wm(crtc);
2649 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2651 struct drm_device *dev = crtc->dev;
2652 struct drm_i915_private *dev_priv = dev->dev_private;
2653 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2655 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2656 enum pipe pipe = intel_crtc->pipe;
2657 static const unsigned int wm0_pipe_reg[] = {
2658 [PIPE_A] = WM0_PIPEA_ILK,
2659 [PIPE_B] = WM0_PIPEB_ILK,
2660 [PIPE_C] = WM0_PIPEC_IVB,
2663 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2664 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2665 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2667 active->pipe_enabled = intel_crtc_active(crtc);
2669 if (active->pipe_enabled) {
2670 u32 tmp = hw->wm_pipe[pipe];
2673 * For active pipes LP0 watermark is marked as
2674 * enabled, and LP1+ watermarks as disabled since
2675 * we can't really reverse compute them in case
2676 * multiple pipes are active.
2678 active->wm[0].enable = true;
2679 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2680 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2681 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2682 active->linetime = hw->wm_linetime[pipe];
2684 int level, max_level = ilk_wm_max_level(dev);
2687 * For inactive pipes, all watermark levels
2688 * should be marked as enabled but zeroed,
2689 * which is what we'd compute them to.
2691 for (level = 0; level <= max_level; level++)
2692 active->wm[level].enable = true;
2696 void ilk_wm_get_hw_state(struct drm_device *dev)
2698 struct drm_i915_private *dev_priv = dev->dev_private;
2699 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2700 struct drm_crtc *crtc;
2702 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2703 ilk_pipe_wm_get_hw_state(crtc);
2705 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2706 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2707 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2709 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2710 if (INTEL_INFO(dev)->gen >= 7) {
2711 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2712 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2715 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2716 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2717 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2718 else if (IS_IVYBRIDGE(dev))
2719 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2720 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2723 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2727 * intel_update_watermarks - update FIFO watermark values based on current modes
2729 * Calculate watermark values for the various WM regs based on current mode
2730 * and plane configuration.
2732 * There are several cases to deal with here:
2733 * - normal (i.e. non-self-refresh)
2734 * - self-refresh (SR) mode
2735 * - lines are large relative to FIFO size (buffer can hold up to 2)
2736 * - lines are small relative to FIFO size (buffer can hold more than 2
2737 * lines), so we need to account for TLB latency
2739 * The normal calculation is:
2740 * watermark = dotclock * bytes per pixel * latency
2741 * where latency is platform & configuration dependent (we assume pessimal values).
2744 * The SR calculation is:
2745 * watermark = (trunc(latency/line time)+1) * surface width *
2746 * bytes per pixel
2747 * where
2748 * line time = htotal / dotclock
2749 * surface width = hdisplay for normal plane and 64 for cursor
2750 * and latency is assumed to be high, as above.
2752 * The final value programmed to the register should always be rounded up,
2753 * and include an extra 2 entries to account for clock crossings.
2755 * We don't use the sprite, so we can ignore that. And on Crestline we have
2756 * to set the non-SR watermarks to 8.
2758 void intel_update_watermarks(struct drm_crtc *crtc)
2760 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
2762 if (dev_priv->display.update_wm)
2763 dev_priv->display.update_wm(crtc);
2766 void intel_update_sprite_watermarks(struct drm_plane *plane,
2767 struct drm_crtc *crtc,
2768 uint32_t sprite_width, int pixel_size,
2769 bool enabled, bool scaled)
2771 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2773 if (dev_priv->display.update_sprite_wm)
2774 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
2775 pixel_size, enabled, scaled);
2778 static struct drm_i915_gem_object *
2779 intel_alloc_context_page(struct drm_device *dev)
2781 struct drm_i915_gem_object *ctx;
2784 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2786 ctx = i915_gem_alloc_object(dev, 4096);
2788 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2792 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2794 DRM_ERROR("failed to pin power context: %d\n", ret);
2798 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2800 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2807 i915_gem_object_ggtt_unpin(ctx);
2809 drm_gem_object_unreference(&ctx->base);
2814 * Lock protecting IPS related data structures
2816 DEFINE_SPINLOCK(mchdev_lock);
2818 /* Global for IPS driver to get at the current i915 device. Protected by
2819 * mchdev_lock. */
2820 static struct drm_i915_private *i915_mch_dev;
2822 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2824 struct drm_i915_private *dev_priv = dev->dev_private;
2827 assert_spin_locked(&mchdev_lock);
2829 rgvswctl = I915_READ16(MEMSWCTL);
2830 if (rgvswctl & MEMCTL_CMD_STS) {
2831 DRM_DEBUG("gpu busy, RCS change rejected\n");
2832 return false; /* still busy with another command */
2835 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2836 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2837 I915_WRITE16(MEMSWCTL, rgvswctl);
2838 POSTING_READ16(MEMSWCTL);
2840 rgvswctl |= MEMCTL_CMD_STS;
2841 I915_WRITE16(MEMSWCTL, rgvswctl);
2846 static void ironlake_enable_drps(struct drm_device *dev)
2848 struct drm_i915_private *dev_priv = dev->dev_private;
2849 u32 rgvmodectl = I915_READ(MEMMODECTL);
2850 u8 fmax, fmin, fstart, vstart;
2852 spin_lock_irq(&mchdev_lock);
2854 /* Enable temp reporting */
2855 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2856 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2858 /* 100ms RC evaluation intervals */
2859 I915_WRITE(RCUPEI, 100000);
2860 I915_WRITE(RCDNEI, 100000);
2862 /* Set max/min thresholds to 90ms and 80ms respectively */
2863 I915_WRITE(RCBMAXAVG, 90000);
2864 I915_WRITE(RCBMINAVG, 80000);
2866 I915_WRITE(MEMIHYST, 1);
2868 /* Set up min, max, and cur for interrupt handling */
2869 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2870 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2871 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2872 MEMMODE_FSTART_SHIFT;
2874 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2877 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2878 dev_priv->ips.fstart = fstart;
2880 dev_priv->ips.max_delay = fstart;
2881 dev_priv->ips.min_delay = fmin;
2882 dev_priv->ips.cur_delay = fstart;
2884 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2885 fmax, fmin, fstart);
2887 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2890 * Interrupts will be enabled in ironlake_irq_postinstall
2893 I915_WRITE(VIDSTART, vstart);
2894 POSTING_READ(VIDSTART);
2896 rgvmodectl |= MEMMODE_SWMODE_EN;
2897 I915_WRITE(MEMMODECTL, rgvmodectl);
2899 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2900 DRM_ERROR("stuck trying to change perf mode\n");
2903 ironlake_set_drps(dev, fstart);
2905 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2907 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2908 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2909 getrawmonotonic(&dev_priv->ips.last_time2);
2911 spin_unlock_irq(&mchdev_lock);
2914 static void ironlake_disable_drps(struct drm_device *dev)
2916 struct drm_i915_private *dev_priv = dev->dev_private;
2919 spin_lock_irq(&mchdev_lock);
2921 rgvswctl = I915_READ16(MEMSWCTL);
2923 /* Ack interrupts, disable EFC interrupt */
2924 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2925 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2926 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2927 I915_WRITE(DEIIR, DE_PCU_EVENT);
2928 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2930 /* Go back to the starting frequency */
2931 ironlake_set_drps(dev, dev_priv->ips.fstart);
2933 rgvswctl |= MEMCTL_CMD_STS;
2934 I915_WRITE(MEMSWCTL, rgvswctl);
2937 spin_unlock_irq(&mchdev_lock);
2940 /* There's a funny hw issue where the hw returns all 0 when reading from
2941 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2942 * ourselves, instead of doing a rmw cycle (which might result in us clearing
2943 * all limits and the gpu getting stuck at whatever frequency it is at the moment).
2945 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2949 /* Only set the down limit when we've reached the lowest level to avoid
2950 * getting more interrupts, otherwise leave this clear. This prevents a
2951 * race in the hw when coming out of rc6: There's a tiny window where
2952 * the hw runs at the minimal clock before selecting the desired
2953 * frequency, if the down threshold expires in that window we will not
2954 * receive a down interrupt. */
2955 limits = dev_priv->rps.max_freq_softlimit << 24;
2956 if (val <= dev_priv->rps.min_freq_softlimit)
2957 limits |= dev_priv->rps.min_freq_softlimit << 16;
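/*
 * Worked example (hypothetical softlimits): the max limit lives in bits
 * 31:24 and, only once we are at the floor, the min limit in bits 23:16.
 * With max = 0x20 and min = 0x0b, requesting val = 0x18 packs to
 * 0x20000000, while requesting val = 0x0b packs to 0x200b0000.
 */
static u32 example_rps_limits(u8 max_soft, u8 min_soft, u8 val)
{
	u32 limits = (u32)max_soft << 24;

	if (val <= min_soft)
		limits |= (u32)min_soft << 16;

	return limits;
}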
2962 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
2966 new_power = dev_priv->rps.power;
2967 switch (dev_priv->rps.power) {
2969 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
2970 new_power = BETWEEN;
2974 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
2975 new_power = LOW_POWER;
2976 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
2977 new_power = HIGH_POWER;
2981 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
2982 new_power = BETWEEN;
2985 /* Max/min bins are special */
2986 if (val == dev_priv->rps.min_freq_softlimit)
2987 new_power = LOW_POWER;
2988 if (val == dev_priv->rps.max_freq_softlimit)
2989 new_power = HIGH_POWER;
2990 if (new_power == dev_priv->rps.power)
2993 /* Note the units here are not exactly 1us, but 1280ns. */
2994 switch (new_power) {
2996 /* Upclock if more than 95% busy over 16ms */
2997 I915_WRITE(GEN6_RP_UP_EI, 12500);
2998 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3000 /* Downclock if less than 85% busy over 32ms */
3001 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3002 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3004 I915_WRITE(GEN6_RP_CONTROL,
3005 GEN6_RP_MEDIA_TURBO |
3006 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3007 GEN6_RP_MEDIA_IS_GFX |
3009 GEN6_RP_UP_BUSY_AVG |
3010 GEN6_RP_DOWN_IDLE_AVG);
3014 /* Upclock if more than 90% busy over 13ms */
3015 I915_WRITE(GEN6_RP_UP_EI, 10250);
3016 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3018 /* Downclock if less than 75% busy over 32ms */
3019 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3020 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3022 I915_WRITE(GEN6_RP_CONTROL,
3023 GEN6_RP_MEDIA_TURBO |
3024 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3025 GEN6_RP_MEDIA_IS_GFX |
3027 GEN6_RP_UP_BUSY_AVG |
3028 GEN6_RP_DOWN_IDLE_AVG);
3032 /* Upclock if more than 85% busy over 10ms */
3033 I915_WRITE(GEN6_RP_UP_EI, 8000);
3034 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3036 /* Downclock if less than 60% busy over 32ms */
3037 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3038 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3040 I915_WRITE(GEN6_RP_CONTROL,
3041 GEN6_RP_MEDIA_TURBO |
3042 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3043 GEN6_RP_MEDIA_IS_GFX |
3045 GEN6_RP_UP_BUSY_AVG |
3046 GEN6_RP_DOWN_IDLE_AVG);
3050 dev_priv->rps.power = new_power;
3051 dev_priv->rps.last_adj = 0;
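/*
 * Sketch of the evaluation-interval math above: the RP counters tick in
 * 1280 ns units, so an EI of 12500 spans 12500 * 1280 ns = 16 ms, and an
 * up threshold of 11800 ticks is 11800 / 12500 = ~94% busyness over that
 * window.
 */
static inline unsigned int example_ei_us(unsigned int ticks)
{
	/* e.g. 12500 -> 16000 us */
	return ticks * 1280 / 1000;
}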
3054 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3058 if (val > dev_priv->rps.min_freq_softlimit)
3059 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3060 if (val < dev_priv->rps.max_freq_softlimit)
3061 mask |= GEN6_PM_RP_UP_THRESHOLD;
3063 /* IVB and SNB hard hangs on looping batchbuffer
3064 * if GEN6_PM_UP_EI_EXPIRED is masked.
3066 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3067 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3072 /* gen6_set_rps is called to update the frequency request, but should also be
3073 * called when the range (min_delay and max_delay) is modified so that we can
3074 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3075 void gen6_set_rps(struct drm_device *dev, u8 val)
3077 struct drm_i915_private *dev_priv = dev->dev_private;
3079 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3080 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3081 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3083 /* min/max delay may still have been modified so be sure to
3084 * write the limits value.
3086 if (val != dev_priv->rps.cur_freq) {
3087 gen6_set_rps_thresholds(dev_priv, val);
3089 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3090 I915_WRITE(GEN6_RPNSWREQ,
3091 HSW_FREQUENCY(val));
3093 I915_WRITE(GEN6_RPNSWREQ,
3094 GEN6_FREQUENCY(val) |
3096 GEN6_AGGRESSIVE_TURBO);
3099 /* Make sure we continue to get interrupts
3100 * until we hit the minimum or maximum frequencies.
3102 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3103 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3105 POSTING_READ(GEN6_RPNSWREQ);
3107 dev_priv->rps.cur_freq = val;
3108 trace_intel_gpu_freq_change(val * 50);
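/*
 * Unit note (sketch): gen6+ RPS frequency values are in 50 MHz steps,
 * which is why the tracepoint above multiplies by 50; a val of 22 (0x16)
 * therefore requests 1100 MHz.
 */
static inline unsigned int example_rps_to_mhz(u8 val)
{
	return val * 50;
}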
3111 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3113 * * If Gfx is Idle, then
3114 * 1. Mask Turbo interrupts
3115 * 2. Bring up Gfx clock
3116 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3117 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
3118 * 5. Unmask Turbo interrupts
3120 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3123 * When we are idle, drop to the minimum voltage state.
3126 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3129 /* Mask turbo interrupts so that they do not come in between */
3130 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3132 /* Bring up the Gfx clock */
3133 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3134 I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
3135 VLV_GFX_CLK_FORCE_ON_BIT);
3137 if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
3138 I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
3139 DRM_ERROR("GFX_CLK_ON request timed out\n");
3143 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3145 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3146 dev_priv->rps.min_freq_softlimit);
3148 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3149 & GENFREQSTATUS) == 0, 5))
3150 DRM_ERROR("timed out waiting for Punit\n");
3152 /* Release the Gfx clock */
3153 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3154 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3155 ~VLV_GFX_CLK_FORCE_ON_BIT);
3157 I915_WRITE(GEN6_PMINTRMSK,
3158 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3161 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3163 struct drm_device *dev = dev_priv->dev;
3165 mutex_lock(&dev_priv->rps.hw_lock);
3166 if (dev_priv->rps.enabled) {
3167 if (IS_VALLEYVIEW(dev))
3168 vlv_set_rps_idle(dev_priv);
3170 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3171 dev_priv->rps.last_adj = 0;
3173 mutex_unlock(&dev_priv->rps.hw_lock);
3176 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3178 struct drm_device *dev = dev_priv->dev;
3180 mutex_lock(&dev_priv->rps.hw_lock);
3181 if (dev_priv->rps.enabled) {
3182 if (IS_VALLEYVIEW(dev))
3183 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3185 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3186 dev_priv->rps.last_adj = 0;
3188 mutex_unlock(&dev_priv->rps.hw_lock);
3191 void valleyview_set_rps(struct drm_device *dev, u8 val)
3193 struct drm_i915_private *dev_priv = dev->dev_private;
3195 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3196 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3197 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3199 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3200 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3201 dev_priv->rps.cur_freq,
3202 vlv_gpu_freq(dev_priv, val), val);
3204 if (val != dev_priv->rps.cur_freq)
3205 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3207 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3209 dev_priv->rps.cur_freq = val;
3210 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3213 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3215 struct drm_i915_private *dev_priv = dev->dev_private;
3217 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3218 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3219 ~dev_priv->pm_rps_events);
3220 /* Complete PM interrupt masking here doesn't race with the rps work
3221 * item again unmasking PM interrupts because that is using a different
3222 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3223 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3225 spin_lock_irq(&dev_priv->irq_lock);
3226 dev_priv->rps.pm_iir = 0;
3227 spin_unlock_irq(&dev_priv->irq_lock);
3229 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3232 static void gen6_disable_rps(struct drm_device *dev)
3234 struct drm_i915_private *dev_priv = dev->dev_private;
3236 I915_WRITE(GEN6_RC_CONTROL, 0);
3237 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3239 gen6_disable_rps_interrupts(dev);
3242 static void valleyview_disable_rps(struct drm_device *dev)
3244 struct drm_i915_private *dev_priv = dev->dev_private;
3246 I915_WRITE(GEN6_RC_CONTROL, 0);
3248 gen6_disable_rps_interrupts(dev);
3251 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3253 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3254 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3255 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3256 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3259 int intel_enable_rc6(const struct drm_device *dev)
3261 /* No RC6 before Ironlake */
3262 if (INTEL_INFO(dev)->gen < 5)
3265 /* Respect the kernel parameter if it is set */
3266 if (i915.enable_rc6 >= 0)
3267 return i915.enable_rc6;
3269 /* Disable RC6 on Ironlake */
3270 if (INTEL_INFO(dev)->gen == 5)
3273 if (IS_IVYBRIDGE(dev))
3274 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3276 return INTEL_RC6_ENABLE;
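/*
 * Usage sketch: callers test individual bits of the returned mask, as
 * the enable paths below do with INTEL_RC6_ENABLE; on IVB the default
 * policy above allows deep RC6 (RC6p) as well.
 */
static bool example_wants_deep_rc6(const struct drm_device *dev)
{
	return (intel_enable_rc6(dev) & INTEL_RC6p_ENABLE) != 0;
}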
3279 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3281 struct drm_i915_private *dev_priv = dev->dev_private;
3283 spin_lock_irq(&dev_priv->irq_lock);
3284 WARN_ON(dev_priv->rps.pm_iir);
3285 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3286 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3287 spin_unlock_irq(&dev_priv->irq_lock);
3290 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3292 /* All of these values are in units of 50MHz */
3293 dev_priv->rps.cur_freq = 0;
3294 /* static values from HW: RP0 (max) > RP1 > RPn (min_freq) */
3295 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3296 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3297 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3298 /* XXX: only BYT has a special efficient freq */
3299 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3300 /* hw_max = RP0 until we check for overclocking */
3301 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3303 /* Preserve min/max settings in case of re-init */
3304 if (dev_priv->rps.max_freq_softlimit == 0)
3305 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3307 if (dev_priv->rps.min_freq_softlimit == 0)
3308 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
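/*
 * Worked decode (hypothetical register value): RP0 sits in bits 7:0,
 * RP1 in bits 15:8 and RPn in bits 23:16, all in 50 MHz units, so a
 * rp_state_cap of 0x000b0e16 means RP0 = 1100 MHz, RP1 = 700 MHz and
 * RPn = 550 MHz.
 */
static void example_decode_rp_state_cap(u32 cap, u8 *rp0, u8 *rp1, u8 *rpn)
{
	*rp0 = cap & 0xff;
	*rp1 = (cap >> 8) & 0xff;
	*rpn = (cap >> 16) & 0xff;
}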
3311 static void gen8_enable_rps(struct drm_device *dev)
3313 struct drm_i915_private *dev_priv = dev->dev_private;
3314 struct intel_ring_buffer *ring;
3315 uint32_t rc6_mask = 0, rp_state_cap;
3318 /* 1a: Software RC state - RC0 */
3319 I915_WRITE(GEN6_RC_STATE, 0);
3321 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3322 * hasn't enabled a state yet where we need forcewake, but the BIOS may have. */
3323 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3325 /* 2a: Disable RC states. */
3326 I915_WRITE(GEN6_RC_CONTROL, 0);
3328 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3329 parse_rp_state_cap(dev_priv, rp_state_cap);
3331 /* 2b: Program RC6 thresholds.*/
3332 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3333 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3334 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3335 for_each_ring(ring, dev_priv, unused)
3336 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3337 I915_WRITE(GEN6_RC_SLEEP, 0);
3338 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3341 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3342 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3343 intel_print_rc6_info(dev, rc6_mask);
3344 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3345 GEN6_RC_CTL_EI_MODE(1) |
3348 /* 4: Program defaults and thresholds for RPS */
3349 I915_WRITE(GEN6_RPNSWREQ,
3350 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3351 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3352 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3353 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3354 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3356 /* Docs recommend 900 MHz and 300 MHz, respectively */
3357 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3358 dev_priv->rps.max_freq_softlimit << 24 |
3359 dev_priv->rps.min_freq_softlimit << 16);
3361 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3362 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3363 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3364 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3366 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3369 I915_WRITE(GEN6_RP_CONTROL,
3370 GEN6_RP_MEDIA_TURBO |
3371 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3372 GEN6_RP_MEDIA_IS_GFX |
3374 GEN6_RP_UP_BUSY_AVG |
3375 GEN6_RP_DOWN_IDLE_AVG);
3377 /* 6: Ring frequency + overclocking (our driver does this later) */
3379 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3381 gen6_enable_rps_interrupts(dev);
3383 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3386 static void gen6_enable_rps(struct drm_device *dev)
3388 struct drm_i915_private *dev_priv = dev->dev_private;
3389 struct intel_ring_buffer *ring;
3392 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3397 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3399 /* Here begins a magic sequence of register writes to enable
3400 * auto-downclocking.
3402 * Perhaps there might be some value in exposing these to
3403 * userspace... */
3405 I915_WRITE(GEN6_RC_STATE, 0);
3407 /* Clear the DBG now so we don't confuse earlier errors */
3408 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3409 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3410 I915_WRITE(GTFIFODBG, gtfifodbg);
3413 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3415 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3416 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3418 parse_rp_state_cap(dev_priv, rp_state_cap);
3420 /* disable the counters and set deterministic thresholds */
3421 I915_WRITE(GEN6_RC_CONTROL, 0);
3423 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3424 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3425 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3426 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3427 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3429 for_each_ring(ring, dev_priv, i)
3430 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3432 I915_WRITE(GEN6_RC_SLEEP, 0);
3433 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3434 if (IS_IVYBRIDGE(dev))
3435 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3437 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3438 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3439 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3441 /* Check if we are enabling RC6 */
3442 rc6_mode = intel_enable_rc6(dev_priv->dev);
3443 if (rc6_mode & INTEL_RC6_ENABLE)
3444 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3446 /* We don't use those on Haswell */
3447 if (!IS_HASWELL(dev)) {
3448 if (rc6_mode & INTEL_RC6p_ENABLE)
3449 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3451 if (rc6_mode & INTEL_RC6pp_ENABLE)
3452 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3455 intel_print_rc6_info(dev, rc6_mask);
3457 I915_WRITE(GEN6_RC_CONTROL,
3459 GEN6_RC_CTL_EI_MODE(1) |
3460 GEN6_RC_CTL_HW_ENABLE);
3462 /* Power down if completely idle for over 50ms */
3463 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3464 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3466 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3468 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3470 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3471 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3472 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3473 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3474 (pcu_mbox & 0xff) * 50);
3475 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3478 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3479 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3481 gen6_enable_rps_interrupts(dev);
3484 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3485 if (IS_GEN6(dev) && ret) {
3486 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3487 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3488 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3489 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3490 rc6vids &= 0xffff00;
3491 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3492 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3494 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3497 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3500 void gen6_update_ring_freq(struct drm_device *dev)
3502 struct drm_i915_private *dev_priv = dev->dev_private;
3504 unsigned int gpu_freq;
3505 unsigned int max_ia_freq, min_ring_freq;
3506 int scaling_factor = 180;
3507 struct cpufreq_policy *policy;
3509 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3511 policy = cpufreq_cpu_get(0);
3513 max_ia_freq = policy->cpuinfo.max_freq;
3514 cpufreq_cpu_put(policy);
3517 * Default to measured freq if none found; the PCU will ensure we don't go over. */
3520 max_ia_freq = tsc_khz;
3523 /* Convert from kHz to MHz */
3524 max_ia_freq /= 1000;
3526 min_ring_freq = I915_READ(DCLK) & 0xf;
3527 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3528 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3531 * For each potential GPU frequency, load a ring frequency we'd like
3532 * to use for memory access. We do this by specifying the IA frequency
3533 * the PCU should use as a reference to determine the ring frequency.
3535 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3537 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3538 unsigned int ia_freq = 0, ring_freq = 0;
3540 if (INTEL_INFO(dev)->gen >= 8) {
3541 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3542 ring_freq = max(min_ring_freq, gpu_freq);
3543 } else if (IS_HASWELL(dev)) {
3544 ring_freq = mult_frac(gpu_freq, 5, 4);
3545 ring_freq = max(min_ring_freq, ring_freq);
3546 /* leave ia_freq as the default, chosen by cpufreq */
3548 /* On older processors, there is no separate ring
3549 * clock domain, so in order to boost the bandwidth
3550 * of the ring, we need to upclock the CPU (ia_freq).
3552 * For GPU frequencies less than 750MHz,
3553 * just use the lowest ring freq.
3555 if (gpu_freq < min_freq)
3558 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3559 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3562 sandybridge_pcode_write(dev_priv,
3563 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3564 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3565 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
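/*
 * Worked example (hypothetical numbers) for the pre-HSW branch above:
 * with max_ia_freq = 3400 MHz, scaling_factor = 180 and a GPU frequency
 * 4 bins below the max, ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz, which
 * is then rounded to 100 MHz units (30) before being sent to the PCU.
 */
static unsigned int example_ia_freq_ratio(unsigned int max_ia_mhz, int diff)
{
	unsigned int ia_freq = max_ia_mhz - (diff * 180) / 2;

	/* e.g. (3400, 4) -> 30 */
	return (ia_freq + 50) / 100;
}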
3570 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3574 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3576 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3578 rp0 = min_t(u32, rp0, 0xea);
3583 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3587 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3588 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3589 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3590 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3595 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3597 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3600 /* Check that the pctx buffer wasn't moved under us. */
3601 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3603 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3605 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3606 dev_priv->vlv_pctx->stolen->start);
3609 static void valleyview_setup_pctx(struct drm_device *dev)
3611 struct drm_i915_private *dev_priv = dev->dev_private;
3612 struct drm_i915_gem_object *pctx;
3613 unsigned long pctx_paddr;
3615 int pctx_size = 24*1024;
3617 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3619 pcbr = I915_READ(VLV_PCBR);
3621 /* BIOS set it up already, grab the pre-alloc'd space */
3624 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3625 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3627 I915_GTT_OFFSET_NONE,
3633 * From the Gunit register HAS:
3634 * The Gfx driver is expected to program this register and ensure
3635 * proper allocation within Gfx stolen memory. For example, this
3636 * register should be programmed such that the PCBR range does not
3637 * overlap with other ranges, such as the frame buffer, protected
3638 * memory, or any other relevant ranges.
3640 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3642 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3646 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3647 I915_WRITE(VLV_PCBR, pctx_paddr);
3650 dev_priv->vlv_pctx = pctx;
3653 static void valleyview_cleanup_pctx(struct drm_device *dev)
3655 struct drm_i915_private *dev_priv = dev->dev_private;
3657 if (WARN_ON(!dev_priv->vlv_pctx))
3660 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3661 dev_priv->vlv_pctx = NULL;
3664 static void valleyview_enable_rps(struct drm_device *dev)
3666 struct drm_i915_private *dev_priv = dev->dev_private;
3667 struct intel_ring_buffer *ring;
3668 u32 gtfifodbg, val, rc6_mode = 0;
3671 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3673 valleyview_check_pctx(dev_priv);
3675 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3676 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3678 I915_WRITE(GTFIFODBG, gtfifodbg);
3681 /* If VLV, Forcewake all wells, else re-direct to regular path */
3682 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3684 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3685 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3686 I915_WRITE(GEN6_RP_UP_EI, 66000);
3687 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3689 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3691 I915_WRITE(GEN6_RP_CONTROL,
3692 GEN6_RP_MEDIA_TURBO |
3693 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3694 GEN6_RP_MEDIA_IS_GFX |
3696 GEN6_RP_UP_BUSY_AVG |
3697 GEN6_RP_DOWN_IDLE_CONT);
3699 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3700 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3701 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3703 for_each_ring(ring, dev_priv, i)
3704 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3706 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
3708 /* allows RC6 residency counter to work */
3709 I915_WRITE(VLV_COUNTER_CONTROL,
3710 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3711 VLV_MEDIA_RC6_COUNT_EN |
3712 VLV_RENDER_RC6_COUNT_EN));
3713 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3714 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
3716 intel_print_rc6_info(dev, rc6_mode);
3718 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3720 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3722 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3723 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3725 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
3726 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3727 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3728 dev_priv->rps.cur_freq);
3730 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3731 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3732 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3733 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3734 dev_priv->rps.max_freq);
3736 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3737 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3738 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3739 dev_priv->rps.efficient_freq);
3741 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3742 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3743 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3744 dev_priv->rps.min_freq);
3746 /* Preserve min/max settings in case of re-init */
3747 if (dev_priv->rps.max_freq_softlimit == 0)
3748 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3750 if (dev_priv->rps.min_freq_softlimit == 0)
3751 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3753 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3754 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3755 dev_priv->rps.efficient_freq);
3757 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
3759 gen6_enable_rps_interrupts(dev);
3761 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3764 void ironlake_teardown_rc6(struct drm_device *dev)
3766 struct drm_i915_private *dev_priv = dev->dev_private;
3768 if (dev_priv->ips.renderctx) {
3769 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
3770 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3771 dev_priv->ips.renderctx = NULL;
3774 if (dev_priv->ips.pwrctx) {
3775 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
3776 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3777 dev_priv->ips.pwrctx = NULL;
3781 static void ironlake_disable_rc6(struct drm_device *dev)
3783 struct drm_i915_private *dev_priv = dev->dev_private;
3785 if (I915_READ(PWRCTXA)) {
3786 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3787 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3788 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3791 I915_WRITE(PWRCTXA, 0);
3792 POSTING_READ(PWRCTXA);
3794 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3795 POSTING_READ(RSTDBYCTL);
3799 static int ironlake_setup_rc6(struct drm_device *dev)
3801 struct drm_i915_private *dev_priv = dev->dev_private;
3803 if (dev_priv->ips.renderctx == NULL)
3804 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3805 if (!dev_priv->ips.renderctx)
3808 if (dev_priv->ips.pwrctx == NULL)
3809 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3810 if (!dev_priv->ips.pwrctx) {
3811 ironlake_teardown_rc6(dev);
3818 static void ironlake_enable_rc6(struct drm_device *dev)
3820 struct drm_i915_private *dev_priv = dev->dev_private;
3821 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3822 bool was_interruptible;
3825 /* rc6 disabled by default due to repeated reports of hanging during boot and resume. */
3828 if (!intel_enable_rc6(dev))
3831 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3833 ret = ironlake_setup_rc6(dev);
3837 was_interruptible = dev_priv->mm.interruptible;
3838 dev_priv->mm.interruptible = false;
3841 * GPU can automatically power down the render unit if given a page to save state.
3844 ret = intel_ring_begin(ring, 6);
3846 ironlake_teardown_rc6(dev);
3847 dev_priv->mm.interruptible = was_interruptible;
3851 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3852 intel_ring_emit(ring, MI_SET_CONTEXT);
3853 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
3855 MI_SAVE_EXT_STATE_EN |
3856 MI_RESTORE_EXT_STATE_EN |
3857 MI_RESTORE_INHIBIT);
3858 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3859 intel_ring_emit(ring, MI_NOOP);
3860 intel_ring_emit(ring, MI_FLUSH);
3861 intel_ring_advance(ring);
3864 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3865 * does an implicit flush; combined with the MI_FLUSH above, it should be
3866 * safe to assume that renderctx is valid
3868 ret = intel_ring_idle(ring);
3869 dev_priv->mm.interruptible = was_interruptible;
3871 DRM_ERROR("failed to enable ironlake power savings\n");
3872 ironlake_teardown_rc6(dev);
3876 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3877 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3879 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3882 static unsigned long intel_pxfreq(u32 vidfreq)
3885 int div = (vidfreq & 0x3f0000) >> 16;
3886 int post = (vidfreq & 0x3000) >> 12;
3887 int pre = (vidfreq & 0x7);
3892 freq = ((div * 133333) / ((1<<post) * pre));
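/*
 * Worked example (hypothetical fuse fields) for the decode above: with
 * div = 2, post = 1 and pre = 1, freq = (2 * 133333) / ((1 << 1) * 1)
 * = 133333, i.e. that VID entry corresponds to ~133 MHz.
 */
static unsigned long example_pxfreq(int div, int post, int pre)
{
	if (!pre)
		return 0;	/* guard the pre == 0 divider case */

	return (div * 133333UL) / ((1UL << post) * pre);
}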
3897 static const struct cparams {
3903 { 1, 1333, 301, 28664 },
3904 { 1, 1066, 294, 24460 },
3905 { 1, 800, 294, 25192 },
3906 { 0, 1333, 276, 27605 },
3907 { 0, 1066, 276, 27605 },
3908 { 0, 800, 231, 23784 },
3911 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3913 u64 total_count, diff, ret;
3914 u32 count1, count2, count3, m = 0, c = 0;
3915 unsigned long now = jiffies_to_msecs(jiffies), diff1;
3918 assert_spin_locked(&mchdev_lock);
3920 diff1 = now - dev_priv->ips.last_time1;
3922 /* Prevent division-by-zero if we are asking too fast.
3923 * Also, we don't get interesting results if we are polling
3924 * faster than once in 10ms, so just return the saved value in such cases. */
3928 return dev_priv->ips.chipset_power;
3930 count1 = I915_READ(DMIEC);
3931 count2 = I915_READ(DDREC);
3932 count3 = I915_READ(CSIEC);
3934 total_count = count1 + count2 + count3;
3936 /* FIXME: handle per-counter overflow */
3937 if (total_count < dev_priv->ips.last_count1) {
3938 diff = ~0UL - dev_priv->ips.last_count1;
3939 diff += total_count;
3941 diff = total_count - dev_priv->ips.last_count1;
3944 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
3945 if (cparams[i].i == dev_priv->ips.c_m &&
3946 cparams[i].t == dev_priv->ips.r_t) {
3953 diff = div_u64(diff, diff1);
3954 ret = ((m * diff) + c);
3955 ret = div_u64(ret, 10);
3957 dev_priv->ips.last_count1 = total_count;
3958 dev_priv->ips.last_time1 = now;
3960 dev_priv->ips.chipset_power = ret;
3965 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3967 struct drm_device *dev = dev_priv->dev;
3970 if (INTEL_INFO(dev)->gen != 5)
3973 spin_lock_irq(&mchdev_lock);
3975 val = __i915_chipset_val(dev_priv);
3977 spin_unlock_irq(&mchdev_lock);
3982 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
3984 unsigned long m, x, b;
3987 tsfs = I915_READ(TSFS);
3989 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
3990 x = I915_READ8(TR1);
3992 b = tsfs & TSFS_INTR_MASK;
3994 return ((m * x) / 127) - b;
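/*
 * Worked example (hypothetical register reads) for the formula above:
 * with slope m = 40, TR1 reading x = 100 and intercept b = 10, the
 * reported value is (40 * 100) / 127 - 10 = 21.
 */
static unsigned long example_mch_val(unsigned long m, unsigned long x,
				     unsigned long b)
{
	return ((m * x) / 127) - b;
}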
3997 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
3999 struct drm_device *dev = dev_priv->dev;
4000 static const struct v_table {
4001 u16 vd; /* in .1 mil */
4002 u16 vm; /* in .1 mil */
4133 if (INTEL_INFO(dev)->is_mobile)
4134 return v_table[pxvid].vm;
4136 return v_table[pxvid].vd;
4139 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4141 struct timespec now, diff1;
4143 unsigned long diffms;
4146 assert_spin_locked(&mchdev_lock);
4148 getrawmonotonic(&now);
4149 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4151 /* Don't divide by 0 */
4152 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4156 count = I915_READ(GFXEC);
4158 if (count < dev_priv->ips.last_count2) {
4159 diff = ~0UL - dev_priv->ips.last_count2;
4162 diff = count - dev_priv->ips.last_count2;
4165 dev_priv->ips.last_count2 = count;
4166 dev_priv->ips.last_time2 = now;
4168 /* More magic constants... */
4170 diff = div_u64(diff, diffms * 10);
4171 dev_priv->ips.gfx_power = diff;
4174 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4176 struct drm_device *dev = dev_priv->dev;
4178 if (INTEL_INFO(dev)->gen != 5)
4181 spin_lock_irq(&mchdev_lock);
4183 __i915_update_gfx_val(dev_priv);
4185 spin_unlock_irq(&mchdev_lock);
4188 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4190 unsigned long t, corr, state1, corr2, state2;
4193 assert_spin_locked(&mchdev_lock);
4195 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4196 pxvid = (pxvid >> 24) & 0x7f;
4197 ext_v = pvid_to_extvid(dev_priv, pxvid);
4201 t = i915_mch_val(dev_priv);
4203 /* Revel in the empirically derived constants */
4205 /* Correction factor in 1/100000 units */
4207 corr = ((t * 2349) + 135940);
4209 corr = ((t * 964) + 29317);
4211 corr = ((t * 301) + 1004);
4213 corr = corr * ((150142 * state1) / 10000 - 78642);
4215 corr2 = (corr * dev_priv->ips.corr);
4217 state2 = (corr2 * state1) / 10000;
4218 state2 /= 100; /* convert to mW */
4220 __i915_update_gfx_val(dev_priv);
4222 return dev_priv->ips.gfx_power + state2;
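/*
 * So the reported graphics power is the cached counter-based estimate
 * (ips.gfx_power, refreshed just above) plus state2, a voltage- and
 * temperature-corrected term already converted to mW.
 */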
4225 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4227 struct drm_device *dev = dev_priv->dev;
4230 if (INTEL_INFO(dev)->gen != 5)
4233 spin_lock_irq(&mchdev_lock);
4235 val = __i915_gfx_val(dev_priv);
4237 spin_unlock_irq(&mchdev_lock);
4243 * i915_read_mch_val - return value for IPS use
4245 * Calculate and return a value for the IPS driver to use when deciding whether
4246 * we have thermal and power headroom to increase CPU or GPU power budget.
4248 unsigned long i915_read_mch_val(void)
4250 struct drm_i915_private *dev_priv;
4251 unsigned long chipset_val, graphics_val, ret = 0;
4253 spin_lock_irq(&mchdev_lock);
4256 dev_priv = i915_mch_dev;
4258 chipset_val = __i915_chipset_val(dev_priv);
4259 graphics_val = __i915_gfx_val(dev_priv);
4261 ret = chipset_val + graphics_val;
4264 spin_unlock_irq(&mchdev_lock);
4268 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4271 * i915_gpu_raise - raise GPU frequency limit
4273 * Raise the limit; IPS indicates we have thermal headroom.
4275 bool i915_gpu_raise(void)
4277 struct drm_i915_private *dev_priv;
4280 spin_lock_irq(&mchdev_lock);
4281 if (!i915_mch_dev) {
4285 dev_priv = i915_mch_dev;
4287 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4288 dev_priv->ips.max_delay--;
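/*
 * Note the inversion: max_delay is a delay value, so decrementing it
 * raises the frequency ceiling (and i915_gpu_lower() below increments
 * it to throttle back), bounded by the fused fmax and min_delay limits.
 */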
4291 spin_unlock_irq(&mchdev_lock);
4295 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4298 * i915_gpu_lower - lower GPU frequency limit
4300 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4301 * frequency maximum.
4303 bool i915_gpu_lower(void)
4305 struct drm_i915_private *dev_priv;
4308 spin_lock_irq(&mchdev_lock);
4309 if (!i915_mch_dev) {
4313 dev_priv = i915_mch_dev;
4315 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4316 dev_priv->ips.max_delay++;
4319 spin_unlock_irq(&mchdev_lock);
4323 EXPORT_SYMBOL_GPL(i915_gpu_lower);
4326 * i915_gpu_busy - indicate GPU busyness to IPS
4328 * Tell the IPS driver whether or not the GPU is busy.
4330 bool i915_gpu_busy(void)
4332 struct drm_i915_private *dev_priv;
4333 struct intel_ring_buffer *ring;
4337 spin_lock_irq(&mchdev_lock);
4340 dev_priv = i915_mch_dev;
4342 for_each_ring(ring, dev_priv, i)
4343 ret |= !list_empty(&ring->request_list);
4346 spin_unlock_irq(&mchdev_lock);
4350 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4353 * i915_gpu_turbo_disable - disable graphics turbo
4355 * Disable graphics turbo by resetting the max frequency and setting the
4356 * current frequency to the default.
4358 bool i915_gpu_turbo_disable(void)
4360 struct drm_i915_private *dev_priv;
4363 spin_lock_irq(&mchdev_lock);
4364 if (!i915_mch_dev) {
4368 dev_priv = i915_mch_dev;
4370 dev_priv->ips.max_delay = dev_priv->ips.fstart;
4372 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4376 spin_unlock_irq(&mchdev_lock);
4380 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4383 * Tells the intel_ips driver that the i915 driver is now loaded, if
4384 * IPS got loaded first.
4386 * This awkward dance is so that neither module has to depend on the
4387 * other in order for IPS to do the appropriate communication of
4388 * GPU turbo limits to i915.
4391 ips_ping_for_i915_load(void)
4395 link = symbol_get(ips_link_to_i915_driver);
4398 symbol_put(ips_link_to_i915_driver);
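/*
 * symbol_get() succeeds only if intel_ips is already loaded, taking a
 * temporary reference on its exported hook so the module cannot be
 * unloaded while we ping it; symbol_put() drops that reference again.
 */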
4402 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4404 /* We only register the i915 ips part with intel-ips once everything is
4405 * set up, to avoid intel-ips sneaking in and reading bogus values. */
4406 spin_lock_irq(&mchdev_lock);
4407 i915_mch_dev = dev_priv;
4408 spin_unlock_irq(&mchdev_lock);
4410 ips_ping_for_i915_load();
4413 void intel_gpu_ips_teardown(void)
4415 spin_lock_irq(&mchdev_lock);
4416 i915_mch_dev = NULL;
4417 spin_unlock_irq(&mchdev_lock);
4420 static void intel_init_emon(struct drm_device *dev)
4422 struct drm_i915_private *dev_priv = dev->dev_private;
4427 /* Disable PMON while we program the event weights */
4431 /* Program energy weights for various events */
4432 I915_WRITE(SDEW, 0x15040d00);
4433 I915_WRITE(CSIEW0, 0x007f0000);
4434 I915_WRITE(CSIEW1, 0x1e220004);
4435 I915_WRITE(CSIEW2, 0x04000004);
4437 for (i = 0; i < 5; i++)
4438 I915_WRITE(PEW + (i * 4), 0);
4439 for (i = 0; i < 3; i++)
4440 I915_WRITE(DEW + (i * 4), 0);
4442 /* Program P-state weights to account for frequency power adjustment */
4443 for (i = 0; i < 16; i++) {
4444 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4445 unsigned long freq = intel_pxfreq(pxvidfreq);
4446 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4451 val *= (freq / 1000);
4453 val /= (127*127*900);
4455 DRM_ERROR("bad pxval: %ld\n", val);
4458 /* Render standby states get 0 weight */
4462 for (i = 0; i < 4; i++) {
4463 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4464 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4465 I915_WRITE(PXW + (i * 4), val);
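/*
 * The sixteen 8-bit P-state weights computed above are packed four to a
 * 32-bit PXW register, most significant byte first.
 */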
4468 /* Adjust magic regs to magic values (more experimental results) */
4469 I915_WRITE(OGW0, 0);
4470 I915_WRITE(OGW1, 0);
4471 I915_WRITE(EG0, 0x00007f00);
4472 I915_WRITE(EG1, 0x0000000e);
4473 I915_WRITE(EG2, 0x000e0000);
4474 I915_WRITE(EG3, 0x68000300);
4475 I915_WRITE(EG4, 0x42000000);
4476 I915_WRITE(EG5, 0x00140031);
4480 for (i = 0; i < 8; i++)
4481 I915_WRITE(PXWL + (i * 4), 0);
4483 /* Enable PMON + select events */
4484 I915_WRITE(ECR, 0x80000019);
4486 lcfuse = I915_READ(LCFUSE02);
4488 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
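/*
 * ips.corr appears to be a per-part correction factor blown into the
 * LCFUSE02 fuses; __i915_gfx_val() multiplies it into its estimate.
 */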
4491 void intel_init_gt_powersave(struct drm_device *dev)
4493 if (IS_VALLEYVIEW(dev))
4494 valleyview_setup_pctx(dev);
4497 void intel_cleanup_gt_powersave(struct drm_device *dev)
4499 if (IS_VALLEYVIEW(dev))
4500 valleyview_cleanup_pctx(dev);
4503 void intel_disable_gt_powersave(struct drm_device *dev)
4505 struct drm_i915_private *dev_priv = dev->dev_private;
4507 /* Interrupts should be disabled already to avoid re-arming. */
4508 WARN_ON(dev->irq_enabled);
4510 if (IS_IRONLAKE_M(dev)) {
4511 ironlake_disable_drps(dev);
4512 ironlake_disable_rc6(dev);
4513 } else if (INTEL_INFO(dev)->gen >= 6) {
4514 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4515 cancel_work_sync(&dev_priv->rps.work);
4516 mutex_lock(&dev_priv->rps.hw_lock);
4517 if (IS_VALLEYVIEW(dev))
4518 valleyview_disable_rps(dev);
4520 gen6_disable_rps(dev);
4521 dev_priv->rps.enabled = false;
4522 mutex_unlock(&dev_priv->rps.hw_lock);
4526 static void intel_gen6_powersave_work(struct work_struct *work)
4528 struct drm_i915_private *dev_priv =
4529 container_of(work, struct drm_i915_private,
4530 rps.delayed_resume_work.work);
4531 struct drm_device *dev = dev_priv->dev;
4533 mutex_lock(&dev_priv->rps.hw_lock);
4535 if (IS_VALLEYVIEW(dev)) {
4536 valleyview_enable_rps(dev);
4537 } else if (IS_BROADWELL(dev)) {
4538 gen8_enable_rps(dev);
4539 gen6_update_ring_freq(dev);
4541 gen6_enable_rps(dev);
4542 gen6_update_ring_freq(dev);
4544 dev_priv->rps.enabled = true;
4545 mutex_unlock(&dev_priv->rps.hw_lock);
4548 void intel_enable_gt_powersave(struct drm_device *dev)
4550 struct drm_i915_private *dev_priv = dev->dev_private;
4552 if (IS_IRONLAKE_M(dev)) {
4553 ironlake_enable_drps(dev);
4554 ironlake_enable_rc6(dev);
4555 intel_init_emon(dev);
4556 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4558 * PCU communication is slow and this doesn't need to be
4559 * done at any specific time, so do this out of our fast path
4560 * to make resume and init faster.
4562 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4563 round_jiffies_up_relative(HZ));
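/*
 * round_jiffies_up_relative(HZ) defers the work by roughly one second,
 * rounded up to a whole-second boundary so the wakeup can batch with
 * other timers.
 */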
4567 static void ibx_init_clock_gating(struct drm_device *dev)
4569 struct drm_i915_private *dev_priv = dev->dev_private;
4572 * On Ibex Peak and Cougar Point, we need to disable clock
4573 * gating for the panel power sequencer or it will fail to
4574 * start up when no ports are active.
4576 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4579 static void g4x_disable_trickle_feed(struct drm_device *dev)
4581 struct drm_i915_private *dev_priv = dev->dev_private;
4584 for_each_pipe(pipe) {
4585 I915_WRITE(DSPCNTR(pipe),
4586 I915_READ(DSPCNTR(pipe)) |
4587 DISPPLANE_TRICKLE_FEED_DISABLE);
4588 intel_flush_primary_plane(dev_priv, pipe);
4592 static void ilk_init_lp_watermarks(struct drm_device *dev)
4594 struct drm_i915_private *dev_priv = dev->dev_private;
4596 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
4597 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
4598 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
4601 * Don't touch WM1S_LP_EN here.
4602 * Doing so could cause underruns.
4606 static void ironlake_init_clock_gating(struct drm_device *dev)
4608 struct drm_i915_private *dev_priv = dev->dev_private;
4609 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4613 * WaFbcDisableDpfcClockGating:ilk
4615 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4616 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4617 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
4619 I915_WRITE(PCH_3DCGDIS0,
4620 MARIUNIT_CLOCK_GATE_DISABLE |
4621 SVSMUNIT_CLOCK_GATE_DISABLE);
4622 I915_WRITE(PCH_3DCGDIS1,
4623 VFMUNIT_CLOCK_GATE_DISABLE);
4626 * According to the spec the following bits should be set in
4627 * order to enable memory self-refresh:
4628 * - bit 22/21 of 0x42004
4629 * - bit 5 of 0x42020
4630 * - bit 15 of 0x45000
4632 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4633 (I915_READ(ILK_DISPLAY_CHICKEN2) |
4634 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4635 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
4636 I915_WRITE(DISP_ARB_CTL,
4637 (I915_READ(DISP_ARB_CTL) |
4640 ilk_init_lp_watermarks(dev);
4643 * According to the hardware documentation, the following bits
4644 * should be set unconditionally in order to enable FBC:
4645 * - bit 22 of 0x42000
4646 * - bit 22 of 0x42004
4647 * - bits 7, 8 and 9 of 0x42020.
4649 if (IS_IRONLAKE_M(dev)) {
4650 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
4651 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4652 I915_READ(ILK_DISPLAY_CHICKEN1) |
4654 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4655 I915_READ(ILK_DISPLAY_CHICKEN2) |
4659 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4661 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4662 I915_READ(ILK_DISPLAY_CHICKEN2) |
4663 ILK_ELPIN_409_SELECT);
4664 I915_WRITE(_3D_CHICKEN2,
4665 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4666 _3D_CHICKEN2_WM_READ_PIPELINED);
4668 /* WaDisableRenderCachePipelinedFlush:ilk */
4669 I915_WRITE(CACHE_MODE_0,
4670 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4672 /* WaDisable_RenderCache_OperationalFlush:ilk */
4673 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
4675 g4x_disable_trickle_feed(dev);
4677 ibx_init_clock_gating(dev);
4680 static void cpt_init_clock_gating(struct drm_device *dev)
4682 struct drm_i915_private *dev_priv = dev->dev_private;
4687 * On Ibex Peak and Cougar Point, we need to disable clock
4688 * gating for the panel power sequencer or it will fail to
4689 * start up when no ports are active.
4691 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
4692 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
4693 PCH_CPUNIT_CLOCK_GATE_DISABLE);
4694 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4695 DPLS_EDP_PPS_FIX_DIS);
4696 /* The following fixes weird display corruption (a few pixels shifted
4697 * downward) seen only on the LVDS panels of some HP laptops with IvyBridge.
4699 for_each_pipe(pipe) {
4700 val = I915_READ(TRANS_CHICKEN2(pipe));
4701 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4702 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4703 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4704 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4705 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4706 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4707 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4708 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4710 /* WADP0ClockGatingDisable */
4711 for_each_pipe(pipe) {
4712 I915_WRITE(TRANS_CHICKEN1(pipe),
4713 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4717 static void gen6_check_mch_setup(struct drm_device *dev)
4719 struct drm_i915_private *dev_priv = dev->dev_private;
4722 tmp = I915_READ(MCH_SSKPD);
4723 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4724 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4725 DRM_INFO("This can cause pipe underruns and display issues.\n");
4726 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4730 static void gen6_init_clock_gating(struct drm_device *dev)
4732 struct drm_i915_private *dev_priv = dev->dev_private;
4733 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4735 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4737 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4738 I915_READ(ILK_DISPLAY_CHICKEN2) |
4739 ILK_ELPIN_409_SELECT);
4741 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4742 I915_WRITE(_3D_CHICKEN,
4743 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4745 /* WaSetupGtModeTdRowDispatch:snb */
4746 if (IS_SNB_GT1(dev))
4747 I915_WRITE(GEN6_GT_MODE,
4748 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4750 /* WaDisable_RenderCache_OperationalFlush:snb */
4751 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
4754 * BSpec recommends 8x4 when MSAA is used,
4755 * however in practice 16x4 seems fastest.
4757 * Note that PS/WM thread counts depend on the WIZ hashing
4758 * disable bit, which we don't touch here, but it's good
4759 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4761 I915_WRITE(GEN6_GT_MODE,
4762 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4764 ilk_init_lp_watermarks(dev);
4766 I915_WRITE(CACHE_MODE_0,
4767 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4769 I915_WRITE(GEN6_UCGCTL1,
4770 I915_READ(GEN6_UCGCTL1) |
4771 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4772 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4774 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4775 * gating disable must be set. Failure to set it results in
4776 * flickering pixels due to Z write ordering failures after
4777 * some amount of runtime in the Mesa "fire" demo, and Unigine
4778 * Sanctuary and Tropics, and apparently anything else with
4779 * alpha test or pixel discard.
4781 * According to the spec, bit 11 (RCCUNIT) must also be set,
4782 * but we didn't debug actual testcases to find it out.
4784 * WaDisableRCCUnitClockGating:snb
4785 * WaDisableRCPBUnitClockGating:snb
4787 I915_WRITE(GEN6_UCGCTL2,
4788 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4789 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4791 /* WaStripsFansDisableFastClipPerformanceFix:snb */
4792 I915_WRITE(_3D_CHICKEN3,
4793 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
4797 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
4798 * 3DSTATE_SF number of SF output attributes is more than 16."
4800 I915_WRITE(_3D_CHICKEN3,
4801 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
4804 * According to the spec the following bits should be
4805 * set in order to enable memory self-refresh and fbc:
4806 * The bit21 and bit22 of 0x42000
4807 * The bit21 and bit22 of 0x42004
4808 * The bit5 and bit7 of 0x42020
4809 * The bit14 of 0x70180
4810 * The bit14 of 0x71180
4812 * WaFbcAsynchFlipDisableFbcQueue:snb
4814 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4815 I915_READ(ILK_DISPLAY_CHICKEN1) |
4816 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4817 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4818 I915_READ(ILK_DISPLAY_CHICKEN2) |
4819 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
4820 I915_WRITE(ILK_DSPCLK_GATE_D,
4821 I915_READ(ILK_DSPCLK_GATE_D) |
4822 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4823 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4825 g4x_disable_trickle_feed(dev);
4827 cpt_init_clock_gating(dev);
4829 gen6_check_mch_setup(dev);
4832 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4834 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4837 * WaVSThreadDispatchOverride:ivb,vlv
4839 * This actually overrides the dispatch
4840 * mode for all thread types.
4842 reg &= ~GEN7_FF_SCHED_MASK;
4843 reg |= GEN7_FF_TS_SCHED_HW;
4844 reg |= GEN7_FF_VS_SCHED_HW;
4845 reg |= GEN7_FF_DS_SCHED_HW;
4847 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4850 static void lpt_init_clock_gating(struct drm_device *dev)
4852 struct drm_i915_private *dev_priv = dev->dev_private;
4855 * TODO: this bit should only be enabled when really needed, then
4856 * disabled when not needed anymore in order to save power.
4858 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4859 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4860 I915_READ(SOUTH_DSPCLK_GATE_D) |
4861 PCH_LP_PARTITION_LEVEL_DISABLE);
4863 /* WADPOClockGatingDisable:hsw */
4864 I915_WRITE(_TRANSA_CHICKEN1,
4865 I915_READ(_TRANSA_CHICKEN1) |
4866 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4869 static void lpt_suspend_hw(struct drm_device *dev)
4871 struct drm_i915_private *dev_priv = dev->dev_private;
4873 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4874 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4876 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4877 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4881 static void gen8_init_clock_gating(struct drm_device *dev)
4883 struct drm_i915_private *dev_priv = dev->dev_private;
4886 I915_WRITE(WM3_LP_ILK, 0);
4887 I915_WRITE(WM2_LP_ILK, 0);
4888 I915_WRITE(WM1_LP_ILK, 0);
4890 /* FIXME(BDW): Check all the w/a, some might only apply to
4891 * pre-production hw. */
4893 /* WaDisablePartialInstShootdown:bdw */
4894 I915_WRITE(GEN8_ROW_CHICKEN,
4895 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
4897 /* WaDisableThreadStallDopClockGating:bdw */
4898 /* FIXME: Unclear whether we really need this on production bdw. */
4899 I915_WRITE(GEN8_ROW_CHICKEN,
4900 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
4903 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
4904 * pre-production hardware
4906 I915_WRITE(HALF_SLICE_CHICKEN3,
4907 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
4908 I915_WRITE(HALF_SLICE_CHICKEN3,
4909 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
4910 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
4912 I915_WRITE(_3D_CHICKEN3,
4913 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
4915 I915_WRITE(COMMON_SLICE_CHICKEN2,
4916 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
4918 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4919 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
4921 /* WaSwitchSolVfFArbitrationPriority:bdw */
4922 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4924 /* WaPsrDPAMaskVBlankInSRD:bdw */
4925 I915_WRITE(CHICKEN_PAR1_1,
4926 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
4928 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
4929 for_each_pipe(pipe) {
4930 I915_WRITE(CHICKEN_PIPESL_1(pipe),
4931 I915_READ(CHICKEN_PIPESL_1(pipe)) |
4932 BDW_DPRS_MASK_VBLANK_SRD);
4935 /* Use Force Non-Coherent whenever executing a 3D context. This is a
4936 * workaround for a possible hang in the unlikely event a TLB
4937 * invalidation occurs during a PSD flush.
4939 I915_WRITE(HDC_CHICKEN0,
4940 I915_READ(HDC_CHICKEN0) |
4941 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
4943 /* WaVSRefCountFullforceMissDisable:bdw */
4944 /* WaDSRefCountFullforceMissDisable:bdw */
4945 I915_WRITE(GEN7_FF_THREAD_MODE,
4946 I915_READ(GEN7_FF_THREAD_MODE) &
4947 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
4950 * BSpec recommends 8x4 when MSAA is used,
4951 * however in practice 16x4 seems fastest.
4953 * Note that PS/WM thread counts depend on the WIZ hashing
4954 * disable bit, which we don't touch here, but it's good
4955 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4957 I915_WRITE(GEN7_GT_MODE,
4958 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4960 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
4961 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
4963 /* WaDisableSDEUnitClockGating:bdw */
4964 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
4965 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
4967 /* Wa4x4STCOptimizationDisable:bdw */
4968 I915_WRITE(CACHE_MODE_1,
4969 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
4972 static void haswell_init_clock_gating(struct drm_device *dev)
4974 struct drm_i915_private *dev_priv = dev->dev_private;
4976 ilk_init_lp_watermarks(dev);
4978 /* L3 caching of data atomics doesn't work -- disable it. */
4979 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
4980 I915_WRITE(HSW_ROW_CHICKEN3,
4981 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
4983 /* This is required by WaCatErrorRejectionIssue:hsw */
4984 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4985 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4986 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4988 /* WaVSRefCountFullforceMissDisable:hsw */
4989 I915_WRITE(GEN7_FF_THREAD_MODE,
4990 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
4992 /* WaDisable_RenderCache_OperationalFlush:hsw */
4993 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
4995 /* enable HiZ Raw Stall Optimization */
4996 I915_WRITE(CACHE_MODE_0_GEN7,
4997 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
4999 /* WaDisable4x2SubspanOptimization:hsw */
5000 I915_WRITE(CACHE_MODE_1,
5001 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5004 * BSpec recommends 8x4 when MSAA is used,
5005 * however in practice 16x4 seems fastest.
5007 * Note that PS/WM thread counts depend on the WIZ hashing
5008 * disable bit, which we don't touch here, but it's good
5009 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5011 I915_WRITE(GEN7_GT_MODE,
5012 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5014 /* WaSwitchSolVfFArbitrationPriority:hsw */
5015 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5017 /* WaRsPkgCStateDisplayPMReq:hsw */
5018 I915_WRITE(CHICKEN_PAR1_1,
5019 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5021 lpt_init_clock_gating(dev);
5024 static void ivybridge_init_clock_gating(struct drm_device *dev)
5026 struct drm_i915_private *dev_priv = dev->dev_private;
5029 ilk_init_lp_watermarks(dev);
5031 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5033 /* WaDisableEarlyCull:ivb */
5034 I915_WRITE(_3D_CHICKEN3,
5035 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5037 /* WaDisableBackToBackFlipFix:ivb */
5038 I915_WRITE(IVB_CHICKEN3,
5039 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5040 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5042 /* WaDisablePSDDualDispatchEnable:ivb */
5043 if (IS_IVB_GT1(dev))
5044 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5045 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5047 /* WaDisable_RenderCache_OperationalFlush:ivb */
5048 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5050 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5051 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5052 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5054 /* WaApplyL3ControlAndL3ChickenMode:ivb */
5055 I915_WRITE(GEN7_L3CNTLREG1,
5056 GEN7_WA_FOR_GEN7_L3_CONTROL);
5057 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5058 GEN7_WA_L3_CHICKEN_MODE);
5059 if (IS_IVB_GT1(dev))
5060 I915_WRITE(GEN7_ROW_CHICKEN2,
5061 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5063 /* must write both registers */
5064 I915_WRITE(GEN7_ROW_CHICKEN2,
5065 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5066 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5067 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5070 /* WaForceL3Serialization:ivb */
5071 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5072 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5075 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5076 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5078 I915_WRITE(GEN6_UCGCTL2,
5079 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5081 /* This is required by WaCatErrorRejectionIssue:ivb */
5082 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5083 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5084 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5086 g4x_disable_trickle_feed(dev);
5088 gen7_setup_fixed_func_scheduler(dev_priv);
5090 if (0) { /* causes HiZ corruption on ivb:gt1 */
5091 /* enable HiZ Raw Stall Optimization */
5092 I915_WRITE(CACHE_MODE_0_GEN7,
5093 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5096 /* WaDisable4x2SubspanOptimization:ivb */
5097 I915_WRITE(CACHE_MODE_1,
5098 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5101 * BSpec recommends 8x4 when MSAA is used,
5102 * however in practice 16x4 seems fastest.
5104 * Note that PS/WM thread counts depend on the WIZ hashing
5105 * disable bit, which we don't touch here, but it's good
5106 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5108 I915_WRITE(GEN7_GT_MODE,
5109 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5111 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5112 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5113 snpcr |= GEN6_MBC_SNPCR_MED;
5114 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5116 if (!HAS_PCH_NOP(dev))
5117 cpt_init_clock_gating(dev);
5119 gen6_check_mch_setup(dev);
5122 static void valleyview_init_clock_gating(struct drm_device *dev)
5124 struct drm_i915_private *dev_priv = dev->dev_private;
5127 mutex_lock(&dev_priv->rps.hw_lock);
5128 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5129 mutex_unlock(&dev_priv->rps.hw_lock);
5130 switch ((val >> 6) & 3) {
5133 dev_priv->mem_freq = 800;
5136 dev_priv->mem_freq = 1066;
5139 dev_priv->mem_freq = 1333;
5142 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5144 dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
5145 DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
5146 dev_priv->vlv_cdclk_freq);
5148 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5150 /* WaDisableEarlyCull:vlv */
5151 I915_WRITE(_3D_CHICKEN3,
5152 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5154 /* WaDisableBackToBackFlipFix:vlv */
5155 I915_WRITE(IVB_CHICKEN3,
5156 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5157 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5159 /* WaPsdDispatchEnable:vlv */
5160 /* WaDisablePSDDualDispatchEnable:vlv */
5161 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5162 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5163 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5165 /* WaDisable_RenderCache_OperationalFlush:vlv */
5166 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5168 /* WaForceL3Serialization:vlv */
5169 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5170 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5172 /* WaDisableDopClockGating:vlv */
5173 I915_WRITE(GEN7_ROW_CHICKEN2,
5174 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5176 /* This is required by WaCatErrorRejectionIssue:vlv */
5177 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5178 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5179 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5181 gen7_setup_fixed_func_scheduler(dev_priv);
5184 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5185 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5187 I915_WRITE(GEN6_UCGCTL2,
5188 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5190 /* WaDisableL3Bank2xClockGate:vlv */
5191 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5193 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5196 * BSpec says this must be set, even though
5197 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5199 I915_WRITE(CACHE_MODE_1,
5200 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5203 * WaIncreaseL3CreditsForVLVB0:vlv
5204 * This is the hardware default actually.
5206 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5209 * WaDisableVLVClockGating_VBIIssue:vlv
5210 * Disable clock gating on the GCFG unit to prevent a delay
5211 * in the reporting of vblank events.
5213 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5216 static void g4x_init_clock_gating(struct drm_device *dev)
5218 struct drm_i915_private *dev_priv = dev->dev_private;
5219 uint32_t dspclk_gate;
5221 I915_WRITE(RENCLK_GATE_D1, 0);
5222 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5223 GS_UNIT_CLOCK_GATE_DISABLE |
5224 CL_UNIT_CLOCK_GATE_DISABLE);
5225 I915_WRITE(RAMCLK_GATE_D, 0);
5226 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5227 OVRUNIT_CLOCK_GATE_DISABLE |
5228 OVCUNIT_CLOCK_GATE_DISABLE;
5230 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5231 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5233 /* WaDisableRenderCachePipelinedFlush */
5234 I915_WRITE(CACHE_MODE_0,
5235 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5237 /* WaDisable_RenderCache_OperationalFlush:g4x */
5238 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5240 g4x_disable_trickle_feed(dev);
5243 static void crestline_init_clock_gating(struct drm_device *dev)
5245 struct drm_i915_private *dev_priv = dev->dev_private;
5247 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5248 I915_WRITE(RENCLK_GATE_D2, 0);
5249 I915_WRITE(DSPCLK_GATE_D, 0);
5250 I915_WRITE(RAMCLK_GATE_D, 0);
5251 I915_WRITE16(DEUC, 0);
5252 I915_WRITE(MI_ARB_STATE,
5253 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5255 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5256 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5259 static void broadwater_init_clock_gating(struct drm_device *dev)
5261 struct drm_i915_private *dev_priv = dev->dev_private;
5263 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5264 I965_RCC_CLOCK_GATE_DISABLE |
5265 I965_RCPB_CLOCK_GATE_DISABLE |
5266 I965_ISC_CLOCK_GATE_DISABLE |
5267 I965_FBC_CLOCK_GATE_DISABLE);
5268 I915_WRITE(RENCLK_GATE_D2, 0);
5269 I915_WRITE(MI_ARB_STATE,
5270 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5272 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5273 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5276 static void gen3_init_clock_gating(struct drm_device *dev)
5278 struct drm_i915_private *dev_priv = dev->dev_private;
5279 u32 dstate = I915_READ(D_STATE);
5281 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5282 DSTATE_DOT_CLOCK_GATING;
5283 I915_WRITE(D_STATE, dstate);
5285 if (IS_PINEVIEW(dev))
5286 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5288 /* IIR "flip pending" means done if this bit is set */
5289 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5292 static void i85x_init_clock_gating(struct drm_device *dev)
5294 struct drm_i915_private *dev_priv = dev->dev_private;
5296 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5299 static void i830_init_clock_gating(struct drm_device *dev)
5301 struct drm_i915_private *dev_priv = dev->dev_private;
5303 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5306 void intel_init_clock_gating(struct drm_device *dev)
5308 struct drm_i915_private *dev_priv = dev->dev_private;
5310 dev_priv->display.init_clock_gating(dev);
5313 void intel_suspend_hw(struct drm_device *dev)
5315 if (HAS_PCH_LPT(dev))
5316 lpt_suspend_hw(dev);
5319 #define for_each_power_well(i, power_well, domain_mask, power_domains) \
5321 i < (power_domains)->power_well_count && \
5322 ((power_well) = &(power_domains)->power_wells[i]); \
5324 if ((power_well)->domains & (domain_mask))
5326 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
5327 for (i = (power_domains)->power_well_count - 1; \
5328 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
5330 if ((power_well)->domains & (domain_mask))
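/*
 * A minimal usage sketch: walk only the wells whose ->domains mask
 * overlaps the requested domains, e.g.
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		power_well->ops->enable(dev_priv, power_well);
 *
 * The _rev variant walks the array backwards, matching the rule that
 * wells are enabled in ascending and disabled in descending order.
 */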
5333 * We should only use the power well if we explicitly asked the hardware to
5334 * enable it, so check if it's enabled and also check if we've requested it to be enabled.
5337 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5338 struct i915_power_well *power_well)
5340 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5341 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5344 bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
5345 enum intel_display_power_domain domain)
5347 struct i915_power_domains *power_domains;
5349 power_domains = &dev_priv->power_domains;
5351 return power_domains->domain_use_count[domain];
5354 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5355 enum intel_display_power_domain domain)
5357 struct i915_power_domains *power_domains;
5358 struct i915_power_well *power_well;
5362 if (dev_priv->pm.suspended)
5365 power_domains = &dev_priv->power_domains;
5369 mutex_lock(&power_domains->lock);
5370 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5371 if (power_well->always_on)
5374 if (!power_well->ops->is_enabled(dev_priv, power_well)) {
5379 mutex_unlock(&power_domains->lock);
5385 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5386 * when not needed anymore. We have 4 registers that can request the power well
5387 * to be enabled, and it will only be disabled if none of the registers is
5388 * requesting it to be enabled.
5390 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5392 struct drm_device *dev = dev_priv->dev;
5393 unsigned long irqflags;
5396 * After we re-enable the power well, if we touch VGA register 0x3d5
5397 * we'll get unclaimed register interrupts. This stops after we write
5398 * anything to the VGA MSR register. The vgacon module uses this
5399 * register all the time, so if we unbind our driver and, as a
5400 * consequence, bind vgacon, we'll get stuck in an infinite loop at
5401 * console_unlock(). So here we touch the VGA MSR register, making
5402 * sure vgacon can keep working normally without triggering interrupts
5403 * and error messages.
5405 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
5406 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5407 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5409 if (IS_BROADWELL(dev)) {
5410 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5411 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5412 dev_priv->de_irq_mask[PIPE_B]);
5413 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5414 ~dev_priv->de_irq_mask[PIPE_B] |
5416 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5417 dev_priv->de_irq_mask[PIPE_C]);
5418 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5419 ~dev_priv->de_irq_mask[PIPE_C] |
5421 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5422 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5426 static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
5428 assert_spin_locked(&dev->vbl_lock);
5430 dev->vblank[pipe].last = 0;
5433 static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
5435 struct drm_device *dev = dev_priv->dev;
5437 unsigned long irqflags;
5440 * After this, the registers on the pipes that are part of the power
5441 * well will become zero, so we have to adjust our counters according to that.
5444 * FIXME: Should we do this in general in drm_vblank_post_modeset?
5446 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5449 reset_vblank_counter(dev, pipe);
5450 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5453 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
5454 struct i915_power_well *power_well, bool enable)
5456 bool is_enabled, enable_requested;
5459 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5460 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5461 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5464 if (!enable_requested)
5465 I915_WRITE(HSW_PWR_WELL_DRIVER,
5466 HSW_PWR_WELL_ENABLE_REQUEST);
5469 DRM_DEBUG_KMS("Enabling power well\n");
5470 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5471 HSW_PWR_WELL_STATE_ENABLED), 20))
5472 DRM_ERROR("Timeout enabling power well\n");
5475 hsw_power_well_post_enable(dev_priv);
5477 if (enable_requested) {
5478 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5479 POSTING_READ(HSW_PWR_WELL_DRIVER);
5480 DRM_DEBUG_KMS("Requesting to disable the power well\n");
5482 hsw_power_well_post_disable(dev_priv);
5487 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
5488 struct i915_power_well *power_well)
5490 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
5493 * We're taking over the BIOS, so clear any requests made by it since
5494 * the driver is in charge now.
5496 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5497 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5500 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
5501 struct i915_power_well *power_well)
5503 hsw_set_power_well(dev_priv, power_well, true);
5506 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
5507 struct i915_power_well *power_well)
5509 hsw_set_power_well(dev_priv, power_well, false);
5512 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
5513 struct i915_power_well *power_well)
5517 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
5518 struct i915_power_well *power_well)
5523 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5524 struct i915_power_well *power_well, bool enable)
5526 enum punit_power_well power_well_id = power_well->data;
5531 mask = PUNIT_PWRGT_MASK(power_well_id);
5532 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
5533 PUNIT_PWRGT_PWR_GATE(power_well_id);
5535 mutex_lock(&dev_priv->rps.hw_lock);
5538 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
5543 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
5546 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
5548 if (wait_for(COND, 100))
5549 DRM_ERROR("timout setting power well state %08x (%08x)\n",
5551 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
5556 mutex_unlock(&dev_priv->rps.hw_lock);
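/*
 * The Punit handshake above: write the requested power state into
 * PUNIT_REG_PWRGT_CTRL, then poll PUNIT_REG_PWRGT_STATUS (the COND
 * macro) until the hardware reports that state, giving up after 100 ms.
 */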
5559 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
5560 struct i915_power_well *power_well)
5562 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
5565 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
5566 struct i915_power_well *power_well)
5568 vlv_set_power_well(dev_priv, power_well, true);
5571 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
5572 struct i915_power_well *power_well)
5574 vlv_set_power_well(dev_priv, power_well, false);
5577 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
5578 struct i915_power_well *power_well)
5580 int power_well_id = power_well->data;
5581 bool enabled = false;
5586 mask = PUNIT_PWRGT_MASK(power_well_id);
5587 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
5589 mutex_lock(&dev_priv->rps.hw_lock);
5591 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
5593 * We only ever set the power-on and power-gate states, anything
5594 * else is unexpected.
5596 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
5597 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
5602 * A transient state at this point would mean some unexpected party
5603 * is poking at the power controls too.
5605 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
5606 WARN_ON(ctrl != state);
5608 mutex_unlock(&dev_priv->rps.hw_lock);
5613 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
5614 struct i915_power_well *power_well)
5616 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5618 vlv_set_power_well(dev_priv, power_well, true);
5620 spin_lock_irq(&dev_priv->irq_lock);
5621 valleyview_enable_display_irqs(dev_priv);
5622 spin_unlock_irq(&dev_priv->irq_lock);
5625 * During driver initialization we need to defer enabling hotplug
5626 * processing until fbdev is set up.
5628 if (dev_priv->enable_hotplug_processing)
5629 intel_hpd_init(dev_priv->dev);
5631 i915_redisable_vga_power_on(dev_priv->dev);
5634 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
5635 struct i915_power_well *power_well)
5637 struct drm_device *dev = dev_priv->dev;
5640 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5642 spin_lock_irq(&dev_priv->irq_lock);
5644 __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
5646 valleyview_disable_display_irqs(dev_priv);
5647 spin_unlock_irq(&dev_priv->irq_lock);
5649 spin_lock_irq(&dev->vbl_lock);
5651 reset_vblank_counter(dev, pipe);
5652 spin_unlock_irq(&dev->vbl_lock);
5654 vlv_set_power_well(dev_priv, power_well, false);
5657 static void check_power_well_state(struct drm_i915_private *dev_priv,
5658 struct i915_power_well *power_well)
5660 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
5662 if (power_well->always_on || !i915.disable_power_well) {
5669 if (enabled != (power_well->count > 0))
5675 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
5676 power_well->name, power_well->always_on, enabled,
5677 power_well->count, i915.disable_power_well);
5680 void intel_display_power_get(struct drm_i915_private *dev_priv,
5681 enum intel_display_power_domain domain)
5683 struct i915_power_domains *power_domains;
5684 struct i915_power_well *power_well;
5687 intel_runtime_pm_get(dev_priv);
5689 power_domains = &dev_priv->power_domains;
5691 mutex_lock(&power_domains->lock);
5693 for_each_power_well(i, power_well, BIT(domain), power_domains) {
5694 if (!power_well->count++) {
5695 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
5696 power_well->ops->enable(dev_priv, power_well);
5699 check_power_well_state(dev_priv, power_well);
5702 power_domains->domain_use_count[domain]++;
5704 mutex_unlock(&power_domains->lock);
5707 void intel_display_power_put(struct drm_i915_private *dev_priv,
5708 enum intel_display_power_domain domain)
5710 struct i915_power_domains *power_domains;
5711 struct i915_power_well *power_well;
5714 power_domains = &dev_priv->power_domains;
5716 mutex_lock(&power_domains->lock);
5718 WARN_ON(!power_domains->domain_use_count[domain]);
5719 power_domains->domain_use_count[domain]--;
5721 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5722 WARN_ON(!power_well->count);
5724 if (!--power_well->count && i915.disable_power_well) {
5725 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
5726 power_well->ops->disable(dev_priv, power_well);
5729 check_power_well_state(dev_priv, power_well);
5732 mutex_unlock(&power_domains->lock);
5734 intel_runtime_pm_put(dev_priv);
5737 static struct i915_power_domains *hsw_pwr;
5739 /* Display audio driver power well request */
5740 void i915_request_power_well(void)
5742 struct drm_i915_private *dev_priv;
5744 if (WARN_ON(!hsw_pwr))
5747 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5749 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
5751 EXPORT_SYMBOL_GPL(i915_request_power_well);
5753 /* Display audio driver power well release */
5754 void i915_release_power_well(void)
5756 struct drm_i915_private *dev_priv;
5758 if (WARN_ON(!hsw_pwr))
5761 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5763 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
5765 EXPORT_SYMBOL_GPL(i915_release_power_well);
5767 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
5769 #define HSW_ALWAYS_ON_POWER_DOMAINS ( \
5770 BIT(POWER_DOMAIN_PIPE_A) | \
5771 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
5772 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
5773 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
5774 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5775 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5776 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5777 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5778 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
5779 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
5780 BIT(POWER_DOMAIN_PORT_CRT) | \
5781 BIT(POWER_DOMAIN_INIT))
5782 #define HSW_DISPLAY_POWER_DOMAINS ( \
5783 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
5784 BIT(POWER_DOMAIN_INIT))
5786 #define BDW_ALWAYS_ON_POWER_DOMAINS ( \
5787 HSW_ALWAYS_ON_POWER_DOMAINS | \
5788 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
5789 #define BDW_DISPLAY_POWER_DOMAINS ( \
5790 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
5791 BIT(POWER_DOMAIN_INIT))
5793 #define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
5794 #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
5796 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
5797 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5798 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5799 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5800 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5801 BIT(POWER_DOMAIN_PORT_CRT) | \
5802 BIT(POWER_DOMAIN_INIT))
5804 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
5805 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5806 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5807 BIT(POWER_DOMAIN_INIT))
5809 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
5810 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5811 BIT(POWER_DOMAIN_INIT))
5813 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
5814 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5815 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5816 BIT(POWER_DOMAIN_INIT))
5818 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
5819 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5820 BIT(POWER_DOMAIN_INIT))
5822 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
5823 .sync_hw = i9xx_always_on_power_well_noop,
5824 .enable = i9xx_always_on_power_well_noop,
5825 .disable = i9xx_always_on_power_well_noop,
5826 .is_enabled = i9xx_always_on_power_well_enabled,
5829 static struct i915_power_well i9xx_always_on_power_well[] = {
5831 .name = "always-on",
5833 .domains = POWER_DOMAIN_MASK,
5834 .ops = &i9xx_always_on_power_well_ops,
5838 static const struct i915_power_well_ops hsw_power_well_ops = {
5839 .sync_hw = hsw_power_well_sync_hw,
5840 .enable = hsw_power_well_enable,
5841 .disable = hsw_power_well_disable,
5842 .is_enabled = hsw_power_well_enabled,
5845 static struct i915_power_well hsw_power_wells[] = {
5847 .name = "always-on",
5849 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
5850 .ops = &i9xx_always_on_power_well_ops,
5854 .domains = HSW_DISPLAY_POWER_DOMAINS,
5855 .ops = &hsw_power_well_ops,
5859 static struct i915_power_well bdw_power_wells[] = {
5861 .name = "always-on",
5863 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
5864 .ops = &i9xx_always_on_power_well_ops,
5868 .domains = BDW_DISPLAY_POWER_DOMAINS,
5869 .ops = &hsw_power_well_ops,
5873 static const struct i915_power_well_ops vlv_display_power_well_ops = {
5874 .sync_hw = vlv_power_well_sync_hw,
5875 .enable = vlv_display_power_well_enable,
5876 .disable = vlv_display_power_well_disable,
5877 .is_enabled = vlv_power_well_enabled,
5880 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
5881 .sync_hw = vlv_power_well_sync_hw,
5882 .enable = vlv_power_well_enable,
5883 .disable = vlv_power_well_disable,
5884 .is_enabled = vlv_power_well_enabled,
5887 static struct i915_power_well vlv_power_wells[] = {
5889 .name = "always-on",
5891 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
5892 .ops = &i9xx_always_on_power_well_ops,
5896 .domains = VLV_DISPLAY_POWER_DOMAINS,
5897 .data = PUNIT_POWER_WELL_DISP2D,
5898 .ops = &vlv_display_power_well_ops,
5901 .name = "dpio-common",
5902 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
5903 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
5904 .ops = &vlv_dpio_power_well_ops,
5907 .name = "dpio-tx-b-01",
5908 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5909 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5910 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5911 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5912 .ops = &vlv_dpio_power_well_ops,
5913 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
5916 .name = "dpio-tx-b-23",
5917 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5918 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5919 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5920 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5921 .ops = &vlv_dpio_power_well_ops,
5922 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
5925 .name = "dpio-tx-c-01",
5926 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5927 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5928 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5929 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5930 .ops = &vlv_dpio_power_well_ops,
5931 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
5934 .name = "dpio-tx-c-23",
5935 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5936 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5937 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5938 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5939 .ops = &vlv_dpio_power_well_ops,
5940 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
5944 #define set_power_wells(power_domains, __power_wells) ({ \
5945 (power_domains)->power_wells = (__power_wells); \
5946 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
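/*
 * set_power_wells() must be a macro rather than a function so that
 * ARRAY_SIZE() still sees the true array type of its argument instead
 * of a decayed pointer.
 */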
5949 int intel_power_domains_init(struct drm_i915_private *dev_priv)
5951 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5953 mutex_init(&power_domains->lock);
5956 * The enabling order will be from lower to higher indexed wells,
5957 * the disabling order is reversed.
5959 if (IS_HASWELL(dev_priv->dev)) {
5960 set_power_wells(power_domains, hsw_power_wells);
5961 hsw_pwr = power_domains;
5962 } else if (IS_BROADWELL(dev_priv->dev)) {
5963 set_power_wells(power_domains, bdw_power_wells);
5964 hsw_pwr = power_domains;
5965 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
5966 set_power_wells(power_domains, vlv_power_wells);
5968 set_power_wells(power_domains, i9xx_always_on_power_well);
5974 void intel_power_domains_remove(struct drm_i915_private *dev_priv)
5979 static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
5981 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5982 struct i915_power_well *power_well;
5985 mutex_lock(&power_domains->lock);
5986 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
5987 power_well->ops->sync_hw(dev_priv, power_well);
5988 mutex_unlock(&power_domains->lock);
5991 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
5993 /* For now, we need the power well to be always enabled. */
5994 intel_display_set_init_power(dev_priv, true);
5995 intel_power_domains_resume(dev_priv);
5998 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
6000 intel_runtime_pm_get(dev_priv);
6003 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
6005 intel_runtime_pm_put(dev_priv);
6008 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
6010 struct drm_device *dev = dev_priv->dev;
6011 struct device *device = &dev->pdev->dev;
6013 if (!HAS_RUNTIME_PM(dev))
6016 pm_runtime_get_sync(device);
6017 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
6020 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
6022 struct drm_device *dev = dev_priv->dev;
6023 struct device *device = &dev->pdev->dev;
6025 if (!HAS_RUNTIME_PM(dev))
6028 pm_runtime_mark_last_busy(device);
6029 pm_runtime_put_autosuspend(device);
6032 void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
6034 struct drm_device *dev = dev_priv->dev;
6035 struct device *device = &dev->pdev->dev;
6037 if (!HAS_RUNTIME_PM(dev))
6040 pm_runtime_set_active(device);
6042 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
6043 pm_runtime_mark_last_busy(device);
6044 pm_runtime_use_autosuspend(device);
6046 pm_runtime_put_autosuspend(device);
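/*
 * The sequence above marks the device active, arms a 10 second
 * autosuspend delay and releases a reference, so the GPU will runtime
 * suspend once it has been idle for 10 seconds with no other holders.
 */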
6049 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
6051 struct drm_device *dev = dev_priv->dev;
6052 struct device *device = &dev->pdev->dev;
6054 if (!HAS_RUNTIME_PM(dev))
6057 /* Make sure we're not suspended first. */
6058 pm_runtime_get_sync(device);
6059 pm_runtime_disable(device);
6062 /* Set up chip-specific power management functions */
6063 void intel_init_pm(struct drm_device *dev)
6065 struct drm_i915_private *dev_priv = dev->dev_private;
6068 if (INTEL_INFO(dev)->gen >= 7) {
6069 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6070 dev_priv->display.enable_fbc = gen7_enable_fbc;
6071 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6072 } else if (INTEL_INFO(dev)->gen >= 5) {
6073 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6074 dev_priv->display.enable_fbc = ironlake_enable_fbc;
6075 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6076 } else if (IS_GM45(dev)) {
6077 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
6078 dev_priv->display.enable_fbc = g4x_enable_fbc;
6079 dev_priv->display.disable_fbc = g4x_disable_fbc;
6081 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
6082 dev_priv->display.enable_fbc = i8xx_enable_fbc;
6083 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6085 /* This value was pulled out of someone's hat */
6086 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
6091 if (IS_PINEVIEW(dev))
6092 i915_pineview_get_mem_freq(dev);
6093 else if (IS_GEN5(dev))
6094 i915_ironlake_get_mem_freq(dev);
6096 /* For FIFO watermark updates */
6097 if (HAS_PCH_SPLIT(dev)) {
6098 ilk_setup_wm_latency(dev);
6100 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6101 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6102 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6103 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6104 dev_priv->display.update_wm = ilk_update_wm;
6105 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6107 DRM_DEBUG_KMS("Failed to read display plane latency. "
6112 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6113 else if (IS_GEN6(dev))
6114 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6115 else if (IS_IVYBRIDGE(dev))
6116 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6117 else if (IS_HASWELL(dev))
6118 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6119 else if (INTEL_INFO(dev)->gen == 8)
6120 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6121 } else if (IS_VALLEYVIEW(dev)) {
6122 dev_priv->display.update_wm = valleyview_update_wm;
6123 dev_priv->display.init_clock_gating =
6124 valleyview_init_clock_gating;
6125 } else if (IS_PINEVIEW(dev)) {
6126 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
6129 dev_priv->mem_freq)) {
6130 DRM_INFO("failed to find known CxSR latency "
6131 "(found ddr%s fsb freq %d, mem freq %d), "
6133 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6134 dev_priv->fsb_freq, dev_priv->mem_freq);
6135 /* Disable CxSR and never update its watermark again */
6136 pineview_disable_cxsr(dev);
6137 dev_priv->display.update_wm = NULL;
6139 dev_priv->display.update_wm = pineview_update_wm;
6140 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6141 } else if (IS_G4X(dev)) {
6142 dev_priv->display.update_wm = g4x_update_wm;
6143 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
6144 } else if (IS_GEN4(dev)) {
6145 dev_priv->display.update_wm = i965_update_wm;
6146 if (IS_CRESTLINE(dev))
6147 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
6148 else if (IS_BROADWATER(dev))
6149 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
6150 } else if (IS_GEN3(dev)) {
6151 dev_priv->display.update_wm = i9xx_update_wm;
6152 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6153 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6154 } else if (IS_GEN2(dev)) {
6155 if (INTEL_INFO(dev)->num_pipes == 1) {
6156 dev_priv->display.update_wm = i845_update_wm;
6157 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6159 dev_priv->display.update_wm = i9xx_update_wm;
6160 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6163 if (IS_I85X(dev) || IS_I865G(dev))
6164 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6166 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6168 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6172 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
6174 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6176 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6177 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
6181 I915_WRITE(GEN6_PCODE_DATA, *val);
6182 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6184 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6186 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
6190 *val = I915_READ(GEN6_PCODE_DATA);
6191 I915_WRITE(GEN6_PCODE_DATA, 0);
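/*
 * The mailbox protocol, shared with the write path below: place the
 * payload in GEN6_PCODE_DATA, write the command with GEN6_PCODE_READY
 * set into GEN6_PCODE_MAILBOX, and poll until the firmware clears the
 * READY bit, at which point GEN6_PCODE_DATA holds the reply.
 */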
6196 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
6198 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6200 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6201 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
6205 I915_WRITE(GEN6_PCODE_DATA, val);
6206 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6208 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6210 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
6214 I915_WRITE(GEN6_PCODE_DATA, 0);
6219 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6224 switch (dev_priv->mem_freq) {
6238 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
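/*
 * Rearranged: freq = mem_freq * (val - 0xb7) / (4 * div), so opcode
 * 0xb7 maps to 0 MHz and each opcode step is worth mem_freq / (4 * div)
 * MHz; vlv_freq_opcode() below is the algebraic inverse, modulo rounding.
 */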
6241 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6246 switch (dev_priv->mem_freq) {
6260 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6263 void intel_pm_setup(struct drm_device *dev)
6265 struct drm_i915_private *dev_priv = dev->dev_private;
6267 mutex_init(&dev_priv->rps.hw_lock);
6269 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6270 intel_gen6_powersave_work);
6272 dev_priv->pm.suspended = false;
6273 dev_priv->pm.irqs_disabled = false;