/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "intel_drv.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
        if (HAS_LLC(dev_priv)) {
                /*
                 * WaCompressedResourceDisplayNewHashMode:skl,kbl
                 *   Display WA #0390: skl,kbl
                 *
                 * Must match Sampler, Pixel Back End, and Media. See
                 * WaCompressedResourceSamplerPbeMediaNewHashMode.
                 */
                I915_WRITE(CHICKEN_PAR1_1,
                           I915_READ(CHICKEN_PAR1_1) |
                           SKL_DE_COMPRESSED_HASH_MODE);
        }

        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
        I915_WRITE(CHICKEN_PAR1_1,
                   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

        /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
        I915_WRITE(GEN8_CHICKEN_DCPR_1,
                   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

        /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
        /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                   DISP_FBC_WM_DIS |
                   DISP_FBC_MEMORY_WAKE);

        /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_DISABLE_DUMMY0);

        if (IS_SKYLAKE(dev_priv)) {
                /* WaDisableDopClockGating */
                I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
                           & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        }
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
        gen9_init_clock_gating(dev_priv);

        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

        /*
         * FIXME:
         * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
         */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

        /*
         * Wa: Backlight PWM may stop in the asserted state, causing backlight
         * to stay fully on.
         */
        I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
        gen9_init_clock_gating(dev_priv);

        /*
         * WaDisablePWMClockGating:glk
         * Backlight PWM may stop in the asserted state, causing backlight
         * to stay fully on.
         */
        I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                   PWM1_GATING_DIS | PWM2_GATING_DIS);

        /* WaDDIIOTimeout:glk */
        if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
                u32 val = I915_READ(CHICKEN_MISC_2);
                val &= ~(GLK_CL0_PWR_DOWN |
                         GLK_CL1_PWR_DOWN |
                         GLK_CL2_PWR_DOWN);
                I915_WRITE(CHICKEN_MISC_2, val);
        }
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
        u16 ddrpll, csipll;

        ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
        csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->ips.r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->ips.c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->ips.c_m = 1;
        } else {
                dev_priv->ips.c_m = 2;
        }
}
static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
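/*
 * The columns above are assumed to follow the field order of
 * struct cxsr_latency: is_desktop, is_ddr3, fsb_freq, mem_freq,
 * then the four self-refresh latencies (in ns) consumed below by
 * pineview_update_wm(): display_sr, display_hpll_disable, cursor_sr,
 * cursor_hpll_disable.
 */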
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
                                                         bool is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        vlv_punit_get(dev_priv);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
        if (enable)
                val &= ~FORCE_DDR_HIGH_FREQ;
        else
                val |= FORCE_DDR_HIGH_FREQ;
        val &= ~FORCE_DDR_LOW_FREQ;
        val |= FORCE_DDR_FREQ_REQ_ACK;
        vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
                DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

        vlv_punit_put(dev_priv);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        vlv_punit_get(dev_priv);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
        if (enable)
                val |= DSP_MAXFIFO_PM5_ENABLE;
        else
                val &= ~DSP_MAXFIFO_PM5_ENABLE;
        vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

        vlv_punit_put(dev_priv);
}
#define FW_WM(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
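/*
 * Illustrative expansion of the macro above:
 *   FW_WM(wm, SR) == ((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK
 * i.e. the watermark value is shifted into the SR field of the DSPFW
 * register and clamped to that field's mask.
 */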
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        bool was_enabled;
        u32 val;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
                I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
                POSTING_READ(FW_BLC_SELF_VLV);
        } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
                was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
                I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_PINEVIEW(dev_priv)) {
                val = I915_READ(DSPFW3);
                was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
                if (enable)
                        val |= PINEVIEW_SELF_REFRESH_EN;
                else
                        val &= ~PINEVIEW_SELF_REFRESH_EN;
                I915_WRITE(DSPFW3, val);
                POSTING_READ(DSPFW3);
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
                was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
                val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                               _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
                I915_WRITE(FW_BLC_SELF, val);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_I915GM(dev_priv)) {
                /*
                 * FIXME can't find a bit like this for 915G, and
                 * yet it does have the related watermark in
                 * FW_BLC_SELF. What's going on?
                 */
                was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
                POSTING_READ(INSTPM);
        } else {
                return false;
        }

        trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

        DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
                      enableddisabled(enable),
                      enableddisabled(was_enabled));

        return was_enabled;
}
/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self-refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        bool ret;

        mutex_lock(&dev_priv->wm.wm_mutex);
        ret = _intel_set_memory_cxsr(dev_priv, enable);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->wm.vlv.cxsr = enable;
        else if (IS_G4X(dev_priv))
                dev_priv->wm.g4x.cxsr = enable;
        mutex_unlock(&dev_priv->wm.wm_mutex);

        return ret;
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
        ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
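/*
 * Illustrative example: for pipe B sprite0 below this is used as
 * VLV_FIFO_START(dsparb, dsparb2, 16, 8), i.e. the 9-bit FIFO start
 * value is assembled from DSPARB[23:16] (the low 8 bits) plus
 * DSPARB2[8] (bit 8).
 */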
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
        enum pipe pipe = crtc->pipe;
        int sprite0_start, sprite1_start;

        switch (pipe) {
                u32 dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
                break;
        case PIPE_B:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
                break;
        case PIPE_C:
                dsparb2 = I915_READ(DSPARB2);
                dsparb3 = I915_READ(DSPARB3);
                sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
                sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
                break;
        default:
                MISSING_CASE(pipe);
                return;
        }

        fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
        fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
        fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
        fifo_state->plane[PLANE_CURSOR] = 63;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
        u32 dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (i9xx_plane == PLANE_B)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
                      dsparb, plane_name(i9xx_plane), size);

        return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
        u32 dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (i9xx_plane == PLANE_B)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
                      dsparb, plane_name(i9xx_plane), size);

        return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
                              enum i9xx_plane_id i9xx_plane)
{
        u32 dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
                      dsparb, plane_name(i9xx_plane), size);

        return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
        .fifo_size = I945_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
        .fifo_size = I915_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM / 2,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
        .fifo_size = I830_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
                                     unsigned int cpp,
                                     unsigned int latency)
{
        u64 ret;

        ret = mul_u32_u32(pixel_rate, cpp * latency);
        ret = DIV_ROUND_UP_ULL(ret, 10000);

        return ret;
}
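/*
 * Worked example (illustrative numbers only): a 148500 kHz pixel rate
 * at 4 bytes per pixel with a 5 usec (latency == 50) memory wakeup
 * latency drains 148500 * 4 * 50 / 10000 = 2970 bytes from the FIFO
 * before memory can respond, so the watermark must leave at least
 * that many bytes in the FIFO.
 */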
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
                                     unsigned int htotal,
                                     unsigned int width,
                                     unsigned int cpp,
                                     unsigned int latency)
{
        unsigned int ret;

        /*
         * FIXME remove once all users are computing
         * watermarks in the correct place.
         */
        if (WARN_ON_ONCE(htotal == 0))
                htotal = 1;

        ret = (latency * pixel_rate) / (htotal * 10000);
        ret = (ret + 1) * width * cpp;

        return ret;
}
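/*
 * Worked example (illustrative numbers only): 148500 kHz pixel rate,
 * htotal 2200, so one line takes 2200 / 148500 kHz ~= 14.8 usec. With a
 * 5 usec latency (latency == 50) no complete line elapses:
 * (50 * 148500) / (2200 * 10000) = 0, and the watermark becomes
 * (0 + 1) * width * cpp, e.g. 1 * 1920 * 4 = 7680 bytes.
 */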
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
                                       const struct intel_watermark_params *wm,
                                       int fifo_size, int cpp,
                                       unsigned int latency_ns)
{
        int entries, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
        entries = intel_wm_method1(pixel_rate, cpp,
                                   latency_ns / 100);
        entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
                wm->guard_size;
        DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

        wm_size = fifo_size - entries;
        DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;

        /*
         * Bspec seems to indicate that the value shouldn't be lower than
         * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
         * Let's go for 8 which is the burst size since certain platforms
         * already use a hardcoded 8 (which is what the spec says should be
         * done).
         */
        if (wm_size <= 8)
                wm_size = 8;

        return wm_size;
}
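/*
 * Worked example (illustrative numbers only): 100000 kHz pixel rate,
 * 4 bytes per pixel, latency_ns == 5000: method1 gives
 * 100000 * 4 * 50 / 10000 = 2000 bytes, i.e. 32 cachelines of 64 bytes.
 * With a guard size of 2 and a 512 entry FIFO the watermark level would
 * be 512 - 34 = 478 (before the max_wm/default_wm clamping above).
 */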
static bool is_disabling(int old, int new, int threshold)
{
        return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
        return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
        return dev_priv->wm.max_level + 1;
}
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
                                   const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

        /* FIXME check the 'enable' instead */
        if (!crtc_state->base.active)
                return false;

        /*
         * Treat cursor with fb as always visible since cursor updates
         * can happen faster than the vrefresh rate, and the current
         * watermark code doesn't handle that correctly. Cursor updates
         * which set/clear the fb or change the cursor size are going
         * to get throttled by intel_legacy_cursor_update() to work
         * around this problem with the watermark code.
         */
        if (plane->id == PLANE_CURSOR)
                return plane_state->base.fb != NULL;

        return plane_state->base.visible;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc, *enabled = NULL;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
        struct intel_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned int wm;

        latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
                                         dev_priv->is_ddr3,
                                         dev_priv->fsb_freq,
                                         dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                intel_set_memory_cxsr(dev_priv, false);
                return;
        }

        crtc = single_enabled_crtc(dev_priv);
        if (crtc) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc->config->base.adjusted_mode;
                const struct drm_framebuffer *fb =
                        crtc->base.primary->state->fb;
                int cpp = fb->format->cpp[0];
                int clock = adjusted_mode->crtc_clock;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        cpp, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= FW_WM(wm, SR);
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        4, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= FW_WM(wm, CURSOR_SR);
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        cpp, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= FW_WM(wm, HPLL_SR);
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        4, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= FW_WM(wm, HPLL_CURSOR);
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                intel_set_memory_cxsr(dev_priv, true);
        } else {
                intel_set_memory_cxsr(dev_priv, false);
        }
}
/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
        int tlb_miss = fifo_size * 64 - width * cpp * 8;

        return max(0, tlb_miss);
}
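/*
 * Worked example (illustrative numbers only): with a 511 cacheline FIFO
 * (511 * 64 = 32704 bytes) and a 960 pixel wide 4 Bpp plane, eight whole
 * lines occupy 960 * 4 * 8 = 30720 bytes, so the watermark is bumped by
 * 32704 - 30720 = 1984 bytes. For wider planes the difference goes
 * negative and no adjustment is applied.
 */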
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct g4x_wm_values *wm)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe)
                trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

        I915_WRITE(DSPFW1,
                   FW_WM(wm->sr.plane, SR) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
        I915_WRITE(DSPFW2,
                   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
                   FW_WM(wm->sr.fbc, FBC_SR) |
                   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
        I915_WRITE(DSPFW3,
                   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
                   FW_WM(wm->sr.cursor, CURSOR_SR) |
                   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
                   FW_WM(wm->hpll.plane, HPLL_SR));

        POSTING_READ(DSPFW1);
}
#define FW_WM_VLV(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct vlv_wm_values *wm)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

                I915_WRITE(VLV_DDL(pipe),
                           (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
                           (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
                           (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
                           (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
        }

        /*
         * Zero the (unused) WM1 watermarks, and also clear all the
         * high order bits so that there are no out of bounds values
         * present in the registers during the reprogramming.
         */
        I915_WRITE(DSPHOWM, 0);
        I915_WRITE(DSPHOWM1, 0);
        I915_WRITE(DSPFW4, 0);
        I915_WRITE(DSPFW5, 0);
        I915_WRITE(DSPFW6, 0);

        I915_WRITE(DSPFW1,
                   FW_WM(wm->sr.plane, SR) |
                   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
                   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
                   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
        I915_WRITE(DSPFW2,
                   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
                   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
                   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
        I915_WRITE(DSPFW3,
                   FW_WM(wm->sr.cursor, CURSOR_SR));

        if (IS_CHERRYVIEW(dev_priv)) {
                I915_WRITE(DSPFW7_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
                I915_WRITE(DSPFW8_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
                           FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
                I915_WRITE(DSPFW9_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
                           FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
        } else {
                I915_WRITE(DSPFW7,
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
        }

        POSTING_READ(DSPFW1);
}
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
        /* all latencies in usec */
        dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
        dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
        dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

        dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
        /*
         * DSPCNTR[13] supposedly controls whether the
         * primary plane can use the FIFO space otherwise
         * reserved for the sprite plane. It's not 100% clear
         * what the actual FIFO size is, but it looks like we
         * can happily set both primary and sprite watermarks
         * up to 127 cachelines. So that would seem to mean
         * that either DSPCNTR[13] doesn't do anything, or that
         * the total FIFO is >= 256 cachelines in size. Either
         * way, we don't seem to have to worry about this
         * repartitioning as the maximum watermark value the
         * register can hold for each plane is lower than the
         * minimum FIFO size.
         */
        switch (plane_id) {
        case PLANE_CURSOR:
                return 63;
        case PLANE_PRIMARY:
                return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
        case PLANE_SPRITE0:
                return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
        default:
                MISSING_CASE(plane_id);
                return 0;
        }
}
static int g4x_fbc_fifo_size(int level)
{
        switch (level) {
        case G4X_WM_LEVEL_SR:
                return 7;
        case G4X_WM_LEVEL_HPLL:
                return 15;
        default:
                MISSING_CASE(level);
                return 0;
        }
}
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state,
                          int level)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
        unsigned int clock, htotal, cpp, width, wm;

        if (latency == 0)
                return USHRT_MAX;

        if (!intel_wm_plane_visible(crtc_state, plane_state))
                return 0;

        /*
         * Not 100% sure which way ELK should go here as the
         * spec only says CL/CTG should assume 32bpp and BW
         * doesn't need to. But as these things followed the
         * mobile vs. desktop lines on gen3 as well, let's
         * assume ELK doesn't need this.
         *
         * The spec also fails to list such a restriction for
         * the HPLL watermark, which seems a little strange.
         * Let's use 32bpp for the HPLL watermark as well.
         */
        if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
            level != G4X_WM_LEVEL_NORMAL)
                cpp = 4;
        else
                cpp = plane_state->base.fb->format->cpp[0];

        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;

        if (plane->id == PLANE_CURSOR)
                width = plane_state->base.crtc_w;
        else
                width = drm_rect_width(&plane_state->base.dst);

        if (plane->id == PLANE_CURSOR) {
                wm = intel_wm_method2(clock, htotal, width, cpp, latency);
        } else if (plane->id == PLANE_PRIMARY &&
                   level == G4X_WM_LEVEL_NORMAL) {
                wm = intel_wm_method1(clock, cpp, latency);
        } else {
                unsigned int small, large;

                small = intel_wm_method1(clock, cpp, latency);
                large = intel_wm_method2(clock, htotal, width, cpp, latency);

                wm = min(small, large);
        }

        wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
                              width, cpp);

        wm = DIV_ROUND_UP(wm, 64) + 2;

        return min_t(unsigned int, wm, USHRT_MAX);
}
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
                                 int level, enum plane_id plane_id, u16 value)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        bool dirty = false;

        for (; level < intel_wm_num_levels(dev_priv); level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

                dirty |= raw->plane[plane_id] != value;
                raw->plane[plane_id] = value;
        }

        return dirty;
}
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
                               int level, u16 value)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        bool dirty = false;

        /* NORMAL level doesn't have an FBC watermark */
        level = max(level, G4X_WM_LEVEL_SR);

        for (; level < intel_wm_num_levels(dev_priv); level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

                dirty |= raw->fbc != value;
                raw->fbc = value;
        }

        return dirty;
}
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
                              const struct intel_plane_state *pstate,
                              u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
        enum plane_id plane_id = plane->id;
        bool dirty = false;
        int level;

        if (!intel_wm_plane_visible(crtc_state, plane_state)) {
                dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
                if (plane_id == PLANE_PRIMARY)
                        dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
                goto out;
        }

        for (level = 0; level < num_levels; level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
                int wm, max_wm;

                wm = g4x_compute_wm(crtc_state, plane_state, level);
                max_wm = g4x_plane_fifo_size(plane_id, level);

                if (wm > max_wm)
                        break;

                dirty |= raw->plane[plane_id] != wm;
                raw->plane[plane_id] = wm;

                if (plane_id != PLANE_PRIMARY ||
                    level == G4X_WM_LEVEL_NORMAL)
                        continue;

                wm = ilk_compute_fbc_wm(crtc_state, plane_state,
                                        raw->plane[plane_id]);
                max_wm = g4x_fbc_fifo_size(level);

                /*
                 * FBC wm is not mandatory as we
                 * can always just disable its use.
                 */
                if (wm > max_wm)
                        wm = USHRT_MAX;

                dirty |= raw->fbc != wm;
                raw->fbc = wm;
        }

        /* mark watermarks as invalid */
        dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

        if (plane_id == PLANE_PRIMARY)
                dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
        if (dirty) {
                DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
                              plane->base.name,
                              crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
                              crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
                              crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

                if (plane_id == PLANE_PRIMARY)
                        DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
                                      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
                                      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
        }

        return dirty;
}
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                      enum plane_id plane_id, int level)
{
        const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

        return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                     int level)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

        if (level > dev_priv->wm.max_level)
                return false;

        return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
               g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
               g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
                               struct g4x_wm_state *wm_state, int level)
{
        if (level <= G4X_WM_LEVEL_NORMAL) {
                enum plane_id plane_id;

                for_each_plane_id_on_crtc(crtc, plane_id)
                        wm_state->wm.plane[plane_id] = USHRT_MAX;
        }

        if (level <= G4X_WM_LEVEL_SR) {
                wm_state->cxsr = false;
                wm_state->sr.cursor = USHRT_MAX;
                wm_state->sr.plane = USHRT_MAX;
                wm_state->sr.fbc = USHRT_MAX;
        }

        if (level <= G4X_WM_LEVEL_HPLL) {
                wm_state->hpll_en = false;
                wm_state->hpll.cursor = USHRT_MAX;
                wm_state->hpll.plane = USHRT_MAX;
                wm_state->hpll.fbc = USHRT_MAX;
        }
}
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->base.state);
        struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
        int num_active_planes = hweight32(crtc_state->active_planes &
                                          ~BIT(PLANE_CURSOR));
        const struct g4x_pipe_wm *raw;
        const struct intel_plane_state *old_plane_state;
        const struct intel_plane_state *new_plane_state;
        struct intel_plane *plane;
        enum plane_id plane_id;
        int i, level;
        unsigned int dirty = 0;

        for_each_oldnew_intel_plane_in_state(state, plane,
                                             old_plane_state,
                                             new_plane_state, i) {
                if (new_plane_state->base.crtc != &crtc->base &&
                    old_plane_state->base.crtc != &crtc->base)
                        continue;

                if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
                        dirty |= BIT(plane->id);
        }

        if (!dirty)
                return 0;

        level = G4X_WM_LEVEL_NORMAL;
        if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
                goto out;

        raw = &crtc_state->wm.g4x.raw[level];
        for_each_plane_id_on_crtc(crtc, plane_id)
                wm_state->wm.plane[plane_id] = raw->plane[plane_id];

        level = G4X_WM_LEVEL_SR;
        if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
                goto out;

        raw = &crtc_state->wm.g4x.raw[level];
        wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
        wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
        wm_state->sr.fbc = raw->fbc;

        wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

        level = G4X_WM_LEVEL_HPLL;
        if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
                goto out;

        raw = &crtc_state->wm.g4x.raw[level];
        wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
        wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
        wm_state->hpll.fbc = raw->fbc;

        wm_state->hpll_en = wm_state->cxsr;

        level++;

 out:
        if (level == G4X_WM_LEVEL_NORMAL)
                return -EINVAL;

        /* invalidate the higher levels */
        g4x_invalidate_wms(crtc, wm_state, level);

        /*
         * Determine if the FBC watermark(s) can be used. If
         * this isn't the case we prefer to disable the FBC
         * watermark(s) rather than disable the SR/HPLL
         * level(s) entirely.
         */
        wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

        if (level >= G4X_WM_LEVEL_SR &&
            wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
                wm_state->fbc_en = false;
        else if (level >= G4X_WM_LEVEL_HPLL &&
                 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
                wm_state->fbc_en = false;

        return 0;
}
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
        const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(new_crtc_state->base.state);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(intel_state, crtc);
        const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
        enum plane_id plane_id;

        if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
                *intermediate = *optimal;

                intermediate->cxsr = false;
                intermediate->hpll_en = false;
                goto out;
        }

        intermediate->cxsr = optimal->cxsr && active->cxsr &&
                !new_crtc_state->disable_cxsr;
        intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
                !new_crtc_state->disable_cxsr;
        intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                intermediate->wm.plane[plane_id] =
                        max(optimal->wm.plane[plane_id],
                            active->wm.plane[plane_id]);

                WARN_ON(intermediate->wm.plane[plane_id] >
                        g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
        }

        intermediate->sr.plane = max(optimal->sr.plane,
                                     active->sr.plane);
        intermediate->sr.cursor = max(optimal->sr.cursor,
                                      active->sr.cursor);
        intermediate->sr.fbc = max(optimal->sr.fbc,
                                   active->sr.fbc);

        intermediate->hpll.plane = max(optimal->hpll.plane,
                                       active->hpll.plane);
        intermediate->hpll.cursor = max(optimal->hpll.cursor,
                                        active->hpll.cursor);
        intermediate->hpll.fbc = max(optimal->hpll.fbc,
                                     active->hpll.fbc);

        WARN_ON((intermediate->sr.plane >
                 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
                 intermediate->sr.cursor >
                 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
                intermediate->cxsr);
        WARN_ON((intermediate->sr.plane >
                 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
                 intermediate->sr.cursor >
                 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
                intermediate->hpll_en);

        WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
                intermediate->fbc_en && intermediate->cxsr);
        WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
                intermediate->fbc_en && intermediate->hpll_en);

 out:
        /*
         * If our intermediate WM are identical to the final WM, then we can
         * omit the post-vblank programming; only update if it's different.
         */
        if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
                new_crtc_state->wm.need_postvbl_update = true;

        return 0;
}
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
                         struct g4x_wm_values *wm)
{
        struct intel_crtc *crtc;
        int num_active_crtcs = 0;

        wm->cxsr = true;
        wm->hpll_en = true;
        wm->fbc_en = true;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

                if (!crtc->active)
                        continue;

                if (!wm_state->cxsr)
                        wm->cxsr = false;
                if (!wm_state->hpll_en)
                        wm->hpll_en = false;
                if (!wm_state->fbc_en)
                        wm->fbc_en = false;

                num_active_crtcs++;
        }

        if (num_active_crtcs != 1) {
                wm->cxsr = false;
                wm->hpll_en = false;
                wm->fbc_en = false;
        }

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
                enum pipe pipe = crtc->pipe;

                wm->pipe[pipe] = wm_state->wm;
                if (crtc->active && wm->cxsr)
                        wm->sr = wm_state->sr;
                if (crtc->active && wm->hpll_en)
                        wm->hpll = wm_state->hpll;
        }
}
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
        struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
        struct g4x_wm_values new_wm = {};

        g4x_merge_wm(dev_priv, &new_wm);

        if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
                return;

        if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
                _intel_set_memory_cxsr(dev_priv, false);

        g4x_write_wm_values(dev_priv, &new_wm);

        if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
                _intel_set_memory_cxsr(dev_priv, true);

        *old_wm = new_wm;
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
                                   struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

        mutex_lock(&dev_priv->wm.wm_mutex);
        crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
        g4x_program_watermarks(dev_priv);
        mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
                                    struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

        if (!crtc_state->wm.need_postvbl_update)
                return;

        mutex_lock(&dev_priv->wm.wm_mutex);
        intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
        g4x_program_watermarks(dev_priv);
        mutex_unlock(&dev_priv->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
                                   unsigned int htotal,
                                   unsigned int width,
                                   unsigned int cpp,
                                   unsigned int latency)
{
        unsigned int ret;

        ret = intel_wm_method2(pixel_rate, htotal,
                               width, cpp, latency);
        ret = DIV_ROUND_UP(ret, 64);

        return ret;
}
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
        /* all latencies in usec */
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

        dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

                dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
        }
}
static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
                                const struct intel_plane_state *plane_state,
                                int level)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        unsigned int clock, htotal, cpp, width, wm;

        if (dev_priv->wm.pri_latency[level] == 0)
                return USHRT_MAX;

        if (!intel_wm_plane_visible(crtc_state, plane_state))
                return 0;

        cpp = plane_state->base.fb->format->cpp[0];
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        width = crtc_state->pipe_src_w;

        if (plane->id == PLANE_CURSOR) {
                /*
                 * FIXME the formula gives values that are
                 * too big for the cursor FIFO, and hence we
                 * would never be able to use cursors. For
                 * now just hardcode the watermark.
                 */
                wm = 63;
        } else {
                wm = vlv_wm_method2(clock, htotal, width, cpp,
                                    dev_priv->wm.pri_latency[level] * 10);
        }

        return min_t(unsigned int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
        return (active_planes & (BIT(PLANE_SPRITE0) |
                                 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        const struct g4x_pipe_wm *raw =
                &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
        struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
        unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
        int num_active_planes = hweight32(active_planes);
        const int fifo_size = 511;
        int fifo_extra, fifo_left = fifo_size;
        int sprite0_fifo_extra = 0;
        unsigned int total_rate;
        enum plane_id plane_id;

        /*
         * When enabling sprite0 after sprite1 has already been enabled
         * we tend to get an underrun unless sprite0 already has some
         * FIFO space allocated. Hence we always allocate at least one
         * cacheline for sprite0 whenever sprite1 is enabled.
         *
         * All other plane enable sequences appear immune to this problem.
         */
        if (vlv_need_sprite0_fifo_workaround(active_planes))
                sprite0_fifo_extra = 1;

        total_rate = raw->plane[PLANE_PRIMARY] +
                raw->plane[PLANE_SPRITE0] +
                raw->plane[PLANE_SPRITE1] +
                sprite0_fifo_extra;

        if (total_rate > fifo_size)
                return -EINVAL;

        if (total_rate == 0)
                total_rate = 1;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                unsigned int rate;

                if ((active_planes & BIT(plane_id)) == 0) {
                        fifo_state->plane[plane_id] = 0;
                        continue;
                }

                rate = raw->plane[plane_id];
                fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
                fifo_left -= fifo_state->plane[plane_id];
        }

        fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
        fifo_left -= sprite0_fifo_extra;

        fifo_state->plane[PLANE_CURSOR] = 63;

        fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

        /* spread the remainder evenly */
        for_each_plane_id_on_crtc(crtc, plane_id) {
                int plane_extra;

                if (fifo_left == 0)
                        break;

                if ((active_planes & BIT(plane_id)) == 0)
                        continue;

                plane_extra = min(fifo_extra, fifo_left);
                fifo_state->plane[plane_id] += plane_extra;
                fifo_left -= plane_extra;
        }

        WARN_ON(active_planes != 0 && fifo_left != 0);

        /* give it all to the first plane if none are active */
        if (active_planes == 0) {
                WARN_ON(fifo_left != fifo_size);
                fifo_state->plane[PLANE_PRIMARY] = fifo_left;
        }

        return 0;
}
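/*
 * Worked example (illustrative numbers only): with raw PM2 watermarks of
 * 100 (primary) and 50 (sprite0) cachelines and sprite1 disabled,
 * total_rate = 150, so the primary gets 511 * 100 / 150 = 340 lines and
 * sprite0 gets 511 * 50 / 150 = 170 lines; the single leftover line is
 * then handed out by the "spread the remainder" loop above.
 */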
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
                               struct vlv_wm_state *wm_state, int level)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        for (; level < intel_wm_num_levels(dev_priv); level++) {
                enum plane_id plane_id;

                for_each_plane_id_on_crtc(crtc, plane_id)
                        wm_state->wm[level].plane[plane_id] = USHRT_MAX;

                wm_state->sr[level].cursor = USHRT_MAX;
                wm_state->sr[level].plane = USHRT_MAX;
        }
}
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
        if (wm > fifo_size)
                return USHRT_MAX;
        else
                return fifo_size - wm;
}
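/*
 * For example (illustrative numbers only): a raw watermark of 40
 * cachelines against a 170 cacheline FIFO allocation is programmed as
 * 170 - 40 = 130, since the VLV/CHV registers take the watermark
 * counted from the opposite end of the FIFO; values that don't fit
 * are mapped to USHRT_MAX and invalidated by the callers.
 */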
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
                                 int level, enum plane_id plane_id, u16 value)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        int num_levels = intel_wm_num_levels(dev_priv);
        bool dirty = false;

        for (; level < num_levels; level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

                dirty |= raw->plane[plane_id] != value;
                raw->plane[plane_id] = value;
        }

        return dirty;
}
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
                                     const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        enum plane_id plane_id = plane->id;
        int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
        int level;
        bool dirty = false;

        if (!intel_wm_plane_visible(crtc_state, plane_state)) {
                dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
                goto out;
        }

        for (level = 0; level < num_levels; level++) {
                struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
                int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
                int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

                if (wm > max_wm)
                        break;

                dirty |= raw->plane[plane_id] != wm;
                raw->plane[plane_id] = wm;
        }

        /* mark all higher levels as invalid */
        dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

 out:
        if (dirty)
                DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
                              plane->base.name,
                              crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
                              crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
                              crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

        return dirty;
}
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
                                      enum plane_id plane_id, int level)
{
        const struct g4x_pipe_wm *raw =
                &crtc_state->wm.vlv.raw[level];
        const struct vlv_fifo_state *fifo_state =
                &crtc_state->wm.vlv.fifo_state;

        return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
        return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
               vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
               vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
               vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->base.state);
        struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
        const struct vlv_fifo_state *fifo_state =
                &crtc_state->wm.vlv.fifo_state;
        int num_active_planes = hweight32(crtc_state->active_planes &
                                          ~BIT(PLANE_CURSOR));
        bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
        const struct intel_plane_state *old_plane_state;
        const struct intel_plane_state *new_plane_state;
        struct intel_plane *plane;
        enum plane_id plane_id;
        int level, ret, i;
        unsigned int dirty = 0;

        for_each_oldnew_intel_plane_in_state(state, plane,
                                             old_plane_state,
                                             new_plane_state, i) {
                if (new_plane_state->base.crtc != &crtc->base &&
                    old_plane_state->base.crtc != &crtc->base)
                        continue;

                if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
                        dirty |= BIT(plane->id);
        }

        /*
         * DSPARB registers may have been reset due to the
         * power well being turned off. Make sure we restore
         * them to a consistent state even if no primary/sprite
         * planes are initially active.
         */
        if (needs_modeset)
                crtc_state->fifo_changed = true;

        if (!dirty)
                return 0;

        /* cursor changes don't warrant a FIFO recompute */
        if (dirty & ~BIT(PLANE_CURSOR)) {
                const struct intel_crtc_state *old_crtc_state =
                        intel_atomic_get_old_crtc_state(state, crtc);
                const struct vlv_fifo_state *old_fifo_state =
                        &old_crtc_state->wm.vlv.fifo_state;

                ret = vlv_compute_fifo(crtc_state);
                if (ret)
                        return ret;

                if (needs_modeset ||
                    memcmp(old_fifo_state, fifo_state,
                           sizeof(*fifo_state)) != 0)
                        crtc_state->fifo_changed = true;
        }

        /* initially allow all levels */
        wm_state->num_levels = intel_wm_num_levels(dev_priv);
        /*
         * Note that enabling cxsr with no primary/sprite planes
         * enabled can wedge the pipe. Hence we only allow cxsr
         * with exactly one enabled primary/sprite plane.
         */
        wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

        for (level = 0; level < wm_state->num_levels; level++) {
                const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
                const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

                if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
                        break;

                for_each_plane_id_on_crtc(crtc, plane_id) {
                        wm_state->wm[level].plane[plane_id] =
                                vlv_invert_wm_value(raw->plane[plane_id],
                                                    fifo_state->plane[plane_id]);
                }

                wm_state->sr[level].plane =
                        vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
                                                 raw->plane[PLANE_SPRITE0],
                                                 raw->plane[PLANE_SPRITE1]),
                                            sr_fifo_size);

                wm_state->sr[level].cursor =
                        vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
                                            63);
        }

        if (level == 0)
                return -EINVAL;

        /* limit to only levels we can actually handle */
        wm_state->num_levels = level;

        /* invalidate the higher levels */
        vlv_invalidate_wms(crtc, wm_state, level);

        return 0;
}
#define VLV_FIFO(plane, value) \
        (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1948 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1949 struct intel_crtc_state *crtc_state)
1951 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1952 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1953 struct intel_uncore *uncore = &dev_priv->uncore;
1954 const struct vlv_fifo_state *fifo_state =
1955 &crtc_state->wm.vlv.fifo_state;
1956 int sprite0_start, sprite1_start, fifo_size;
1958 if (!crtc_state->fifo_changed)
1961 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1962 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1963 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1965 WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
1966 WARN_ON(fifo_size != 511);
1968 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1971 * uncore.lock serves a double purpose here. It allows us to
1972 * use the less expensive I915_{READ,WRITE}_FW() functions, and
1973 * it protects the DSPARB registers from getting clobbered by
1974 * parallel updates from multiple pipes.
1976 * intel_pipe_update_start() has already disabled interrupts
1977 * for us, so a plain spin_lock() is sufficient here.
1979 spin_lock(&uncore->lock);
1981 switch (crtc->pipe) {
1982 u32 dsparb, dsparb2, dsparb3;
1983 case PIPE_A:
1984 dsparb = intel_uncore_read_fw(uncore, DSPARB);
1985 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1987 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1988 VLV_FIFO(SPRITEB, 0xff));
1989 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1990 VLV_FIFO(SPRITEB, sprite1_start));
1992 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1993 VLV_FIFO(SPRITEB_HI, 0x1));
1994 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1995 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1997 intel_uncore_write_fw(uncore, DSPARB, dsparb);
1998 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1999 break;
2000 case PIPE_B:
2001 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2002 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2004 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
2005 VLV_FIFO(SPRITED, 0xff));
2006 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
2007 VLV_FIFO(SPRITED, sprite1_start));
2009 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
2010 VLV_FIFO(SPRITED_HI, 0xff));
2011 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
2012 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
2014 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2015 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2016 break;
2017 case PIPE_C:
2018 dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
2019 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2021 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2022 VLV_FIFO(SPRITEF, 0xff));
2023 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2024 VLV_FIFO(SPRITEF, sprite1_start));
2026 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2027 VLV_FIFO(SPRITEF_HI, 0xff));
2028 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2029 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2031 intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
2032 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2033 break;
2034 default:
2035 break;
2036 }
2038 intel_uncore_posting_read_fw(uncore, DSPARB);
2040 spin_unlock(&uncore->lock);
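/*
 * Worked example for vlv_atomic_update_fifo() above (illustrative
 * numbers): a primary allocation of 256 blocks and a sprite0
 * allocation of 128 gives sprite0_start = 256 and sprite1_start =
 * 384; bit 8 of each split point lands in the _HI fields while the
 * low 8 bits land in the regular DSPARB sprite fields.
 */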
2045 static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
2047 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
2048 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2049 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2050 struct intel_atomic_state *intel_state =
2051 to_intel_atomic_state(new_crtc_state->base.state);
2052 const struct intel_crtc_state *old_crtc_state =
2053 intel_atomic_get_old_crtc_state(intel_state, crtc);
2054 const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2055 int level;
2057 if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
2058 *intermediate = *optimal;
2060 intermediate->cxsr = false;
2061 goto out;
2062 }
2064 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2065 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2066 !new_crtc_state->disable_cxsr;
2068 for (level = 0; level < intermediate->num_levels; level++) {
2069 enum plane_id plane_id;
2071 for_each_plane_id_on_crtc(crtc, plane_id) {
2072 intermediate->wm[level].plane[plane_id] =
2073 min(optimal->wm[level].plane[plane_id],
2074 active->wm[level].plane[plane_id]);
2077 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2078 active->sr[level].plane);
2079 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2080 active->sr[level].cursor);
2083 vlv_invalidate_wms(crtc, intermediate, level);
2085 out:
2087 * If our intermediate WM are identical to the final WM, then we can
2088 * omit the post-vblank programming; only update if it's different.
2090 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2091 new_crtc_state->wm.need_postvbl_update = true;
2093 return 0;
2096 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2097 struct vlv_wm_values *wm)
2099 struct intel_crtc *crtc;
2100 int num_active_crtcs = 0;
2102 wm->level = dev_priv->wm.max_level;
2103 wm->cxsr = true;
2105 for_each_intel_crtc(&dev_priv->drm, crtc) {
2106 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2108 if (!crtc->active)
2109 continue;
2111 if (!wm_state->cxsr)
2112 wm->cxsr = false;
2114 num_active_crtcs++;
2115 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2118 if (num_active_crtcs != 1)
2119 wm->cxsr = false;
2121 if (num_active_crtcs > 1)
2122 wm->level = VLV_WM_LEVEL_PM2;
2124 for_each_intel_crtc(&dev_priv->drm, crtc) {
2125 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2126 enum pipe pipe = crtc->pipe;
2128 wm->pipe[pipe] = wm_state->wm[wm->level];
2129 if (crtc->active && wm->cxsr)
2130 wm->sr = wm_state->sr[wm->level];
2132 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2133 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2134 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2135 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
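/*
 * Note: the DDL (display data latency) values above are not computed
 * per mode; every plane on every pipe is simply programmed with high
 * precision and a fixed drain latency value of 2.
 */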
2139 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2141 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2142 struct vlv_wm_values new_wm = {};
2144 vlv_merge_wm(dev_priv, &new_wm);
2146 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2147 return;
2149 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2150 chv_set_memory_dvfs(dev_priv, false);
2152 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2153 chv_set_memory_pm5(dev_priv, false);
2155 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2156 _intel_set_memory_cxsr(dev_priv, false);
2158 vlv_write_wm_values(dev_priv, &new_wm);
2160 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2161 _intel_set_memory_cxsr(dev_priv, true);
2163 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2164 chv_set_memory_pm5(dev_priv, true);
2166 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2167 chv_set_memory_dvfs(dev_priv, true);
2169 *old_wm = new_wm;
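/*
 * Note the ordering in vlv_program_watermarks() above: deeper power
 * states (DDR DVFS, PM5, cxsr) are exited before the new watermark
 * values are written and only re-entered afterwards, so the hardware
 * never sits in a deep state with watermarks that cannot support it.
 */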
2172 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2173 struct intel_crtc_state *crtc_state)
2175 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2176 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2178 mutex_lock(&dev_priv->wm.wm_mutex);
2179 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2180 vlv_program_watermarks(dev_priv);
2181 mutex_unlock(&dev_priv->wm.wm_mutex);
2184 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2185 struct intel_crtc_state *crtc_state)
2187 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2188 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2190 if (!crtc_state->wm.need_postvbl_update)
2191 return;
2193 mutex_lock(&dev_priv->wm.wm_mutex);
2194 intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2195 vlv_program_watermarks(dev_priv);
2196 mutex_unlock(&dev_priv->wm.wm_mutex);
2199 static void i965_update_wm(struct intel_crtc *unused_crtc)
2201 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2202 struct intel_crtc *crtc;
2203 int srwm = 1;
2204 int cursor_sr = 16;
2205 bool cxsr_enabled;
2207 /* Calc sr entries for one plane configs */
2208 crtc = single_enabled_crtc(dev_priv);
2209 if (crtc) {
2210 /* self-refresh has much higher latency */
2211 static const int sr_latency_ns = 12000;
2212 const struct drm_display_mode *adjusted_mode =
2213 &crtc->config->base.adjusted_mode;
2214 const struct drm_framebuffer *fb =
2215 crtc->base.primary->state->fb;
2216 int clock = adjusted_mode->crtc_clock;
2217 int htotal = adjusted_mode->crtc_htotal;
2218 int hdisplay = crtc->config->pipe_src_w;
2219 int cpp = fb->format->cpp[0];
2220 int entries;
2222 entries = intel_wm_method2(clock, htotal,
2223 hdisplay, cpp, sr_latency_ns / 100);
2224 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2225 srwm = I965_FIFO_SIZE - entries;
2226 if (srwm < 0)
2227 srwm = 1;
2228 srwm &= 0x1ff;
2229 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
2230 entries, srwm);
2232 entries = intel_wm_method2(clock, htotal,
2233 crtc->base.cursor->state->crtc_w, 4,
2234 sr_latency_ns / 100);
2235 entries = DIV_ROUND_UP(entries,
2236 i965_cursor_wm_info.cacheline_size) +
2237 i965_cursor_wm_info.guard_size;
2239 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2240 if (cursor_sr > i965_cursor_wm_info.max_wm)
2241 cursor_sr = i965_cursor_wm_info.max_wm;
2243 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
2244 "cursor %d\n", srwm, cursor_sr);
2246 cxsr_enabled = true;
2247 } else {
2248 cxsr_enabled = false;
2249 /* Turn off self refresh if both pipes are enabled */
2250 intel_set_memory_cxsr(dev_priv, false);
2253 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2256 /* 965 has limitations... */
2257 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2258 FW_WM(8, CURSORB) |
2259 FW_WM(8, PLANEB) |
2260 FW_WM(8, PLANEA));
2261 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2262 FW_WM(8, PLANEC_OLD));
2263 /* update cursor SR watermark */
2264 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2266 if (cxsr_enabled)
2267 intel_set_memory_cxsr(dev_priv, true);
2272 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2274 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2275 const struct intel_watermark_params *wm_info;
2276 u32 fwater_lo;
2277 u32 fwater_hi;
2278 int cwm, srwm = 1;
2279 int fifo_size;
2280 int planea_wm, planeb_wm;
2281 struct intel_crtc *crtc, *enabled = NULL;
2283 if (IS_I945GM(dev_priv))
2284 wm_info = &i945_wm_info;
2285 else if (!IS_GEN(dev_priv, 2))
2286 wm_info = &i915_wm_info;
2287 else
2288 wm_info = &i830_a_wm_info;
2290 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
2291 crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
2292 if (intel_crtc_active(crtc)) {
2293 const struct drm_display_mode *adjusted_mode =
2294 &crtc->config->base.adjusted_mode;
2295 const struct drm_framebuffer *fb =
2296 crtc->base.primary->state->fb;
2297 int cpp;
2299 if (IS_GEN(dev_priv, 2))
2300 cpp = 4;
2301 else
2302 cpp = fb->format->cpp[0];
2304 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2305 wm_info, fifo_size, cpp,
2306 pessimal_latency_ns);
2307 enabled = crtc;
2308 } else {
2309 planea_wm = fifo_size - wm_info->guard_size;
2310 if (planea_wm > (long)wm_info->max_wm)
2311 planea_wm = wm_info->max_wm;
2314 if (IS_GEN(dev_priv, 2))
2315 wm_info = &i830_bc_wm_info;
2317 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
2318 crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
2319 if (intel_crtc_active(crtc)) {
2320 const struct drm_display_mode *adjusted_mode =
2321 &crtc->config->base.adjusted_mode;
2322 const struct drm_framebuffer *fb =
2323 crtc->base.primary->state->fb;
2324 int cpp;
2326 if (IS_GEN(dev_priv, 2))
2327 cpp = 4;
2328 else
2329 cpp = fb->format->cpp[0];
2331 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2332 wm_info, fifo_size, cpp,
2333 pessimal_latency_ns);
2334 if (enabled == NULL)
2335 enabled = crtc;
2336 else
2337 enabled = NULL;
2338 } else {
2339 planeb_wm = fifo_size - wm_info->guard_size;
2340 if (planeb_wm > (long)wm_info->max_wm)
2341 planeb_wm = wm_info->max_wm;
2344 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2346 if (IS_I915GM(dev_priv) && enabled) {
2347 struct drm_i915_gem_object *obj;
2349 obj = intel_fb_obj(enabled->base.primary->state->fb);
2351 /* self-refresh seems busted with untiled */
2352 if (!i915_gem_object_is_tiled(obj))
2353 enabled = NULL;
2357 * Overlay gets an aggressive default since video jitter is bad.
2359 cwm = 2;
2361 /* Play safe and disable self-refresh before adjusting watermarks. */
2362 intel_set_memory_cxsr(dev_priv, false);
2364 /* Calc sr entries for one plane configs */
2365 if (HAS_FW_BLC(dev_priv) && enabled) {
2366 /* self-refresh has much higher latency */
2367 static const int sr_latency_ns = 6000;
2368 const struct drm_display_mode *adjusted_mode =
2369 &enabled->config->base.adjusted_mode;
2370 const struct drm_framebuffer *fb =
2371 enabled->base.primary->state->fb;
2372 int clock = adjusted_mode->crtc_clock;
2373 int htotal = adjusted_mode->crtc_htotal;
2374 int hdisplay = enabled->config->pipe_src_w;
2375 int cpp;
2376 int entries;
2378 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2379 cpp = 4;
2380 else
2381 cpp = fb->format->cpp[0];
2383 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2384 sr_latency_ns / 100);
2385 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2386 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
2387 srwm = wm_info->fifo_size - entries;
2388 if (srwm < 0)
2389 srwm = 1;
2391 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2392 I915_WRITE(FW_BLC_SELF,
2393 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2394 else
2395 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2398 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2399 planea_wm, planeb_wm, cwm, srwm);
2401 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2402 fwater_hi = (cwm & 0x1f);
2404 /* Set request length to 8 cachelines per fetch */
2405 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2406 fwater_hi = fwater_hi | (1 << 8);
2408 I915_WRITE(FW_BLC, fwater_lo);
2409 I915_WRITE(FW_BLC2, fwater_hi);
2411 if (enabled)
2412 intel_set_memory_cxsr(dev_priv, true);
2415 static void i845_update_wm(struct intel_crtc *unused_crtc)
2417 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2418 struct intel_crtc *crtc;
2419 const struct drm_display_mode *adjusted_mode;
2420 u32 fwater_lo;
2421 int planea_wm;
2423 crtc = single_enabled_crtc(dev_priv);
2424 if (crtc == NULL)
2425 return;
2427 adjusted_mode = &crtc->config->base.adjusted_mode;
2428 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2429 &i845_wm_info,
2430 dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
2431 4, pessimal_latency_ns);
2432 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2433 fwater_lo |= (3<<8) | planea_wm;
2435 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2437 I915_WRITE(FW_BLC, fwater_lo);
2440 /* latency must be in 0.1us units. */
2441 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2442 unsigned int cpp,
2443 unsigned int latency)
2445 unsigned int ret;
2447 ret = intel_wm_method1(pixel_rate, cpp, latency);
2448 ret = DIV_ROUND_UP(ret, 64) + 2;
2450 return ret;
2453 /* latency must be in 0.1us units. */
2454 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2455 unsigned int htotal,
2456 unsigned int width,
2457 unsigned int cpp,
2458 unsigned int latency)
2460 unsigned int ret;
2462 ret = intel_wm_method2(pixel_rate, htotal,
2463 width, cpp, latency);
2464 ret = DIV_ROUND_UP(ret, 64) + 2;
2466 return ret;
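/*
 * Roughly speaking, ilk_wm_method1() above sizes the watermark from
 * the memory latency alone (bytes fetched during the latency window),
 * while ilk_wm_method2() also accounts for the line time and can
 * therefore come out smaller for long lines. Both round up to 64 byte
 * FIFO cachelines and add a two cacheline guard.
 */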
2469 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2472 * Neither of these should be possible since this function shouldn't be
2473 * called if the CRTC is off or the plane is invisible. But let's be
2474 * extra paranoid to avoid a potential divide-by-zero if we screw up
2475 * elsewhere in the driver.
2477 if (WARN_ON(!cpp))
2478 return 0;
2479 if (WARN_ON(!horiz_pixels))
2480 return 0;
2482 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
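/*
 * Worked example for ilk_wm_fbc() above (illustrative numbers): a
 * primary watermark of 128 cachelines on a 1920 pixel wide 4 bpp
 * plane gives DIV_ROUND_UP(128 * 64, 1920 * 4) + 2 = 2 + 2 = 4 lines.
 */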
2485 struct ilk_wm_maximums {
2486 u16 pri;
2487 u16 spr;
2488 u16 cur;
2489 u16 fbc;
2490 };
2493 * For both WM_PIPE and WM_LP.
2494 * mem_value must be in 0.1us units.
2496 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2497 const struct intel_plane_state *pstate,
2498 u32 mem_value, bool is_lp)
2500 u32 method1, method2;
2501 int cpp;
2503 if (mem_value == 0)
2504 return U32_MAX;
2506 if (!intel_wm_plane_visible(cstate, pstate))
2507 return 0;
2509 cpp = pstate->base.fb->format->cpp[0];
2511 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2513 if (!is_lp)
2514 return method1;
2516 method2 = ilk_wm_method2(cstate->pixel_rate,
2517 cstate->base.adjusted_mode.crtc_htotal,
2518 drm_rect_width(&pstate->base.dst),
2519 cpp, mem_value);
2521 return min(method1, method2);
2525 * For both WM_PIPE and WM_LP.
2526 * mem_value must be in 0.1us units.
2528 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2529 const struct intel_plane_state *pstate,
2530 u32 mem_value)
2532 u32 method1, method2;
2533 int cpp;
2535 if (mem_value == 0)
2536 return U32_MAX;
2538 if (!intel_wm_plane_visible(cstate, pstate))
2539 return 0;
2541 cpp = pstate->base.fb->format->cpp[0];
2543 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2544 method2 = ilk_wm_method2(cstate->pixel_rate,
2545 cstate->base.adjusted_mode.crtc_htotal,
2546 drm_rect_width(&pstate->base.dst),
2547 cpp, mem_value);
2548 return min(method1, method2);
2552 * For both WM_PIPE and WM_LP.
2553 * mem_value must be in 0.1us units.
2555 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2556 const struct intel_plane_state *pstate,
2557 u32 mem_value)
2559 int cpp;
2561 if (mem_value == 0)
2562 return U32_MAX;
2564 if (!intel_wm_plane_visible(cstate, pstate))
2565 return 0;
2567 cpp = pstate->base.fb->format->cpp[0];
2569 return ilk_wm_method2(cstate->pixel_rate,
2570 cstate->base.adjusted_mode.crtc_htotal,
2571 pstate->base.crtc_w, cpp, mem_value);
2574 /* Only for WM_LP. */
2575 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2576 const struct intel_plane_state *pstate,
2577 u32 pri_val)
2579 int cpp;
2581 if (!intel_wm_plane_visible(cstate, pstate))
2582 return 0;
2584 cpp = pstate->base.fb->format->cpp[0];
2586 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
2589 static unsigned int
2590 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2592 if (INTEL_GEN(dev_priv) >= 8)
2593 return 3072;
2594 else if (INTEL_GEN(dev_priv) >= 7)
2595 return 768;
2596 else
2597 return 512;
2600 static unsigned int
2601 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2602 int level, bool is_sprite)
2604 if (INTEL_GEN(dev_priv) >= 8)
2605 /* BDW primary/sprite plane watermarks */
2606 return level == 0 ? 255 : 2047;
2607 else if (INTEL_GEN(dev_priv) >= 7)
2608 /* IVB/HSW primary/sprite plane watermarks */
2609 return level == 0 ? 127 : 1023;
2610 else if (!is_sprite)
2611 /* ILK/SNB primary plane watermarks */
2612 return level == 0 ? 127 : 511;
2613 else
2614 /* ILK/SNB sprite plane watermarks */
2615 return level == 0 ? 63 : 255;
2618 static unsigned int
2619 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2621 if (INTEL_GEN(dev_priv) >= 7)
2622 return level == 0 ? 63 : 255;
2623 else
2624 return level == 0 ? 31 : 63;
2627 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2629 if (INTEL_GEN(dev_priv) >= 8)
2630 return 31;
2631 else
2632 return 15;
2635 /* Calculate the maximum primary/sprite plane watermark */
2636 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2637 int level,
2638 const struct intel_wm_config *config,
2639 enum intel_ddb_partitioning ddb_partitioning,
2640 bool is_sprite)
2642 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2644 /* if sprites aren't enabled, sprites get nothing */
2645 if (is_sprite && !config->sprites_enabled)
2646 return 0;
2648 /* HSW allows LP1+ watermarks even with multiple pipes */
2649 if (level == 0 || config->num_pipes_active > 1) {
2650 fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
2653 * For some reason the non self refresh
2654 * FIFO size is only half of the self
2655 * refresh FIFO size on ILK/SNB.
2657 if (INTEL_GEN(dev_priv) <= 6)
2658 fifo_size /= 2;
2661 if (config->sprites_enabled) {
2662 /* level 0 is always calculated with 1:1 split */
2663 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2664 if (is_sprite)
2665 fifo_size *= 5;
2666 fifo_size /= 6;
2667 } else {
2668 fifo_size /= 2;
2669 }
2670 }
2672 /* clamp to max that the registers can hold */
2673 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2676 /* Calculate the maximum cursor plane watermark */
2677 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2678 int level,
2679 const struct intel_wm_config *config)
2681 /* HSW LP1+ watermarks w/ multiple pipes */
2682 if (level > 0 && config->num_pipes_active > 1)
2683 return 64;
2685 /* otherwise just report max that registers can hold */
2686 return ilk_cursor_wm_reg_max(dev_priv, level);
2689 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2690 int level,
2691 const struct intel_wm_config *config,
2692 enum intel_ddb_partitioning ddb_partitioning,
2693 struct ilk_wm_maximums *max)
2695 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2696 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2697 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2698 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2701 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2702 int level,
2703 struct ilk_wm_maximums *max)
2705 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2706 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2707 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2708 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2711 static bool ilk_validate_wm_level(int level,
2712 const struct ilk_wm_maximums *max,
2713 struct intel_wm_level *result)
2715 bool ret;
2717 /* already determined to be invalid? */
2718 if (!result->enable)
2719 return false;
2721 result->enable = result->pri_val <= max->pri &&
2722 result->spr_val <= max->spr &&
2723 result->cur_val <= max->cur;
2725 ret = result->enable;
2728 * HACK until we can pre-compute everything,
2729 * and thus fail gracefully if LP0 watermarks
2730 * are exceeded!
2732 if (level == 0 && !result->enable) {
2733 if (result->pri_val > max->pri)
2734 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2735 level, result->pri_val, max->pri);
2736 if (result->spr_val > max->spr)
2737 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2738 level, result->spr_val, max->spr);
2739 if (result->cur_val > max->cur)
2740 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2741 level, result->cur_val, max->cur);
2743 result->pri_val = min_t(u32, result->pri_val, max->pri);
2744 result->spr_val = min_t(u32, result->spr_val, max->spr);
2745 result->cur_val = min_t(u32, result->cur_val, max->cur);
2746 result->enable = true;
2747 }
2749 return ret;
2752 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2753 const struct intel_crtc *intel_crtc,
2754 int level,
2755 struct intel_crtc_state *cstate,
2756 const struct intel_plane_state *pristate,
2757 const struct intel_plane_state *sprstate,
2758 const struct intel_plane_state *curstate,
2759 struct intel_wm_level *result)
2761 u16 pri_latency = dev_priv->wm.pri_latency[level];
2762 u16 spr_latency = dev_priv->wm.spr_latency[level];
2763 u16 cur_latency = dev_priv->wm.cur_latency[level];
2765 /* WM1+ latency values stored in 0.5us units */
2766 if (level > 0) {
2767 pri_latency *= 5;
2768 spr_latency *= 5;
2769 cur_latency *= 5;
2770 }
2772 if (pristate) {
2773 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2774 pri_latency, level);
2775 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2776 }
2778 if (sprstate)
2779 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2781 if (curstate)
2782 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2784 result->enable = true;
2787 static u32
2788 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2790 const struct intel_atomic_state *intel_state =
2791 to_intel_atomic_state(cstate->base.state);
2792 const struct drm_display_mode *adjusted_mode =
2793 &cstate->base.adjusted_mode;
2794 u32 linetime, ips_linetime;
2796 if (!cstate->base.active)
2797 return 0;
2798 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2799 return 0;
2800 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
2801 return 0;
2803 /* The WM is computed based on how long it takes to fill a single
2804 * row at the given clock rate, multiplied by 8.
2806 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2807 adjusted_mode->crtc_clock);
2808 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2809 intel_state->cdclk.logical.cdclk);
2811 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2812 PIPE_WM_LINETIME_TIME(linetime);
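/*
 * Illustrative example for the linetime computation above: a mode
 * with crtc_htotal = 2200 and crtc_clock = 148500 (kHz) has a line
 * time of about 14.8 us, which is stored as ~119 in 1/8 us units.
 */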
2815 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2816 u16 wm[8])
2818 struct intel_uncore *uncore = &dev_priv->uncore;
2820 if (INTEL_GEN(dev_priv) >= 9) {
2821 u32 val;
2822 int ret, i;
2823 int level, max_level = ilk_wm_max_level(dev_priv);
2825 /* read the first set of memory latencies[0:3] */
2826 val = 0; /* data0 to be programmed to 0 for first set */
2827 ret = sandybridge_pcode_read(dev_priv,
2828 GEN9_PCODE_READ_MEM_LATENCY,
2829 &val, NULL);
2831 if (ret) {
2832 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2833 return;
2834 }
2836 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2837 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2838 GEN9_MEM_LATENCY_LEVEL_MASK;
2839 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2840 GEN9_MEM_LATENCY_LEVEL_MASK;
2841 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2842 GEN9_MEM_LATENCY_LEVEL_MASK;
2844 /* read the second set of memory latencies[4:7] */
2845 val = 1; /* data0 to be programmed to 1 for second set */
2846 ret = sandybridge_pcode_read(dev_priv,
2847 GEN9_PCODE_READ_MEM_LATENCY,
2848 &val, NULL);
2849 if (ret) {
2850 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2851 return;
2852 }
2854 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2855 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2856 GEN9_MEM_LATENCY_LEVEL_MASK;
2857 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2858 GEN9_MEM_LATENCY_LEVEL_MASK;
2859 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2860 GEN9_MEM_LATENCY_LEVEL_MASK;
2863 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2864 * need to be disabled. We make sure to sanitize the values out
2865 * of the punit to satisfy this requirement.
2867 for (level = 1; level <= max_level; level++) {
2868 if (wm[level] == 0) {
2869 for (i = level + 1; i <= max_level; i++)
2870 wm[i] = 0;
2871 break;
2872 }
2873 }
2876 * WaWmMemoryReadLatency:skl+,glk
2878 * punit doesn't take into account the read latency so we need
2879 * to add 2us to the various latency levels we retrieve from the
2880 * punit when the level 0 response data is 0us.
2882 if (wm[0] == 0) {
2883 wm[0] += 2;
2884 for (level = 1; level <= max_level; level++) {
2885 if (wm[level] == 0)
2886 break;
2887 wm[level] += 2;
2888 }
2889 }
2892 * WA Level-0 adjustment for 16GB DIMMs: SKL+
2893 * If we could not get the DIMM info, apply this WA and assume a
2894 * 16GB DIMM, so that we err on the safe side and avoid any underrun.
2897 if (dev_priv->dram_info.is_16gb_dimm)
2898 wm[0] += 1;
2900 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2901 u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
2903 wm[0] = (sskpd >> 56) & 0xFF;
2904 if (wm[0] == 0)
2905 wm[0] = sskpd & 0xF;
2906 wm[1] = (sskpd >> 4) & 0xFF;
2907 wm[2] = (sskpd >> 12) & 0xFF;
2908 wm[3] = (sskpd >> 20) & 0x1FF;
2909 wm[4] = (sskpd >> 32) & 0x1FF;
2910 } else if (INTEL_GEN(dev_priv) >= 6) {
2911 u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
2913 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2914 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2915 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2916 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2917 } else if (INTEL_GEN(dev_priv) >= 5) {
2918 u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
2920 /* ILK primary LP0 latency is 700 ns */
2921 wm[0] = 7;
2922 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2923 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2924 } else {
2925 MISSING_CASE(INTEL_DEVID(dev_priv));
2926 }
2929 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2930 u16 wm[5])
2932 /* ILK sprite LP0 latency is 1300 ns */
2933 if (IS_GEN(dev_priv, 5))
2934 wm[0] = 13;
2937 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2938 u16 wm[5])
2940 /* ILK cursor LP0 latency is 1300 ns */
2941 if (IS_GEN(dev_priv, 5))
2942 wm[0] = 13;
2945 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2947 /* how many WM levels are we expecting */
2948 if (INTEL_GEN(dev_priv) >= 9)
2949 return 7;
2950 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2951 return 4;
2952 else if (INTEL_GEN(dev_priv) >= 6)
2953 return 3;
2954 else
2955 return 2;
2958 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2959 const char *name,
2960 const u16 wm[])
2962 int level, max_level = ilk_wm_max_level(dev_priv);
2964 for (level = 0; level <= max_level; level++) {
2965 unsigned int latency = wm[level];
2968 DRM_DEBUG_KMS("%s WM%d latency not provided\n",
2974 * - latencies are in us on gen9.
2975 * - before then, WM1+ latency values are in 0.5us units
2977 if (INTEL_GEN(dev_priv) >= 9)
2978 latency *= 10;
2979 else if (level > 0)
2980 latency *= 5;
2982 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2983 name, level, wm[level],
2984 latency / 10, latency % 10);
2988 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2989 u16 wm[5], u16 min)
2991 int level, max_level = ilk_wm_max_level(dev_priv);
2993 if (wm[0] >= min)
2994 return false;
2996 wm[0] = max(wm[0], min);
2997 for (level = 1; level <= max_level; level++)
2998 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3000 return true;
3003 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3005 bool changed;
3008 * The BIOS provided WM memory latency values are often
3009 * inadequate for high resolution displays. Adjust them.
3011 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
3012 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
3013 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3015 if (!changed)
3016 return;
3018 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
3019 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3020 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3021 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3024 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3027 * On some SNB machines (Thinkpad X220 Tablet at least)
3028 * LP3 usage can cause vblank interrupts to be lost.
3029 * The DEIIR bit will go high but it looks like the CPU
3030 * never gets interrupted.
3032 * It's not clear whether other interrupt source could
3033 * be affected or if this is somehow limited to vblank
3034 * interrupts only. To play it safe we disable LP3
3035 * watermarks entirely.
3037 if (dev_priv->wm.pri_latency[3] == 0 &&
3038 dev_priv->wm.spr_latency[3] == 0 &&
3039 dev_priv->wm.cur_latency[3] == 0)
3040 return;
3042 dev_priv->wm.pri_latency[3] = 0;
3043 dev_priv->wm.spr_latency[3] = 0;
3044 dev_priv->wm.cur_latency[3] = 0;
3046 DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
3047 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3048 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3049 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3052 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3054 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3056 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3057 sizeof(dev_priv->wm.pri_latency));
3058 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3059 sizeof(dev_priv->wm.pri_latency));
3061 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3062 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3064 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3065 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3066 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3068 if (IS_GEN(dev_priv, 6)) {
3069 snb_wm_latency_quirk(dev_priv);
3070 snb_wm_lp3_irq_quirk(dev_priv);
3074 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3076 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3077 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3080 static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3081 struct intel_pipe_wm *pipe_wm)
3083 /* LP0 watermark maximums depend on this pipe alone */
3084 const struct intel_wm_config config = {
3085 .num_pipes_active = 1,
3086 .sprites_enabled = pipe_wm->sprites_enabled,
3087 .sprites_scaled = pipe_wm->sprites_scaled,
3089 struct ilk_wm_maximums max;
3091 /* LP0 watermarks always use 1/2 DDB partitioning */
3092 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3094 /* At least LP0 must be valid */
3095 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3096 DRM_DEBUG_KMS("LP0 watermark invalid\n");
3103 /* Compute new watermarks for the pipe */
3104 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3106 struct drm_atomic_state *state = cstate->base.state;
3107 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3108 struct intel_pipe_wm *pipe_wm;
3109 struct drm_device *dev = state->dev;
3110 const struct drm_i915_private *dev_priv = to_i915(dev);
3111 struct drm_plane *plane;
3112 const struct drm_plane_state *plane_state;
3113 const struct intel_plane_state *pristate = NULL;
3114 const struct intel_plane_state *sprstate = NULL;
3115 const struct intel_plane_state *curstate = NULL;
3116 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3117 struct ilk_wm_maximums max;
3119 pipe_wm = &cstate->wm.ilk.optimal;
3121 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
3122 const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
3124 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
3125 pristate = ps;
3126 else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
3127 sprstate = ps;
3128 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
3129 curstate = ps;
3132 pipe_wm->pipe_enabled = cstate->base.active;
3133 if (sprstate) {
3134 pipe_wm->sprites_enabled = sprstate->base.visible;
3135 pipe_wm->sprites_scaled = sprstate->base.visible &&
3136 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
3137 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
3140 usable_level = max_level;
3142 /* ILK/SNB: LP2+ watermarks only w/o sprites */
3143 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3144 usable_level = 1;
3146 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3147 if (pipe_wm->sprites_scaled)
3148 usable_level = 0;
3150 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3151 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
3152 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3154 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3155 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
3157 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3158 return -EINVAL;
3160 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3162 for (level = 1; level <= usable_level; level++) {
3163 struct intel_wm_level *wm = &pipe_wm->wm[level];
3165 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
3166 pristate, sprstate, curstate, wm);
3169 * Disable any watermark level that exceeds the
3170 * register maximums since such watermarks are
3171 * always invalid.
3173 if (!ilk_validate_wm_level(level, &max, wm)) {
3174 memset(wm, 0, sizeof(*wm));
3175 break;
3176 }
3177 }
3179 return 0;
3183 * Build a set of 'intermediate' watermark values that satisfy both the old
3184 * state and the new state. These can be programmed to the hardware
3185 * immediately.
3187 static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
3189 struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
3190 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3191 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3192 struct intel_atomic_state *intel_state =
3193 to_intel_atomic_state(newstate->base.state);
3194 const struct intel_crtc_state *oldstate =
3195 intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
3196 const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3197 int level, max_level = ilk_wm_max_level(dev_priv);
3200 * Start with the final, target watermarks, then combine with the
3201 * currently active watermarks to get values that are safe both before
3202 * and after the vblank.
3204 *a = newstate->wm.ilk.optimal;
3205 if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
3206 intel_state->skip_intermediate_wm)
3207 return 0;
3209 a->pipe_enabled |= b->pipe_enabled;
3210 a->sprites_enabled |= b->sprites_enabled;
3211 a->sprites_scaled |= b->sprites_scaled;
3213 for (level = 0; level <= max_level; level++) {
3214 struct intel_wm_level *a_wm = &a->wm[level];
3215 const struct intel_wm_level *b_wm = &b->wm[level];
3217 a_wm->enable &= b_wm->enable;
3218 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3219 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3220 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3221 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3225 * We need to make sure that these merged watermark values are
3226 * actually a valid configuration themselves. If they're not,
3227 * there's no safe way to transition from the old state to
3228 * the new state, so we need to fail the atomic transaction.
3230 if (!ilk_validate_pipe_wm(dev_priv, a))
3231 return -EINVAL;
3234 * If our intermediate WM are identical to the final WM, then we can
3235 * omit the post-vblank programming; only update if it's different.
3237 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3238 newstate->wm.need_postvbl_update = true;
3240 return 0;
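/*
 * Example of the intermediate merge above (illustrative numbers): if
 * the old state used pri_val 40 and the new state only needs 24, the
 * intermediate value stays at 40; the smaller optimal value is only
 * programmed after the vblank, once the old configuration is gone.
 */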
3244 * Merge the watermarks from all active pipes for a specific level.
3246 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3247 int level,
3248 struct intel_wm_level *ret_wm)
3250 const struct intel_crtc *intel_crtc;
3252 ret_wm->enable = true;
3254 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3255 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3256 const struct intel_wm_level *wm = &active->wm[level];
3258 if (!active->pipe_enabled)
3259 continue;
3262 * The watermark values may have been used in the past,
3263 * so we must maintain them in the registers for some
3264 * time even if the level is now disabled.
3266 if (!wm->enable)
3267 ret_wm->enable = false;
3269 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3270 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3271 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3272 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3277 * Merge all low power watermarks for all active pipes.
3279 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3280 const struct intel_wm_config *config,
3281 const struct ilk_wm_maximums *max,
3282 struct intel_pipe_wm *merged)
3284 int level, max_level = ilk_wm_max_level(dev_priv);
3285 int last_enabled_level = max_level;
3287 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3288 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3289 config->num_pipes_active > 1)
3290 last_enabled_level = 0;
3292 /* ILK: FBC WM must be disabled always */
3293 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3295 /* merge each WM1+ level */
3296 for (level = 1; level <= max_level; level++) {
3297 struct intel_wm_level *wm = &merged->wm[level];
3299 ilk_merge_wm_level(dev_priv, level, wm);
3301 if (level > last_enabled_level)
3302 wm->enable = false;
3303 else if (!ilk_validate_wm_level(level, max, wm))
3304 /* make sure all following levels get disabled */
3305 last_enabled_level = level - 1;
3308 * The spec says it is preferred to disable
3309 * FBC WMs instead of disabling a WM level.
3311 if (wm->fbc_val > max->fbc) {
3312 if (wm->enable)
3313 merged->fbc_wm_enabled = false;
3314 wm->fbc_val = 0;
3315 }
3316 }
3318 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3320 * FIXME this is racy. FBC might get enabled later.
3321 * What we should check here is whether FBC can be
3322 * enabled sometime later.
3324 if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
3325 intel_fbc_is_active(dev_priv)) {
3326 for (level = 2; level <= max_level; level++) {
3327 struct intel_wm_level *wm = &merged->wm[level];
3329 wm->enable = false;
3334 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3336 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3337 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
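/*
 * Example for ilk_wm_lp_to_level() above: when wm[4] (the extra
 * HSW/BDW level) is enabled, LP1/LP2/LP3 map to levels 1/3/4;
 * otherwise they map to levels 1/2/3.
 */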
3340 /* The value we need to program into the WM_LPx latency field */
3341 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3342 int level)
3344 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3345 return 2 * level;
3346 else
3347 return dev_priv->wm.pri_latency[level];
3350 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3351 const struct intel_pipe_wm *merged,
3352 enum intel_ddb_partitioning partitioning,
3353 struct ilk_wm_values *results)
3355 struct intel_crtc *intel_crtc;
3356 int level, wm_lp;
3358 results->enable_fbc_wm = merged->fbc_wm_enabled;
3359 results->partitioning = partitioning;
3361 /* LP1+ register values */
3362 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3363 const struct intel_wm_level *r;
3365 level = ilk_wm_lp_to_level(wm_lp, merged);
3367 r = &merged->wm[level];
3370 * Maintain the watermark values even if the level is
3371 * disabled. Doing otherwise could cause underruns.
3373 results->wm_lp[wm_lp - 1] =
3374 (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3375 (r->pri_val << WM1_LP_SR_SHIFT) |
3376 r->cur_val;
3378 if (r->enable)
3379 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3381 if (INTEL_GEN(dev_priv) >= 8)
3382 results->wm_lp[wm_lp - 1] |=
3383 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3385 results->wm_lp[wm_lp - 1] |=
3386 r->fbc_val << WM1_LP_FBC_SHIFT;
3389 * Always set WM1S_LP_EN when spr_val != 0, even if the
3390 * level is disabled. Doing otherwise could cause underruns.
3392 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3393 WARN_ON(wm_lp != 1);
3394 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3396 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3399 /* LP0 register values */
3400 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3401 enum pipe pipe = intel_crtc->pipe;
3402 const struct intel_wm_level *r =
3403 &intel_crtc->wm.active.ilk.wm[0];
3405 if (WARN_ON(!r->enable))
3406 continue;
3408 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3410 results->wm_pipe[pipe] =
3411 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3412 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3413 r->cur_val;
3417 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
3418 * case both are at the same level. Prefer r1 in case they're the same. */
3419 static struct intel_pipe_wm *
3420 ilk_find_best_result(struct drm_i915_private *dev_priv,
3421 struct intel_pipe_wm *r1,
3422 struct intel_pipe_wm *r2)
3424 int level, max_level = ilk_wm_max_level(dev_priv);
3425 int level1 = 0, level2 = 0;
3427 for (level = 1; level <= max_level; level++) {
3428 if (r1->wm[level].enable)
3429 level1 = level;
3430 if (r2->wm[level].enable)
3431 level2 = level;
3432 }
3434 if (level1 == level2) {
3435 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3436 return r2;
3437 else
3438 return r1;
3439 } else if (level1 > level2) {
3440 return r1;
3441 } else {
3442 return r2;
3443 }
3446 /* dirty bits used to track which watermarks need changes */
3447 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3448 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
3449 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3450 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3451 #define WM_DIRTY_FBC (1 << 24)
3452 #define WM_DIRTY_DDB (1 << 25)
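/*
 * Layout of the dirty mask: pipe bits occupy bits 0-7, the per-pipe
 * linetime bits 8-15, the three LP watermarks bits 16-18, FBC bit 24
 * and DDB partitioning bit 25.
 */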
3454 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3455 const struct ilk_wm_values *old,
3456 const struct ilk_wm_values *new)
3458 unsigned int dirty = 0;
3459 enum pipe pipe;
3460 int wm_lp;
3462 for_each_pipe(dev_priv, pipe) {
3463 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
3464 dirty |= WM_DIRTY_LINETIME(pipe);
3465 /* Must disable LP1+ watermarks too */
3466 dirty |= WM_DIRTY_LP_ALL;
3469 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3470 dirty |= WM_DIRTY_PIPE(pipe);
3471 /* Must disable LP1+ watermarks too */
3472 dirty |= WM_DIRTY_LP_ALL;
3476 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3477 dirty |= WM_DIRTY_FBC;
3478 /* Must disable LP1+ watermarks too */
3479 dirty |= WM_DIRTY_LP_ALL;
3482 if (old->partitioning != new->partitioning) {
3483 dirty |= WM_DIRTY_DDB;
3484 /* Must disable LP1+ watermarks too */
3485 dirty |= WM_DIRTY_LP_ALL;
3488 /* LP1+ watermarks already deemed dirty, no need to continue */
3489 if (dirty & WM_DIRTY_LP_ALL)
3490 return dirty;
3492 /* Find the lowest numbered LP1+ watermark in need of an update... */
3493 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3494 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3495 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3496 break;
3497 }
3499 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3500 for (; wm_lp <= 3; wm_lp++)
3501 dirty |= WM_DIRTY_LP(wm_lp);
3503 return dirty;
3506 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3507 unsigned int dirty)
3509 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3510 bool changed = false;
3512 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3513 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3514 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3515 changed = true;
3516 }
3517 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3518 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3519 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3520 changed = true;
3521 }
3522 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3523 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3524 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3525 changed = true;
3526 }
3529 * Don't touch WM1S_LP_EN here.
3530 * Doing so could cause underruns.
3533 return changed;
3537 * The spec says we shouldn't write when we don't need, because every write
3538 * causes WMs to be re-evaluated, expending some power.
3540 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3541 struct ilk_wm_values *results)
3543 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3544 unsigned int dirty;
3545 u32 val;
3547 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3548 if (!dirty)
3549 return;
3551 _ilk_disable_lp_wm(dev_priv, dirty);
3553 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3554 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3555 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3556 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3557 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3558 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3560 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3561 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3562 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3563 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3564 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3565 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3567 if (dirty & WM_DIRTY_DDB) {
3568 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3569 val = I915_READ(WM_MISC);
3570 if (results->partitioning == INTEL_DDB_PART_1_2)
3571 val &= ~WM_MISC_DATA_PARTITION_5_6;
3572 else
3573 val |= WM_MISC_DATA_PARTITION_5_6;
3574 I915_WRITE(WM_MISC, val);
3575 } else {
3576 val = I915_READ(DISP_ARB_CTL2);
3577 if (results->partitioning == INTEL_DDB_PART_1_2)
3578 val &= ~DISP_DATA_PARTITION_5_6;
3579 else
3580 val |= DISP_DATA_PARTITION_5_6;
3581 I915_WRITE(DISP_ARB_CTL2, val);
3585 if (dirty & WM_DIRTY_FBC) {
3586 val = I915_READ(DISP_ARB_CTL);
3587 if (results->enable_fbc_wm)
3588 val &= ~DISP_FBC_WM_DIS;
3589 else
3590 val |= DISP_FBC_WM_DIS;
3591 I915_WRITE(DISP_ARB_CTL, val);
3594 if (dirty & WM_DIRTY_LP(1) &&
3595 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3596 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3598 if (INTEL_GEN(dev_priv) >= 7) {
3599 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3600 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3601 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3602 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3605 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3606 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3607 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3608 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3609 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3610 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3612 dev_priv->wm.hw = *results;
3615 bool ilk_disable_lp_wm(struct drm_device *dev)
3617 struct drm_i915_private *dev_priv = to_i915(dev);
3619 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3622 static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
3624 u8 enabled_slices;
3626 /* Slice 1 will always be enabled */
3627 enabled_slices = 1;
3629 /* Gens prior to GEN11 have only one DBuf slice */
3630 if (INTEL_GEN(dev_priv) < 11)
3631 return enabled_slices;
3634 * FIXME: for now we'll only ever use 1 slice; pretend that we have
3635 * only that 1 slice enabled until we have a proper way for on-demand
3636 * toggling of the second slice.
3638 if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
3639 enabled_slices++;
3641 return enabled_slices;
3645 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
3646 * so assume we'll always need it in order to avoid underruns.
3648 static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3650 return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
3654 intel_has_sagv(struct drm_i915_private *dev_priv)
3656 return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3657 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3661 * SAGV dynamically adjusts the system agent voltage and clock frequencies
3662 * depending on power and performance requirements. The display engine access
3663 * to system memory is blocked during the adjustment time. Because of the
3664 * blocking time, having this enabled can cause full system hangs and/or pipe
3665 * underruns if we don't meet all of the following requirements:
3667 * - <= 1 pipe enabled
3668 * - All planes can enable watermarks for latencies >= SAGV engine block time
3669 * - We're not using an interlaced display configuration
3672 intel_enable_sagv(struct drm_i915_private *dev_priv)
3676 if (!intel_has_sagv(dev_priv))
3677 return 0;
3679 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3680 return 0;
3682 DRM_DEBUG_KMS("Enabling SAGV\n");
3683 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3684 GEN9_SAGV_ENABLE);
3686 /* We don't need to wait for SAGV when enabling */
3689 * Some skl systems, pre-release machines in particular,
3690 * don't actually have SAGV.
3692 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3693 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3694 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3695 return 0;
3696 } else if (ret < 0) {
3697 DRM_ERROR("Failed to enable SAGV\n");
3701 dev_priv->sagv_status = I915_SAGV_ENABLED;
3703 return 0;
3706 intel_disable_sagv(struct drm_i915_private *dev_priv)
3710 if (!intel_has_sagv(dev_priv))
3711 return 0;
3713 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3714 return 0;
3716 DRM_DEBUG_KMS("Disabling SAGV\n");
3717 /* bspec says to keep retrying for at least 1 ms */
3718 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3719 GEN9_SAGV_DISABLE,
3720 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3721 1);
3723 * Some skl systems, pre-release machines in particular,
3724 * don't actually have SAGV.
3726 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3727 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3728 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3729 return 0;
3730 } else if (ret < 0) {
3731 DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
3735 dev_priv->sagv_status = I915_SAGV_DISABLED;
3737 return 0;
3739 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3741 struct drm_device *dev = state->dev;
3742 struct drm_i915_private *dev_priv = to_i915(dev);
3743 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3744 struct intel_crtc *crtc;
3745 struct intel_plane *plane;
3746 struct intel_crtc_state *cstate;
3747 enum pipe pipe;
3748 int level, latency;
3749 int sagv_block_time_us;
3751 if (!intel_has_sagv(dev_priv))
3752 return false;
3754 if (IS_GEN(dev_priv, 9))
3755 sagv_block_time_us = 30;
3756 else if (IS_GEN(dev_priv, 10))
3757 sagv_block_time_us = 20;
3758 else
3759 sagv_block_time_us = 10;
3762 * If there are no active CRTCs, no additional checks need be performed
3764 if (hweight32(intel_state->active_crtcs) == 0)
3765 return true;
3768 * SKL+ workaround: bspec recommends we disable SAGV when we have
3769 * more than one pipe enabled
3771 if (hweight32(intel_state->active_crtcs) > 1)
3772 return false;
3774 /* Since we're now guaranteed to only have one active CRTC... */
3775 pipe = ffs(intel_state->active_crtcs) - 1;
3776 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3777 cstate = to_intel_crtc_state(crtc->base.state);
3779 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3780 return false;
3782 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3783 struct skl_plane_wm *wm =
3784 &cstate->wm.skl.optimal.planes[plane->id];
3786 /* Skip this plane if it's not enabled */
3787 if (!wm->wm[0].plane_en)
3788 continue;
3790 /* Find the highest enabled wm level for this plane */
3791 for (level = ilk_wm_max_level(dev_priv);
3792 !wm->wm[level].plane_en; --level)
3793 { }
3795 latency = dev_priv->wm.skl_latency[level];
3797 if (skl_needs_memory_bw_wa(dev_priv) &&
3798 plane->base.state->fb->modifier ==
3799 I915_FORMAT_MOD_X_TILED)
3800 latency += 15;
3803 * If any of the planes on this pipe don't enable wm levels that
3804 * incur memory latencies higher than sagv_block_time_us we
3805 * can't enable SAGV.
3807 if (latency < sagv_block_time_us)
3808 return false;
3809 }
3811 return true;
3814 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
3815 const struct intel_crtc_state *cstate,
3816 const u64 total_data_rate,
3817 const int num_active,
3818 struct skl_ddb_allocation *ddb)
3820 const struct drm_display_mode *adjusted_mode;
3821 u64 total_data_bw;
3822 u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3824 WARN_ON(ddb_size == 0);
3826 if (INTEL_GEN(dev_priv) < 11)
3827 return ddb_size - 4; /* 4 blocks for bypass path allocation */
3829 adjusted_mode = &cstate->base.adjusted_mode;
3830 total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
3833 * 12GB/s is maximum BW supported by single DBuf slice.
3835 * FIXME dbuf slice code is broken:
3836 * - must wait for planes to stop using the slice before powering it off
3837 * - plane straddling both slices is illegal in multi-pipe scenarios
3838 * - should validate we stay within the hw bandwidth limits
3840 if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
3841 ddb->enabled_slices = 2;
3842 } else {
3843 ddb->enabled_slices = 1;
3844 ddb_size /= 2;
3845 }
3847 return ddb_size;
3851 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
3852 const struct intel_crtc_state *cstate,
3853 const u64 total_data_rate,
3854 struct skl_ddb_allocation *ddb,
3855 struct skl_ddb_entry *alloc, /* out */
3856 int *num_active /* out */)
3858 struct drm_atomic_state *state = cstate->base.state;
3859 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3860 struct drm_crtc *for_crtc = cstate->base.crtc;
3861 const struct drm_crtc_state *crtc_state;
3862 const struct drm_crtc *crtc;
3863 u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
3864 enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
3865 u16 ddb_size;
3866 u32 i;
3868 if (WARN_ON(!state) || !cstate->base.active) {
3869 alloc->start = 0;
3870 alloc->end = 0;
3871 *num_active = hweight32(dev_priv->active_crtcs);
3872 return;
3873 }
3875 if (intel_state->active_pipe_changes)
3876 *num_active = hweight32(intel_state->active_crtcs);
3877 else
3878 *num_active = hweight32(dev_priv->active_crtcs);
3880 ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
3881 *num_active, ddb);
3884 * If the state doesn't change the active CRTC's or there is no
3885 * modeset request, then there's no need to recalculate;
3886 * the existing pipe allocation limits should remain unchanged.
3887 * Note that we're safe from racing commits since any racing commit
3888 * that changes the active CRTC list or does a modeset would need to
3889 * grab _all_ crtc locks, including the one we currently hold.
3891 if (!intel_state->active_pipe_changes && !intel_state->modeset) {
3893 * alloc may be cleared by clear_intel_crtc_state,
3894 * copy from old state to be sure
3896 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3897 return;
3898 }
3901 * The watermark/ddb requirement depends heavily on the width of the
3902 * framebuffer, so instead of allocating DDB equally among pipes,
3903 * distribute it based on the resolution/width of the display.
3905 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
3906 const struct drm_display_mode *adjusted_mode;
3907 int hdisplay, vdisplay;
3910 if (!crtc_state->enable)
3911 continue;
3913 pipe = to_intel_crtc(crtc)->pipe;
3914 adjusted_mode = &crtc_state->adjusted_mode;
3915 drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
3916 total_width += hdisplay;
3918 if (pipe < for_pipe)
3919 width_before_pipe += hdisplay;
3920 else if (pipe == for_pipe)
3921 pipe_width = hdisplay;
3924 alloc->start = ddb_size * width_before_pipe / total_width;
3925 alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
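/*
 * Worked example for the width-based split above (illustrative
 * numbers): with ddb_size = 896 and two enabled pipes of hdisplay
 * 1920 and 3840, total_width = 5760, so the 1920 wide pipe gets
 * blocks [0, 298) and the 3840 wide pipe gets [298, 896).
 */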
3928 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
3929 int width, const struct drm_format_info *format,
3930 u64 modifier, unsigned int rotation,
3931 u32 plane_pixel_rate, struct skl_wm_params *wp,
3932 int color_plane);
3933 static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
3934 int level,
3935 const struct skl_wm_params *wp,
3936 const struct skl_wm_level *result_prev,
3937 struct skl_wm_level *result /* out */);
3939 static unsigned int
3940 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
3941 int num_active)
3943 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3944 int level, max_level = ilk_wm_max_level(dev_priv);
3945 struct skl_wm_level wm = {};
3946 int ret, min_ddb_alloc = 0;
3947 struct skl_wm_params wp;
3949 ret = skl_compute_wm_params(crtc_state, 256,
3950 drm_format_info(DRM_FORMAT_ARGB8888),
3951 DRM_FORMAT_MOD_LINEAR,
3952 DRM_MODE_ROTATE_0,
3953 crtc_state->pixel_rate, &wp, 0);
3954 WARN_ON(ret);
3956 for (level = 0; level <= max_level; level++) {
3957 skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
3958 if (wm.min_ddb_alloc == U16_MAX)
3959 break;
3961 min_ddb_alloc = wm.min_ddb_alloc;
3964 return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
3967 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
3968 struct skl_ddb_entry *entry, u32 reg)
3971 entry->start = reg & DDB_ENTRY_MASK;
3972 entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
3974 if (entry->end)
3975 entry->end += 1;
3979 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
3980 const enum pipe pipe,
3981 const enum plane_id plane_id,
3982 struct skl_ddb_entry *ddb_y,
3983 struct skl_ddb_entry *ddb_uv)
3985 u32 val, val2;
3986 u32 fourcc = 0;
3988 /* Cursor doesn't support NV12/planar, so no extra calculation needed */
3989 if (plane_id == PLANE_CURSOR) {
3990 val = I915_READ(CUR_BUF_CFG(pipe));
3991 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
3992 return;
3993 }
3995 val = I915_READ(PLANE_CTL(pipe, plane_id));
3997 /* No DDB allocated for disabled planes */
3998 if (val & PLANE_CTL_ENABLE)
3999 fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
4000 val & PLANE_CTL_ORDER_RGBX,
4001 val & PLANE_CTL_ALPHA_MASK);
4003 if (INTEL_GEN(dev_priv) >= 11) {
4004 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4005 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4006 } else {
4007 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4008 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
4010 if (is_planar_yuv_format(fourcc))
4011 swap(val, val2);
4013 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4014 skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
4018 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4019 struct skl_ddb_entry *ddb_y,
4020 struct skl_ddb_entry *ddb_uv)
4022 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4023 enum intel_display_power_domain power_domain;
4024 enum pipe pipe = crtc->pipe;
4025 intel_wakeref_t wakeref;
4026 enum plane_id plane_id;
4028 power_domain = POWER_DOMAIN_PIPE(pipe);
4029 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4030 if (!wakeref)
4031 return;
4033 for_each_plane_id_on_crtc(crtc, plane_id)
4034 skl_ddb_get_hw_plane_state(dev_priv, pipe,
4035 plane_id,
4036 &ddb_y[plane_id],
4037 &ddb_uv[plane_id]);
4039 intel_display_power_put(dev_priv, power_domain, wakeref);
4042 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
4043 struct skl_ddb_allocation *ddb /* out */)
4045 ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
4049 * Determines the downscale amount of a plane for the purposes of watermark calculations.
4050 * The bspec defines downscale amount as:
4053 * Horizontal down scale amount = maximum[1, Horizontal source size /
4054 * Horizontal destination size]
4055 * Vertical down scale amount = maximum[1, Vertical source size /
4056 * Vertical destination size]
4057 * Total down scale amount = Horizontal down scale amount *
4058 * Vertical down scale amount
4061 * Return value is provided in 16.16 fixed point form to retain fractional part.
4062 * Caller should take care of dividing & rounding off the value.
4064 static uint_fixed_16_16_t
4065 skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
4066 const struct intel_plane_state *pstate)
4068 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
4069 u32 src_w, src_h, dst_w, dst_h;
4070 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4071 uint_fixed_16_16_t downscale_h, downscale_w;
4073 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
4074 return u32_to_fixed16(0);
4076 /* n.b., src is 16.16 fixed point, dst is whole integer */
4077 if (plane->id == PLANE_CURSOR) {
4079 * Cursors only support 0/180 degree rotation,
4080 * hence no need to account for rotation here.
4082 src_w = pstate->base.src_w >> 16;
4083 src_h = pstate->base.src_h >> 16;
4084 dst_w = pstate->base.crtc_w;
4085 dst_h = pstate->base.crtc_h;
4086 } else {
4087 /*
4088 * Src coordinates are already rotated by 270 degrees for
4089 * the 90/270 degree plane rotation cases (to match the
4090 * GTT mapping), hence no need to account for rotation here.
4091 */
4092 src_w = drm_rect_width(&pstate->base.src) >> 16;
4093 src_h = drm_rect_height(&pstate->base.src) >> 16;
4094 dst_w = drm_rect_width(&pstate->base.dst);
4095 dst_h = drm_rect_height(&pstate->base.dst);
4096 }
4098 fp_w_ratio = div_fixed16(src_w, dst_w);
4099 fp_h_ratio = div_fixed16(src_h, dst_h);
4100 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4101 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4103 return mul_fixed16(downscale_w, downscale_h);
4104 }
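/*
 * Worked example for skl_plane_downscale_amount() above (illustrative
 * numbers): a 3840x2160 source scanned out at 1920x1080 gives
 * fp_w_ratio = fp_h_ratio = 2.0, so the total downscale amount is 4.0,
 * i.e. 0x00040000 in 16.16 fixed point. An upscale clamps both ratios
 * to 1.0 and returns 0x00010000.
 */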
4106 static uint_fixed_16_16_t
4107 skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
4109 uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
4111 if (!crtc_state->base.enable)
4112 return pipe_downscale;
4114 if (crtc_state->pch_pfit.enabled) {
4115 u32 src_w, src_h, dst_w, dst_h;
4116 u32 pfit_size = crtc_state->pch_pfit.size;
4117 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4118 uint_fixed_16_16_t downscale_h, downscale_w;
4120 src_w = crtc_state->pipe_src_w;
4121 src_h = crtc_state->pipe_src_h;
4122 dst_w = pfit_size >> 16;
4123 dst_h = pfit_size & 0xffff;
4125 if (!dst_w || !dst_h)
4126 return pipe_downscale;
4128 fp_w_ratio = div_fixed16(src_w, dst_w);
4129 fp_h_ratio = div_fixed16(src_h, dst_h);
4130 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4131 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4133 pipe_downscale = mul_fixed16(downscale_w, downscale_h);
4136 return pipe_downscale;
4139 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
4140 struct intel_crtc_state *cstate)
4142 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4143 struct drm_crtc_state *crtc_state = &cstate->base;
4144 struct drm_atomic_state *state = crtc_state->state;
4145 struct drm_plane *plane;
4146 const struct drm_plane_state *pstate;
4147 struct intel_plane_state *intel_pstate;
4148 int crtc_clock, dotclk;
4149 u32 pipe_max_pixel_rate;
4150 uint_fixed_16_16_t pipe_downscale;
4151 uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
4153 if (!cstate->base.enable)
4154 return 0;
4156 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
4157 uint_fixed_16_16_t plane_downscale;
4158 uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
4159 int bpp;
4161 if (!intel_wm_plane_visible(cstate,
4162 to_intel_plane_state(pstate)))
4163 continue;
4165 if (WARN_ON(!pstate->fb))
4166 return -EINVAL;
4168 intel_pstate = to_intel_plane_state(pstate);
4169 plane_downscale = skl_plane_downscale_amount(cstate,
4170 intel_pstate);
4171 bpp = pstate->fb->format->cpp[0] * 8;
4172 if (bpp == 64)
4173 plane_downscale = mul_fixed16(plane_downscale,
4174 fp_9_div_8);
4176 max_downscale = max_fixed16(plane_downscale, max_downscale);
4177 }
4178 pipe_downscale = skl_pipe_downscale_amount(cstate);
4180 pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
4182 crtc_clock = crtc_state->adjusted_mode.crtc_clock;
4183 dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
4185 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
4186 dotclk *= 2;
4188 pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
4190 if (pipe_max_pixel_rate < crtc_clock) {
4191 DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
4192 return -EINVAL;
4193 }
4195 return 0;
4196 }
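/*
 * Worked example for skl_check_pipe_max_pixel_rate() above
 * (illustrative numbers): with a logical cdclk of 540000 kHz on GLK
 * (doubled to 1080000) and a combined pipe_downscale of 4.5 (e.g. a
 * 2.0x2.0 pipe downscale times the 9/8 64bpp adjustment), the limit is
 * 1080000 / 4.5 = 240000 kHz; a mode whose crtc_clock exceeds that is
 * rejected with -EINVAL.
 */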
4199 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
4200 const struct intel_plane_state *intel_pstate,
4201 const int plane)
4202 {
4203 struct intel_plane *intel_plane =
4204 to_intel_plane(intel_pstate->base.plane);
4206 u32 width = 0, height = 0;
4207 struct drm_framebuffer *fb;
4209 uint_fixed_16_16_t down_scale_amount;
4212 if (!intel_pstate->base.visible)
4213 return 0;
4215 fb = intel_pstate->base.fb;
4216 format = fb->format->format;
4218 if (intel_plane->id == PLANE_CURSOR)
4219 return 0;
4220 if (plane == 1 && !is_planar_yuv_format(format))
4221 return 0;
4223 /*
4224 * Src coordinates are already rotated by 270 degrees for
4225 * the 90/270 degree plane rotation cases (to match the
4226 * GTT mapping), hence no need to account for rotation here.
4227 */
4228 width = drm_rect_width(&intel_pstate->base.src) >> 16;
4229 height = drm_rect_height(&intel_pstate->base.src) >> 16;
4231 /* UV plane does 1/2 pixel sub-sampling */
4232 if (plane == 1 && is_planar_yuv_format(format)) {
4233 width /= 2;
4234 height /= 2;
4235 }
4237 data_rate = width * height;
4239 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
4241 rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4243 rate *= fb->format->cpp[plane];
4244 return rate;
4245 }
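/*
 * Worked example for skl_plane_relative_data_rate() above
 * (illustrative numbers): a fully visible 1920x1080 ARGB8888 plane
 * with no downscaling yields 1920 * 1080 * 4 = 8294400. For the UV
 * color plane of an NV12 framebuffer (plane == 1) the dimensions are
 * halved and cpp is 2, giving 960 * 540 * 2 = 1036800. These values
 * are only ever compared against each other when dividing up the DDB.
 */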
4248 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4249 u64 *plane_data_rate,
4250 u64 *uv_plane_data_rate)
4252 struct drm_crtc_state *cstate = &intel_cstate->base;
4253 struct drm_atomic_state *state = cstate->state;
4254 struct drm_plane *plane;
4255 const struct drm_plane_state *pstate;
4256 u64 total_data_rate = 0;
4258 if (WARN_ON(!state))
4259 return 0;
4261 /* Calculate and cache data rate for each plane */
4262 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4263 enum plane_id plane_id = to_intel_plane(plane)->id;
4265 const struct intel_plane_state *intel_pstate =
4266 to_intel_plane_state(pstate);
4269 rate = skl_plane_relative_data_rate(intel_cstate,
4270 intel_pstate, 0);
4271 plane_data_rate[plane_id] = rate;
4272 total_data_rate += rate;
4275 rate = skl_plane_relative_data_rate(intel_cstate,
4276 intel_pstate, 1);
4277 uv_plane_data_rate[plane_id] = rate;
4278 total_data_rate += rate;
4281 return total_data_rate;
4285 icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
4286 u64 *plane_data_rate)
4288 struct drm_crtc_state *cstate = &intel_cstate->base;
4289 struct drm_atomic_state *state = cstate->state;
4290 struct drm_plane *plane;
4291 const struct drm_plane_state *pstate;
4292 u64 total_data_rate = 0;
4294 if (WARN_ON(!state))
4295 return 0;
4297 /* Calculate and cache data rate for each plane */
4298 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
4299 const struct intel_plane_state *intel_pstate =
4300 to_intel_plane_state(pstate);
4301 enum plane_id plane_id = to_intel_plane(plane)->id;
4304 if (!intel_pstate->linked_plane) {
4305 rate = skl_plane_relative_data_rate(intel_cstate,
4306 intel_pstate, 0);
4307 plane_data_rate[plane_id] = rate;
4308 total_data_rate += rate;
4309 } else {
4310 enum plane_id y_plane_id;
4313 * The slave plane might not be iterated by
4314 * drm_atomic_crtc_state_for_each_plane_state(),
4315 * and it needs the master plane state, which may be
4316 * NULL if we try get_new_plane_state(), so we
4317 * always calculate from the master.
4319 if (intel_pstate->slave)
4320 continue;
4322 /* Y plane rate is calculated on the slave */
4323 rate = skl_plane_relative_data_rate(intel_cstate,
4324 intel_pstate, 0);
4325 y_plane_id = intel_pstate->linked_plane->id;
4326 plane_data_rate[y_plane_id] = rate;
4327 total_data_rate += rate;
4329 rate = skl_plane_relative_data_rate(intel_cstate,
4330 intel_pstate, 1);
4331 plane_data_rate[plane_id] = rate;
4332 total_data_rate += rate;
4336 return total_data_rate;
4340 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4341 struct skl_ddb_allocation *ddb /* out */)
4343 struct drm_atomic_state *state = cstate->base.state;
4344 struct drm_crtc *crtc = cstate->base.crtc;
4345 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4346 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4347 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4348 u16 alloc_size, start = 0;
4349 u16 total[I915_MAX_PLANES] = {};
4350 u16 uv_total[I915_MAX_PLANES] = {};
4351 u64 total_data_rate;
4352 enum plane_id plane_id;
4353 int num_active;
4354 u64 plane_data_rate[I915_MAX_PLANES] = {};
4355 u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4356 u32 blocks;
4357 int level;
4359 /* Clear the partitioning for disabled planes. */
4360 memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
4361 memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
4363 if (WARN_ON(!state))
4364 return 0;
4366 if (!cstate->base.active) {
4367 alloc->start = alloc->end = 0;
4368 return 0;
4369 }
4371 if (INTEL_GEN(dev_priv) >= 11)
4372 total_data_rate =
4373 icl_get_total_relative_data_rate(cstate,
4374 plane_data_rate);
4375 else
4376 total_data_rate =
4377 skl_get_total_relative_data_rate(cstate,
4378 plane_data_rate,
4379 uv_plane_data_rate);
4382 skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
4383 ddb, alloc, &num_active);
4384 alloc_size = skl_ddb_entry_size(alloc);
4385 if (alloc_size == 0)
4386 return 0;
4388 /* Allocate fixed number of blocks for cursor. */
4389 total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
4390 alloc_size -= total[PLANE_CURSOR];
4391 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4392 alloc->end - total[PLANE_CURSOR];
4393 cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4395 if (total_data_rate == 0)
4396 return 0;
4398 /*
4399 * Find the highest watermark level for which we can satisfy the block
4400 * requirement of active planes.
4401 */
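/*
 * Illustrative example of the search below (made-up numbers): with
 * alloc_size = 512 blocks left after the cursor, level 7 might need
 * 600 blocks in total and fail, while level 3 needs only 480; level 3
 * is then chosen and the remaining 32 blocks are handed out as
 * "extra" space further down.
 */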
4402 for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
4403 blocks = 0;
4404 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4405 const struct skl_plane_wm *wm =
4406 &cstate->wm.skl.optimal.planes[plane_id];
4408 if (plane_id == PLANE_CURSOR) {
4409 if (WARN_ON(wm->wm[level].min_ddb_alloc >
4410 total[PLANE_CURSOR])) {
4411 blocks = U32_MAX;
4412 break;
4413 }
4414 continue;
4415 }
4417 blocks += wm->wm[level].min_ddb_alloc;
4418 blocks += wm->uv_wm[level].min_ddb_alloc;
4421 if (blocks <= alloc_size) {
4422 alloc_size -= blocks;
4423 break;
4424 }
4425 }
4427 if (level < 0) {
4428 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
4429 DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
4430 alloc_size);
4431 return -EINVAL;
4432 }
4434 /*
4435 * Grant each plane the blocks it requires at the highest achievable
4436 * watermark level, plus an extra share of the leftover blocks
4437 * proportional to its relative data rate.
4438 */
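/*
 * Illustrative example of the loop below (made-up numbers): with 100
 * leftover blocks and two planes at rates 300 and 100 (total 400),
 * the first plane gets ceil(100 * 300 / 400) = 75 extra blocks,
 * leaving 25 blocks and a remaining rate of 100, so the second plane
 * gets ceil(25 * 100 / 100) = 25; the leftovers are consumed exactly.
 */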
4439 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4440 const struct skl_plane_wm *wm =
4441 &cstate->wm.skl.optimal.planes[plane_id];
4442 u64 rate;
4443 u16 extra;
4445 if (plane_id == PLANE_CURSOR)
4446 continue;
4448 /*
4449 * We've accounted for all active planes; remaining planes are
4450 * all disabled.
4451 */
4452 if (total_data_rate == 0)
4453 break;
4455 rate = plane_data_rate[plane_id];
4456 extra = min_t(u16, alloc_size,
4457 DIV64_U64_ROUND_UP(alloc_size * rate,
4458 total_data_rate));
4459 total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
4460 alloc_size -= extra;
4461 total_data_rate -= rate;
4463 if (total_data_rate == 0)
4464 break;
4466 rate = uv_plane_data_rate[plane_id];
4467 extra = min_t(u16, alloc_size,
4468 DIV64_U64_ROUND_UP(alloc_size * rate,
4469 total_data_rate));
4470 uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
4471 alloc_size -= extra;
4472 total_data_rate -= rate;
4473 }
4474 WARN_ON(alloc_size != 0 || total_data_rate != 0);
4476 /* Set the actual DDB start/end points for each plane */
4477 start = alloc->start;
4478 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4479 struct skl_ddb_entry *plane_alloc =
4480 &cstate->wm.skl.plane_ddb_y[plane_id];
4481 struct skl_ddb_entry *uv_plane_alloc =
4482 &cstate->wm.skl.plane_ddb_uv[plane_id];
4484 if (plane_id == PLANE_CURSOR)
4485 continue;
4487 /* Gen11+ uses a separate plane for UV watermarks */
4488 WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
4490 /* Leave disabled planes at (0,0) */
4491 if (total[plane_id]) {
4492 plane_alloc->start = start;
4493 start += total[plane_id];
4494 plane_alloc->end = start;
4497 if (uv_total[plane_id]) {
4498 uv_plane_alloc->start = start;
4499 start += uv_total[plane_id];
4500 uv_plane_alloc->end = start;
4505 * When we calculated watermark values we didn't know how high
4506 * of a level we'd actually be able to hit, so we just marked
4507 * all levels as "enabled." Go back now and disable the ones
4508 * that aren't actually possible.
4510 for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
4511 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4512 struct skl_plane_wm *wm =
4513 &cstate->wm.skl.optimal.planes[plane_id];
4516 * We only disable the watermarks for each plane if
4517 * they exceed the ddb allocation of said plane. This
4518 * is done so that we don't end up touching cursor
4519 * watermarks needlessly when some other plane reduces
4520 * our max possible watermark level.
4522 * Bspec has this to say about the PLANE_WM enable bit:
4523 * "All the watermarks at this level for all enabled
4524 * planes must be enabled before the level will be used."
4525 * So this is actually safe to do.
4527 if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
4528 wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
4529 memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
4532 * Wa_1408961008:icl, ehl
4533 * Underruns with WM1+ disabled
4535 if (IS_GEN(dev_priv, 11) &&
4536 level == 1 && wm->wm[0].plane_en) {
4537 wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
4538 wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
4539 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
4545 * Go back and disable the transition watermark if it turns out we
4546 * don't have enough DDB blocks for it.
4548 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4549 struct skl_plane_wm *wm =
4550 &cstate->wm.skl.optimal.planes[plane_id];
4552 if (wm->trans_wm.plane_res_b >= total[plane_id])
4553 memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
4554 }
4556 return 0;
4557 }
4559 /*
4560 * The max latency should be 257 (max the punit can code is 255 and we add 2us
4561 * for the read latency) and cpp should always be <= 8, so that
4562 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
4563 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
4564 */
4565 static uint_fixed_16_16_t
4566 skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
4567 u8 cpp, u32 latency, u32 dbuf_block_size)
4569 u32 wm_intermediate_val;
4570 uint_fixed_16_16_t ret;
4572 if (latency == 0)
4573 return FP_16_16_MAX;
4575 wm_intermediate_val = latency * pixel_rate * cpp;
4576 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
4578 if (INTEL_GEN(dev_priv) >= 10)
4579 ret = add_fixed16_u32(ret, 1);
4581 return ret;
4582 }
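/*
 * Worked example for skl_wm_method1() above (illustrative numbers):
 * latency = 5 us, pixel rate = 148500 kHz, cpp = 4 and a 512 byte
 * block size give 5 * 148500 * 4 / (1000 * 512) ~= 5.8 blocks
 * (plus 1 on gen10+).
 */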
4584 static uint_fixed_16_16_t
4585 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
4586 uint_fixed_16_16_t plane_blocks_per_line)
4588 u32 wm_intermediate_val;
4589 uint_fixed_16_16_t ret;
4591 if (latency == 0)
4592 return FP_16_16_MAX;
4594 wm_intermediate_val = latency * pixel_rate;
4595 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4596 pipe_htotal * 1000);
4597 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4598 return ret;
4599 }
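/*
 * Worked example for skl_wm_method2() above (illustrative numbers):
 * with latency = 5 us, pixel rate = 148500 kHz and htotal = 2200,
 * DIV_ROUND_UP(5 * 148500, 2200 * 1000) = 1 line is needed, so the
 * result is 1 * plane_blocks_per_line.
 */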
4601 static uint_fixed_16_16_t
4602 intel_get_linetime_us(const struct intel_crtc_state *cstate)
4606 uint_fixed_16_16_t linetime_us;
4608 if (!cstate->base.active)
4609 return u32_to_fixed16(0);
4611 pixel_rate = cstate->pixel_rate;
4613 if (WARN_ON(pixel_rate == 0))
4614 return u32_to_fixed16(0);
4616 crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
4617 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
4619 return linetime_us;
4620 }
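/*
 * Worked example for intel_get_linetime_us() above: a standard
 * 1080p60 timing with crtc_htotal = 2200 and a pixel rate of
 * 148500 kHz gives 2200 * 1000 / 148500 ~= 14.8 us per line.
 */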
4623 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
4624 const struct intel_plane_state *pstate)
4626 u64 adjusted_pixel_rate;
4627 uint_fixed_16_16_t downscale_amount;
4629 /* Shouldn't reach here on disabled planes... */
4630 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
4631 return 0;
4633 /*
4634 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
4635 * with additional adjustments for plane-specific scaling.
4636 */
4637 adjusted_pixel_rate = cstate->pixel_rate;
4638 downscale_amount = skl_plane_downscale_amount(cstate, pstate);
4640 return mul_round_up_u32_fixed16(adjusted_pixel_rate,
4641 downscale_amount);
4642 }
4645 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4646 int width, const struct drm_format_info *format,
4647 u64 modifier, unsigned int rotation,
4648 u32 plane_pixel_rate, struct skl_wm_params *wp,
4651 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4652 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4655 /* only planar format has two planes */
4656 if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
4657 DRM_DEBUG_KMS("Non-planar formats have a single plane\n");
4658 return -EINVAL;
4659 }
4661 wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
4662 modifier == I915_FORMAT_MOD_Yf_TILED ||
4663 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4664 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4665 wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
4666 wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4667 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4668 wp->is_planar = is_planar_yuv_format(format->format);
4670 wp->width = width;
4671 if (color_plane == 1 && wp->is_planar)
4672 wp->width /= 2;
4674 wp->cpp = format->cpp[color_plane];
4675 wp->plane_pixel_rate = plane_pixel_rate;
4677 if (INTEL_GEN(dev_priv) >= 11 &&
4678 modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
4679 wp->dbuf_block_size = 256;
4681 wp->dbuf_block_size = 512;
4683 if (drm_rotation_90_or_270(rotation)) {
4684 switch (wp->cpp) {
4685 case 1:
4686 wp->y_min_scanlines = 16;
4687 break;
4688 case 2:
4689 wp->y_min_scanlines = 8;
4690 break;
4691 case 4:
4692 wp->y_min_scanlines = 4;
4693 break;
4694 default:
4695 MISSING_CASE(wp->cpp);
4696 return -EINVAL;
4697 }
4698 } else {
4699 wp->y_min_scanlines = 4;
4700 }
4702 if (skl_needs_memory_bw_wa(dev_priv))
4703 wp->y_min_scanlines *= 2;
4705 wp->plane_bytes_per_line = wp->width * wp->cpp;
4707 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
4708 wp->y_min_scanlines,
4709 wp->dbuf_block_size);
4711 if (INTEL_GEN(dev_priv) >= 10)
4712 interm_pbpl++;
4714 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4715 wp->y_min_scanlines);
4716 } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
4717 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4718 wp->dbuf_block_size);
4719 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4721 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4722 wp->dbuf_block_size) + 1;
4723 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4726 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
4727 wp->plane_blocks_per_line);
4729 wp->linetime_us = fixed16_to_u32_round_up(
4730 intel_get_linetime_us(crtc_state));
4732 return 0;
4733 }
4736 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
4737 const struct intel_plane_state *plane_state,
4738 struct skl_wm_params *wp, int color_plane)
4740 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4741 const struct drm_framebuffer *fb = plane_state->base.fb;
4744 if (plane->id == PLANE_CURSOR) {
4745 width = plane_state->base.crtc_w;
4748 * Src coordinates are already rotated by 270 degrees for
4749 * the 90/270 degree plane rotation cases (to match the
4750 * GTT mapping), hence no need to account for rotation here.
4752 width = drm_rect_width(&plane_state->base.src) >> 16;
4755 return skl_compute_wm_params(crtc_state, width,
4756 fb->format, fb->modifier,
4757 plane_state->base.rotation,
4758 skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
4759 color_plane);
4760 }
4762 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
4764 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4765 return true;
4767 /* The number of lines is ignored for the level 0 watermark. */
4768 return level > 0;
4769 }
4771 static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
4772 int level,
4773 const struct skl_wm_params *wp,
4774 const struct skl_wm_level *result_prev,
4775 struct skl_wm_level *result /* out */)
4777 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4778 u32 latency = dev_priv->wm.skl_latency[level];
4779 uint_fixed_16_16_t method1, method2;
4780 uint_fixed_16_16_t selected_result;
4781 u32 res_blocks, res_lines, min_ddb_alloc = 0;
4783 if (latency == 0) {
4784 /* reject it */
4785 result->min_ddb_alloc = U16_MAX;
4786 return;
4787 }
4789 /*
4790 * WaIncreaseLatencyIPCEnabled: kbl,cfl
4791 * Display WA #1141: kbl,cfl
4792 */
4793 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
4794 dev_priv->ipc_enabled)
4795 latency += 4;
4797 if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
4798 latency += 15;
4800 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
4801 wp->cpp, latency, wp->dbuf_block_size);
4802 method2 = skl_wm_method2(wp->plane_pixel_rate,
4803 cstate->base.adjusted_mode.crtc_htotal,
4805 wp->plane_blocks_per_line);
4807 if (wp->y_tiled) {
4808 selected_result = max_fixed16(method2, wp->y_tile_minimum);
4809 } else {
4810 if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
4811 wp->dbuf_block_size < 1) &&
4812 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
4813 selected_result = method2;
4814 } else if (latency >= wp->linetime_us) {
4815 if (IS_GEN(dev_priv, 9) &&
4816 !IS_GEMINILAKE(dev_priv))
4817 selected_result = min_fixed16(method1, method2);
4818 else
4819 selected_result = method2;
4820 } else {
4821 selected_result = method1;
4822 }
4823 }
4825 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
4826 res_lines = div_round_up_fixed16(selected_result,
4827 wp->plane_blocks_per_line);
4829 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
4830 /* Display WA #1125: skl,bxt,kbl */
4831 if (level == 0 && wp->rc_surface)
4832 res_blocks +=
4833 fixed16_to_u32_round_up(wp->y_tile_minimum);
4835 /* Display WA #1126: skl,bxt,kbl */
4836 if (level >= 1 && level <= 7) {
4837 if (wp->y_tiled) {
4838 res_blocks +=
4839 fixed16_to_u32_round_up(wp->y_tile_minimum);
4840 res_lines += wp->y_min_scanlines;
4841 } else {
4842 res_blocks++;
4843 }
4845 /*
4846 * Make sure result blocks for higher latency levels are
4847 * at least as high as the level below the current level.
4848 * Assumption in DDB algorithm optimization for special
4849 * cases. Also covers Display WA #1125 for RC.
4851 if (result_prev->plane_res_b > res_blocks)
4852 res_blocks = result_prev->plane_res_b;
4853 }
4854 }
4856 if (INTEL_GEN(dev_priv) >= 11) {
4857 if (wp->y_tiled) {
4858 int extra_lines;
4860 if (res_lines % wp->y_min_scanlines == 0)
4861 extra_lines = wp->y_min_scanlines;
4863 extra_lines = wp->y_min_scanlines * 2 -
4864 res_lines % wp->y_min_scanlines;
4866 min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
4867 wp->plane_blocks_per_line);
4868 } else {
4869 min_ddb_alloc = res_blocks +
4870 DIV_ROUND_UP(res_blocks, 10);
4871 }
4872 }
4874 if (!skl_wm_has_lines(dev_priv, level))
4875 res_lines = 0;
4877 if (res_lines > 31) {
4878 /* reject it */
4879 result->min_ddb_alloc = U16_MAX;
4880 return;
4881 }
4883 /*
4884 * If res_lines is valid, assume we can use this watermark level
4885 * for now. We'll come back and disable it after we calculate the
4886 * DDB allocation if it turns out we don't actually have enough
4887 * blocks to satisfy it.
4888 */
4889 result->plane_res_b = res_blocks;
4890 result->plane_res_l = res_lines;
4891 /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
4892 result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
4893 result->plane_en = true;
4894 }
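/*
 * Worked example for the gen11+ min_ddb_alloc path above
 * (illustrative numbers): with res_lines = 4 and y_min_scanlines = 4,
 * extra_lines = 4, so min_ddb_alloc covers 8 lines worth of blocks;
 * with res_lines = 5 it would be 5 + (8 - 1) = 12 lines. The final
 * value is then max(min_ddb_alloc, res_blocks) + 1, per the bspec rule
 * that a value >= the plane's DDB allocation is invalid.
 */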
4897 skl_compute_wm_levels(const struct intel_crtc_state *cstate,
4898 const struct skl_wm_params *wm_params,
4899 struct skl_wm_level *levels)
4901 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4902 int level, max_level = ilk_wm_max_level(dev_priv);
4903 struct skl_wm_level *result_prev = &levels[0];
4905 for (level = 0; level <= max_level; level++) {
4906 struct skl_wm_level *result = &levels[level];
4908 skl_compute_plane_wm(cstate, level, wm_params,
4909 result_prev, result);
4911 result_prev = result;
4916 skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
4918 struct drm_atomic_state *state = cstate->base.state;
4919 struct drm_i915_private *dev_priv = to_i915(state->dev);
4920 uint_fixed_16_16_t linetime_us;
4923 linetime_us = intel_get_linetime_us(cstate);
4924 linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
4926 /* Display WA #1135: BXT:ALL GLK:ALL */
4927 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
4928 linetime_wm /= 2;
4930 return linetime_wm;
4931 }
4933 static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
4934 const struct skl_wm_params *wp,
4935 struct skl_plane_wm *wm)
4937 struct drm_device *dev = cstate->base.crtc->dev;
4938 const struct drm_i915_private *dev_priv = to_i915(dev);
4939 u16 trans_min, trans_y_tile_min;
4940 const u16 trans_amount = 10; /* This is a configurable amount */
4941 u16 wm0_sel_res_b, trans_offset_b, res_blocks;
4943 /* Transition WMs are not recommended by the HW team for GEN9 */
4944 if (INTEL_GEN(dev_priv) <= 9)
4945 return;
4947 /* Transition WMs don't make any sense if IPC is disabled */
4948 if (!dev_priv->ipc_enabled)
4949 return;
4951 trans_min = 14;
4952 if (INTEL_GEN(dev_priv) >= 11)
4953 trans_min = 4;
4955 trans_offset_b = trans_min + trans_amount;
4958 * The spec asks for Selected Result Blocks for wm0 (the real value),
4959 * not Result Blocks (the integer value). Pay attention to the capital
4960 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
4961 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
4962 * and since we later will have to get the ceiling of the sum in the
4963 * transition watermarks calculation, we can just pretend Selected
4964 * Result Blocks is Result Blocks minus 1 and it should work for the
4965 * current platforms.
4966 */
4967 wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
4969 if (wp->y_tiled) {
4970 trans_y_tile_min =
4971 (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
4972 res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
4973 trans_offset_b;
4974 } else {
4975 res_blocks = wm0_sel_res_b + trans_offset_b;
4977 /* WA BUG:1938466 add one block for non y-tile planes */
4978 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
4979 res_blocks += 1;
4981 }
4983 /*
4984 * Just assume we can enable the transition watermark. After
4985 * computing the DDB we'll come back and disable it if that
4986 * assumption turns out to be false.
4987 */
4988 wm->trans_wm.plane_res_b = res_blocks + 1;
4989 wm->trans_wm.plane_en = true;
4990 }
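/*
 * Worked example for skl_compute_transition_wm() above (illustrative
 * numbers): on gen11 with a non y-tiled plane, trans_min = 4 and
 * trans_amount = 10 give trans_offset_b = 14; with wm0 at 20 result
 * blocks (so 19 selected result blocks), res_blocks = 19 + 14 = 33
 * and trans_wm.plane_res_b is programmed as 34.
 */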
4992 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
4993 const struct intel_plane_state *plane_state,
4994 enum plane_id plane_id, int color_plane)
4996 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4997 struct skl_wm_params wm_params;
5000 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5001 &wm_params, color_plane);
5002 if (ret)
5003 return ret;
5005 skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
5006 skl_compute_transition_wm(crtc_state, &wm_params, wm);
5008 return 0;
5009 }
5011 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5012 const struct intel_plane_state *plane_state,
5013 enum plane_id plane_id)
5015 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5016 struct skl_wm_params wm_params;
5019 wm->is_planar = true;
5021 /* uv plane watermarks must also be validated for NV12/Planar */
5022 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5023 &wm_params, 1);
5024 if (ret)
5025 return ret;
5027 skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
5029 return 0;
5030 }
5032 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
5033 const struct intel_plane_state *plane_state)
5035 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
5036 const struct drm_framebuffer *fb = plane_state->base.fb;
5037 enum plane_id plane_id = plane->id;
5040 if (!intel_wm_plane_visible(crtc_state, plane_state))
5041 return 0;
5043 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5044 plane_id, 0);
5045 if (ret)
5046 return ret;
5048 if (fb->format->is_yuv && fb->format->num_planes > 1) {
5049 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
5050 plane_id);
5051 if (ret)
5052 return ret;
5053 }
5055 return 0;
5056 }
5058 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
5059 const struct intel_plane_state *plane_state)
5061 enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
5064 /* Watermarks calculated in master */
5065 if (plane_state->slave)
5066 return 0;
5068 if (plane_state->linked_plane) {
5069 const struct drm_framebuffer *fb = plane_state->base.fb;
5070 enum plane_id y_plane_id = plane_state->linked_plane->id;
5072 WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
5073 WARN_ON(!fb->format->is_yuv ||
5074 fb->format->num_planes == 1);
5076 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5077 y_plane_id, 0);
5078 if (ret)
5079 return ret;
5081 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5082 plane_id, 1);
5083 if (ret)
5084 return ret;
5085 } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5086 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5087 plane_id, 0);
5088 if (ret)
5089 return ret;
5090 }
5092 return 0;
5093 }
5095 static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
5097 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5098 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
5099 struct drm_crtc_state *crtc_state = &cstate->base;
5100 struct drm_plane *plane;
5101 const struct drm_plane_state *pstate;
5105 * We'll only calculate watermarks for planes that are actually
5106 * enabled, so make sure all other planes are set as disabled.
5108 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
5110 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
5111 const struct intel_plane_state *intel_pstate =
5112 to_intel_plane_state(pstate);
5114 if (INTEL_GEN(dev_priv) >= 11)
5115 ret = icl_build_plane_wm(cstate, intel_pstate);
5116 else
5117 ret = skl_build_plane_wm(cstate, intel_pstate);
5119 if (ret)
5120 return ret;
5121 }
5122 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
5124 return 0;
5125 }
5127 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5128 i915_reg_t reg,
5129 const struct skl_ddb_entry *entry)
5130 {
5131 if (entry->end)
5132 I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
5133 else
5134 I915_WRITE_FW(reg, 0);
5135 }
5137 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5138 i915_reg_t reg,
5139 const struct skl_wm_level *level)
5140 {
5141 u32 val = 0;
5143 if (level->plane_en)
5144 val |= PLANE_WM_EN;
5145 if (level->ignore_lines)
5146 val |= PLANE_WM_IGNORE_LINES;
5147 val |= level->plane_res_b;
5148 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
5150 I915_WRITE_FW(reg, val);
5151 }
5153 void skl_write_plane_wm(struct intel_plane *plane,
5154 const struct intel_crtc_state *crtc_state)
5156 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5157 int level, max_level = ilk_wm_max_level(dev_priv);
5158 enum plane_id plane_id = plane->id;
5159 enum pipe pipe = plane->pipe;
5160 const struct skl_plane_wm *wm =
5161 &crtc_state->wm.skl.optimal.planes[plane_id];
5162 const struct skl_ddb_entry *ddb_y =
5163 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5164 const struct skl_ddb_entry *ddb_uv =
5165 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5167 for (level = 0; level <= max_level; level++) {
5168 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5169 &wm->wm[level]);
5170 }
5171 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5172 &wm->trans_wm);
5174 if (INTEL_GEN(dev_priv) >= 11) {
5175 skl_ddb_entry_write(dev_priv,
5176 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5177 return;
5178 }
5180 if (wm->is_planar)
5181 swap(ddb_y, ddb_uv);
5183 skl_ddb_entry_write(dev_priv,
5184 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5185 skl_ddb_entry_write(dev_priv,
5186 PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
5189 void skl_write_cursor_wm(struct intel_plane *plane,
5190 const struct intel_crtc_state *crtc_state)
5192 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5193 int level, max_level = ilk_wm_max_level(dev_priv);
5194 enum plane_id plane_id = plane->id;
5195 enum pipe pipe = plane->pipe;
5196 const struct skl_plane_wm *wm =
5197 &crtc_state->wm.skl.optimal.planes[plane_id];
5198 const struct skl_ddb_entry *ddb =
5199 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5201 for (level = 0; level <= max_level; level++) {
5202 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5205 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5207 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5210 bool skl_wm_level_equals(const struct skl_wm_level *l1,
5211 const struct skl_wm_level *l2)
5213 return l1->plane_en == l2->plane_en &&
5214 l1->ignore_lines == l2->ignore_lines &&
5215 l1->plane_res_l == l2->plane_res_l &&
5216 l1->plane_res_b == l2->plane_res_b;
5219 static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5220 const struct skl_plane_wm *wm1,
5221 const struct skl_plane_wm *wm2)
5223 int level, max_level = ilk_wm_max_level(dev_priv);
5225 for (level = 0; level <= max_level; level++) {
5226 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
5227 !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
5231 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
5234 static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
5235 const struct skl_pipe_wm *wm1,
5236 const struct skl_pipe_wm *wm2)
5238 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5239 enum plane_id plane_id;
5241 for_each_plane_id_on_crtc(crtc, plane_id) {
5242 if (!skl_plane_wm_equals(dev_priv,
5243 &wm1->planes[plane_id],
5244 &wm2->planes[plane_id]))
5248 return wm1->linetime == wm2->linetime;
5251 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5252 const struct skl_ddb_entry *b)
5254 return a->start < b->end && b->start < a->end;
5255 }
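/*
 * Example for skl_ddb_entries_overlap() above: the entries are
 * half-open, so [0, 512) and [512, 1024) do not overlap, while
 * [0, 513) and [512, 1024) do.
 */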
5257 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5258 const struct skl_ddb_entry *entries,
5259 int num_entries, int ignore_idx)
5260 {
5261 int i;
5263 for (i = 0; i < num_entries; i++) {
5264 if (i != ignore_idx &&
5265 skl_ddb_entries_overlap(ddb, &entries[i]))
5266 return true;
5267 }
5269 return false;
5270 }
5273 pipes_modified(struct intel_atomic_state *state)
5275 struct intel_crtc *crtc;
5276 struct intel_crtc_state *cstate;
5279 for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
5280 ret |= drm_crtc_mask(&crtc->base);
5286 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5287 struct intel_crtc_state *new_crtc_state)
5289 struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
5290 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5291 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5292 struct intel_plane *plane;
5294 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5295 struct intel_plane_state *plane_state;
5296 enum plane_id plane_id = plane->id;
5298 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5299 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
5300 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
5301 &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
5302 continue;
5304 plane_state = intel_atomic_get_plane_state(state, plane);
5305 if (IS_ERR(plane_state))
5306 return PTR_ERR(plane_state);
5308 new_crtc_state->update_planes |= BIT(plane_id);
5315 skl_compute_ddb(struct intel_atomic_state *state)
5317 const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5318 struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
5319 struct intel_crtc_state *old_crtc_state;
5320 struct intel_crtc_state *new_crtc_state;
5321 struct intel_crtc *crtc;
5324 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
5326 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5327 new_crtc_state, i) {
5328 ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
5329 if (ret)
5330 return ret;
5332 ret = skl_ddb_add_affected_planes(old_crtc_state,
5333 new_crtc_state);
5334 if (ret)
5335 return ret;
5336 }
5338 return 0;
5339 }
5341 static char enast(bool enable)
5343 return enable ? '*' : ' ';
5347 skl_print_wm_changes(struct intel_atomic_state *state)
5349 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5350 const struct intel_crtc_state *old_crtc_state;
5351 const struct intel_crtc_state *new_crtc_state;
5352 struct intel_plane *plane;
5353 struct intel_crtc *crtc;
5356 if ((drm_debug & DRM_UT_KMS) == 0)
5357 return;
5359 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5360 new_crtc_state, i) {
5361 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
5363 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
5364 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
5366 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5367 enum plane_id plane_id = plane->id;
5368 const struct skl_ddb_entry *old, *new;
5370 old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
5371 new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
5373 if (skl_ddb_entry_equal(old, new))
5374 continue;
5376 DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
5377 plane->base.base.id, plane->base.name,
5378 old->start, old->end, new->start, new->end,
5379 skl_ddb_entry_size(old), skl_ddb_entry_size(new));
5382 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5383 enum plane_id plane_id = plane->id;
5384 const struct skl_plane_wm *old_wm, *new_wm;
5386 old_wm = &old_pipe_wm->planes[plane_id];
5387 new_wm = &new_pipe_wm->planes[plane_id];
5389 if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
5392 DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
5393 " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
5394 plane->base.base.id, plane->base.name,
5395 enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
5396 enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
5397 enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
5398 enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
5399 enast(old_wm->trans_wm.plane_en),
5400 enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
5401 enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
5402 enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
5403 enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
5404 enast(new_wm->trans_wm.plane_en));
5406 DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
5407 " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
5408 plane->base.base.id, plane->base.name,
5409 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
5410 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
5411 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
5412 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
5413 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
5414 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
5415 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
5416 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
5417 enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
5419 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
5420 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
5421 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
5422 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
5423 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
5424 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
5425 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
5426 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
5427 enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
5429 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5430 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5431 plane->base.base.id, plane->base.name,
5432 old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
5433 old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
5434 old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
5435 old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
5436 old_wm->trans_wm.plane_res_b,
5437 new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
5438 new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
5439 new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
5440 new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
5441 new_wm->trans_wm.plane_res_b);
5443 DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5444 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5445 plane->base.base.id, plane->base.name,
5446 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
5447 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
5448 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
5449 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
5450 old_wm->trans_wm.min_ddb_alloc,
5451 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
5452 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
5453 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
5454 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
5455 new_wm->trans_wm.min_ddb_alloc);
5461 skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
5463 struct drm_device *dev = state->base.dev;
5464 const struct drm_i915_private *dev_priv = to_i915(dev);
5465 struct intel_crtc *crtc;
5466 struct intel_crtc_state *crtc_state;
5467 u32 realloc_pipes = pipes_modified(state);
5471 * When we distrust bios wm we always need to recompute to set the
5472 * expected DDB allocations for each CRTC.
5474 if (dev_priv->wm.distrust_bios_wm)
5475 (*changed) = true;
5477 /*
5478 * If this transaction isn't actually touching any CRTC's, don't
5479 * bother with watermark calculation. Note that if we pass this
5480 * test, we're guaranteed to hold at least one CRTC state mutex,
5481 * which means we can safely use values like dev_priv->active_crtcs
5482 * since any racing commits that want to update them would need to
5483 * hold _all_ CRTC state mutexes.
5485 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
5486 (*changed) = true;
5488 if (!*changed)
5489 return 0;
5492 * If this is our first atomic update following hardware readout,
5493 * we can't trust the DDB that the BIOS programmed for us. Let's
5494 * pretend that all pipes switched active status so that we'll
5495 * ensure a full DDB recompute.
5497 if (dev_priv->wm.distrust_bios_wm) {
5498 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
5499 state->base.acquire_ctx);
5500 if (ret)
5501 return ret;
5503 state->active_pipe_changes = ~0;
5506 * We usually only initialize state->active_crtcs if we're
5507 * doing a modeset; make sure this field is always
5508 * initialized during the sanitization process that happens
5509 * on the first commit too.
5511 if (!state->modeset)
5512 state->active_crtcs = dev_priv->active_crtcs;
5516 * If the modeset changes which CRTC's are active, we need to
5517 * recompute the DDB allocation for *all* active pipes, even
5518 * those that weren't otherwise being modified in any way by this
5519 * atomic commit. Due to the shrinking of the per-pipe allocations
5520 * when new active CRTC's are added, it's possible for a pipe that
5521 * we were already using and aren't changing at all here to suddenly
5522 * become invalid if its DDB needs exceeds its new allocation.
5524 * Note that if we wind up doing a full DDB recompute, we can't let
5525 * any other display updates race with this transaction, so we need
5526 * to grab the lock on *all* CRTC's.
5528 if (state->active_pipe_changes || state->modeset) {
5529 realloc_pipes = ~0;
5530 state->wm_results.dirty_pipes = ~0;
5534 * We're not recomputing for the pipes not included in the commit, so
5535 * make sure we start with the current state.
5537 for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
5538 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5539 if (IS_ERR(crtc_state))
5540 return PTR_ERR(crtc_state);
5541 }
5543 return 0;
5544 }
5546 /*
5547 * To make sure the cursor watermark registers are always consistent
5548 * with our computed state the following scenario needs special
5549 * consideration:
5551 * 1. enable cursor
5552 * 2. move cursor entirely offscreen
5553 * 3. disable cursor
5555 * Step 2. does call .disable_plane() but does not zero the watermarks
5556 * (since we consider an offscreen cursor still active for the purposes
5557 * of watermarks). Step 3. would not normally call .disable_plane()
5558 * because the actual plane visibility isn't changing, and we don't
5559 * deallocate the cursor ddb until the pipe gets disabled. So we must
5560 * force step 3. to call .disable_plane() to update the watermark
5561 * registers properly.
5563 * Other planes do not suffer from this issue as their watermarks are
5564 * calculated based on the actual plane visibility. The only time this
5565 * can trigger for the other planes is during the initial readout as the
5566 * default value of the watermarks registers is not zero.
5568 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
5569 struct intel_crtc *crtc)
5571 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5572 const struct intel_crtc_state *old_crtc_state =
5573 intel_atomic_get_old_crtc_state(state, crtc);
5574 struct intel_crtc_state *new_crtc_state =
5575 intel_atomic_get_new_crtc_state(state, crtc);
5576 struct intel_plane *plane;
5578 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5579 struct intel_plane_state *plane_state;
5580 enum plane_id plane_id = plane->id;
5583 * Force a full wm update for every plane on modeset.
5584 * Required because the reset value of the wm registers
5585 * is non-zero, whereas we want all disabled planes to
5586 * have zero watermarks. So if we turn off the relevant
5587 * power well the hardware state will go out of sync
5588 * with the software state.
5590 if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
5591 skl_plane_wm_equals(dev_priv,
5592 &old_crtc_state->wm.skl.optimal.planes[plane_id],
5593 &new_crtc_state->wm.skl.optimal.planes[plane_id]))
5594 continue;
5596 plane_state = intel_atomic_get_plane_state(state, plane);
5597 if (IS_ERR(plane_state))
5598 return PTR_ERR(plane_state);
5600 new_crtc_state->update_planes |= BIT(plane_id);
5607 skl_compute_wm(struct intel_atomic_state *state)
5609 struct intel_crtc *crtc;
5610 struct intel_crtc_state *new_crtc_state;
5611 struct intel_crtc_state *old_crtc_state;
5612 struct skl_ddb_values *results = &state->wm_results;
5613 bool changed = false;
5616 /* Clear all dirty flags */
5617 results->dirty_pipes = 0;
5619 ret = skl_ddb_add_affected_pipes(state, &changed);
5620 if (ret || !changed)
5621 return ret;
5623 /*
5624 * Calculate WM's for all pipes that are part of this transaction.
5625 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
5626 * weren't otherwise being modified (and set bits in dirty_pipes) if
5627 * pipe allocations had to change.
5629 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5630 new_crtc_state, i) {
5631 ret = skl_build_pipe_wm(new_crtc_state);
5632 if (ret)
5633 return ret;
5635 ret = skl_wm_add_affected_planes(state, crtc);
5636 if (ret)
5637 return ret;
5639 if (!skl_pipe_wm_equals(crtc,
5640 &old_crtc_state->wm.skl.optimal,
5641 &new_crtc_state->wm.skl.optimal))
5642 results->dirty_pipes |= drm_crtc_mask(&crtc->base);
5645 ret = skl_compute_ddb(state);
5646 if (ret)
5647 return ret;
5649 skl_print_wm_changes(state);
5651 return 0;
5652 }
5654 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
5655 struct intel_crtc_state *cstate)
5657 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
5658 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5659 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
5660 enum pipe pipe = crtc->pipe;
5662 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
5663 return;
5665 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
5666 }
5668 static void skl_initial_wm(struct intel_atomic_state *state,
5669 struct intel_crtc_state *cstate)
5671 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5672 struct drm_device *dev = intel_crtc->base.dev;
5673 struct drm_i915_private *dev_priv = to_i915(dev);
5674 struct skl_ddb_values *results = &state->wm_results;
5676 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
5677 return;
5679 mutex_lock(&dev_priv->wm.wm_mutex);
5681 if (cstate->base.active_changed)
5682 skl_atomic_update_crtc_wm(state, cstate);
5684 mutex_unlock(&dev_priv->wm.wm_mutex);
5687 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
5688 struct intel_wm_config *config)
5690 struct intel_crtc *crtc;
5692 /* Compute the currently _active_ config */
5693 for_each_intel_crtc(&dev_priv->drm, crtc) {
5694 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5696 if (!wm->pipe_enabled)
5697 continue;
5699 config->sprites_enabled |= wm->sprites_enabled;
5700 config->sprites_scaled |= wm->sprites_scaled;
5701 config->num_pipes_active++;
5705 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5707 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
5708 struct ilk_wm_maximums max;
5709 struct intel_wm_config config = {};
5710 struct ilk_wm_values results = {};
5711 enum intel_ddb_partitioning partitioning;
5713 ilk_compute_wm_config(dev_priv, &config);
5715 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
5716 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
5718 /* 5/6 split only in single pipe config on IVB+ */
5719 if (INTEL_GEN(dev_priv) >= 7 &&
5720 config.num_pipes_active == 1 && config.sprites_enabled) {
5721 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
5722 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
5724 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
5726 best_lp_wm = &lp_wm_1_2;
5729 partitioning = (best_lp_wm == &lp_wm_1_2) ?
5730 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
5732 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
5734 ilk_write_wm_values(dev_priv, &results);
5737 static void ilk_initial_watermarks(struct intel_atomic_state *state,
5738 struct intel_crtc_state *cstate)
5740 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5741 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5743 mutex_lock(&dev_priv->wm.wm_mutex);
5744 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
5745 ilk_program_watermarks(dev_priv);
5746 mutex_unlock(&dev_priv->wm.wm_mutex);
5749 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
5750 struct intel_crtc_state *cstate)
5752 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
5753 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
5755 mutex_lock(&dev_priv->wm.wm_mutex);
5756 if (cstate->wm.need_postvbl_update) {
5757 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
5758 ilk_program_watermarks(dev_priv);
5760 mutex_unlock(&dev_priv->wm.wm_mutex);
5763 static inline void skl_wm_level_from_reg_val(u32 val,
5764 struct skl_wm_level *level)
5766 level->plane_en = val & PLANE_WM_EN;
5767 level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
5768 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
5769 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
5770 PLANE_WM_LINES_MASK;
5773 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
5774 struct skl_pipe_wm *out)
5776 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5777 enum pipe pipe = crtc->pipe;
5778 int level, max_level;
5779 enum plane_id plane_id;
5782 max_level = ilk_wm_max_level(dev_priv);
5784 for_each_plane_id_on_crtc(crtc, plane_id) {
5785 struct skl_plane_wm *wm = &out->planes[plane_id];
5787 for (level = 0; level <= max_level; level++) {
5788 if (plane_id != PLANE_CURSOR)
5789 val = I915_READ(PLANE_WM(pipe, plane_id, level));
5790 else
5791 val = I915_READ(CUR_WM(pipe, level));
5793 skl_wm_level_from_reg_val(val, &wm->wm[level]);
5796 if (plane_id != PLANE_CURSOR)
5797 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
5798 else
5799 val = I915_READ(CUR_WM_TRANS(pipe));
5801 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5802 }
5804 if (!crtc->active)
5805 return;
5807 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
5810 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
5812 struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
5813 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
5814 struct intel_crtc *crtc;
5815 struct intel_crtc_state *cstate;
5817 skl_ddb_get_hw_state(dev_priv, ddb);
5818 for_each_intel_crtc(&dev_priv->drm, crtc) {
5819 cstate = to_intel_crtc_state(crtc->base.state);
5821 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
5823 if (crtc->active)
5824 hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
5827 if (dev_priv->active_crtcs) {
5828 /* Fully recompute DDB on first atomic commit */
5829 dev_priv->wm.distrust_bios_wm = true;
5833 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
5835 struct drm_device *dev = crtc->base.dev;
5836 struct drm_i915_private *dev_priv = to_i915(dev);
5837 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5838 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
5839 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
5840 enum pipe pipe = crtc->pipe;
5841 static const i915_reg_t wm0_pipe_reg[] = {
5842 [PIPE_A] = WM0_PIPEA_ILK,
5843 [PIPE_B] = WM0_PIPEB_ILK,
5844 [PIPE_C] = WM0_PIPEC_IVB,
5847 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
5848 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5849 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
5851 memset(active, 0, sizeof(*active));
5853 active->pipe_enabled = crtc->active;
5855 if (active->pipe_enabled) {
5856 u32 tmp = hw->wm_pipe[pipe];
5859 * For active pipes LP0 watermark is marked as
5860 * enabled, and LP1+ watermarks as disabled since
5861 * we can't really reverse compute them in case
5862 * multiple pipes are active.
5864 active->wm[0].enable = true;
5865 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
5866 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
5867 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
5868 active->linetime = hw->wm_linetime[pipe];
5869 } else {
5870 int level, max_level = ilk_wm_max_level(dev_priv);
5873 * For inactive pipes, all watermark levels
5874 * should be marked as enabled but zeroed,
5875 * which is what we'd compute them to.
5877 for (level = 0; level <= max_level; level++)
5878 active->wm[level].enable = true;
5881 crtc->wm.active.ilk = *active;
5884 #define _FW_WM(value, plane) \
5885 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
5886 #define _FW_WM_VLV(value, plane) \
5887 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
5889 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
5890 struct g4x_wm_values *wm)
5894 tmp = I915_READ(DSPFW1);
5895 wm->sr.plane = _FW_WM(tmp, SR);
5896 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5897 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
5898 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
5900 tmp = I915_READ(DSPFW2);
5901 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
5902 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
5903 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
5904 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
5905 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5906 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
5908 tmp = I915_READ(DSPFW3);
5909 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
5910 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5911 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
5912 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
5915 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
5916 struct vlv_wm_values *wm)
5921 for_each_pipe(dev_priv, pipe) {
5922 tmp = I915_READ(VLV_DDL(pipe));
5924 wm->ddl[pipe].plane[PLANE_PRIMARY] =
5925 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5926 wm->ddl[pipe].plane[PLANE_CURSOR] =
5927 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5928 wm->ddl[pipe].plane[PLANE_SPRITE0] =
5929 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5930 wm->ddl[pipe].plane[PLANE_SPRITE1] =
5931 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
5934 tmp = I915_READ(DSPFW1);
5935 wm->sr.plane = _FW_WM(tmp, SR);
5936 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
5937 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
5938 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
5940 tmp = I915_READ(DSPFW2);
5941 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
5942 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
5943 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
5945 tmp = I915_READ(DSPFW3);
5946 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
5948 if (IS_CHERRYVIEW(dev_priv)) {
5949 tmp = I915_READ(DSPFW7_CHV);
5950 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5951 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5953 tmp = I915_READ(DSPFW8_CHV);
5954 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
5955 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
5957 tmp = I915_READ(DSPFW9_CHV);
5958 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
5959 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
5961 tmp = I915_READ(DSPHOWM);
5962 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5963 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
5964 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
5965 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
5966 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5967 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5968 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5969 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5970 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5971 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
5972 } else {
5973 tmp = I915_READ(DSPFW7);
5974 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
5975 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
5977 tmp = I915_READ(DSPHOWM);
5978 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
5979 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
5980 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
5981 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
5982 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
5983 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
5984 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
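/*
 * Editor's note on the DSPHOWM readout above: each watermark field in
 * DSPFW1-9 holds only the low 8 bits of the value (9 for SR), with the
 * extra high bit per plane living in DSPHOWM. As an illustrative example
 * (values invented, not taken from Bspec): a primary plane watermark of
 * 0x13f reads back as 0x3f from the DSPFW PLANEA field plus a set
 * PLANEA_HI bit in DSPHOWM, and the two are recombined with |= (hi << 8).
 */
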
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

	out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		      wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
		      yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}

void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		vlv_punit_put(dev_priv);
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0],
			      wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}

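/*
 * Editor's sketch of the raw <-> hardware watermark relationship used in
 * the readout above: the "raw" value tracked in the atomic state counts
 * FIFO entries a plane may consume, while the hardware register holds the
 * inverted form (fifo_size - value, per vlv_invert_wm_value()). With an
 * illustrative 96-entry FIFO allocation, a hardware watermark of 16 thus
 * corresponds to a raw value of 96 - 16 = 80.
 */
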
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

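/*
 * A worked example of the SR formula documented above, using illustrative
 * numbers that are not taken from any real platform: for a 1920x1080 mode
 * with htotal = 2200 and dotclock = 148500 kHz, line time = 2200 / 148.5 MHz
 * ~= 14.8 us. With an assumed 30 us latency and 4 bytes per pixel:
 *
 *   watermark = (trunc(30 / 14.8) + 1) * 1920 * 4 = 3 * 1920 * 4 = 23040
 *
 * which would then be rounded up and padded by the extra 2 entries before
 * being programmed, per the comment above.
 */
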
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_IPC(dev_priv))
		return;

	val = I915_READ(DISP_ARB_CTL2);

	if (dev_priv->ipc_enabled)
		val |= DISP_IPC_ENABLE;
	else
		val &= ~DISP_IPC_ENABLE;

	I915_WRITE(DISP_ARB_CTL2, val);
}

static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
{
	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(dev_priv))
		return false;

	/* Display WA #1141: SKL:all KBL:all CFL */
	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		return dev_priv->dram_info.symmetric_memory;

	return true;
}

void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);

	intel_enable_ipc(dev_priv);
}

/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 =
		intel_uncore_read(uncore, DMIEC) +
		intel_uncore_read(uncore, DDREC) +
		intel_uncore_read(uncore, CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore,
			   MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	intel_uncore_write(uncore,
			   DEIER,
			   intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
	intel_uncore_write(uncore,
			   DEIMR,
			   intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(i915, i915->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (INTEL_GEN(dev_priv) >= 9) {
		limits = (rps->max_freq_softlimit) << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= (rps->min_freq_softlimit) << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		goto skip_hw_write;

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

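/*
 * A rough sketch of the threshold arithmetic above (the exact
 * GT_INTERVAL_FROM_US() scaling is platform dependent, so these are
 * illustrative numbers only): in HIGH_POWER mode the up evaluation interval
 * is 10000us and the up threshold is 10000 * 85 / 100 = 8500us, i.e. the
 * GPU must be busy for at least 8.5ms of a 10ms window before an up
 * interrupt fires.
 */
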
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(dev_priv, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
{
	struct intel_rps *rps = &i915->gt_pm.rps;

	if (INTEL_GEN(i915) < 6)
		return;

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
			rps_set_power(i915, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != rps->cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	rps->cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->gt_pm.rps.cur_freq) {
		vlv_punit_get(dev_priv);
		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		vlv_punit_put(dev_priv);
		if (err)
			return err;

		gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->gt_pm.rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 val = rps->idle_freq;
	int err;

	if (rps->cur_freq <= val)
		return;

	/* The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
	err = valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);

	if (err)
		DRM_ERROR("Failed to set RPS for idle\n");
}

void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	mutex_lock(&rps->lock);
	if (rps->enabled) {
		u8 freq;

		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Use the user's desired frequency as a guide, but for better
		 * performance, jump directly to RPe as our starting frequency.
		 */
		freq = max(rps->cur_freq,
			   rps->efficient_freq);

		if (intel_set_rps(dev_priv,
				  clamp(freq,
					rps->min_freq_softlimit,
					rps->max_freq_softlimit)))
			DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
	}
	mutex_unlock(&rps->lock);
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&rps->lock);
	if (rps->enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, rps->idle_freq);
		rps->last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&rps->lock);
}

void gen6_rps_boost(struct i915_request *rq)
{
	struct intel_rps *rps = &rq->i915->gt_pm.rps;
	unsigned long flags;
	bool boost;

	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!READ_ONCE(rps->enabled))
		return;

	if (i915_request_signaled(rq))
		return;

	/* Serializes with i915_request_retire() */
	boost = false;
	spin_lock_irqsave(&rq->lock, flags);
	if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
		boost = !atomic_fetch_inc(&rps->num_waiters);
		rq->waitboost = true;
	}
	spin_unlock_irqrestore(&rq->lock, flags);
	if (!boost)
		return;

	if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
		schedule_work(&rps->work);

	atomic_inc(&rps->boosts);
}

int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (!rps->enabled) {
		rps->cur_freq = val;
		return 0;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = valleyview_set_rps(dev_priv, val);
	else
		err = gen6_set_rps(dev_priv, val);

	return err;
}

static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
	u32 rc_ctl;
	int rc_sw_target;

	rc_ctl = I915_READ(GEN6_RC_CONTROL);
	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
		       RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
	      (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}

static bool sanitize_rc6(struct drm_i915_private *i915)
{
	struct intel_device_info *info = mkwrite_device_info(i915);

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(i915)) {
		info->has_rc6 = 0;
		info->has_rps = false;
	}

	if (info->has_rc6 &&
	    IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		info->has_rc6 = 0;
	}

	/*
	 * We assume that we do not have any deep rc6 levels if we don't have
	 * the previous rc6 level supported, i.e. we use HAS_RC6()
	 * as the initial coarse check for rc6 in general, moving on to
	 * progressively finer/deeper levels.
	 */
	if (!info->has_rc6 && info->has_rc6p)
		info->has_rc6p = 0;

	return info->has_rc6;
}

static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(dev_priv)) {
		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}
	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(dev_priv,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}

static void reset_rps(struct drm_i915_private *dev_priv,
		      int (*set)(struct drm_i915_private *, u8))
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u8 freq = rps->cur_freq;

	/* force a reset */
	rps->power.mode = -1;
	rps->cur_freq = -1;

	if (set(dev_priv, freq))
		DRM_ERROR("Failed to reset RPS to initial values\n");
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Program defaults and thresholds for RPS */
	if (IS_GEN(dev_priv, 9))
		I915_WRITE(GEN6_RC_VIDEO_FREQ,
			   GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		   GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/*
	 * 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
	I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);

	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from intel_pstate is that we
	 * do not want the enable hysteresis to be less than the wakeup latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it around 10us for Broadwell (and other
	 * big core) and around 40us for Broxton (and other low power cores).
	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
	 * However, the wakeup latency on Broxton is closer to 100us. To be
	 * conservative, we have to factor in a context switch on top (due
	 * to ksoftirqd).
	 */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);

	/* 3a: Enable RC6 */
	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE |
		   GEN6_RC_CTL_RC6_ENABLE |
		   GEN6_RC_CTL_EI_MODE(1));

	/* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
	I915_WRITE(GEN9_PG_ENABLE,
		   GEN9_RENDER_PG_ENABLE |
		   GEN9_MEDIA_PG_ENABLE |
		   GEN11_MEDIA_SAMPLER_PG_ENABLE);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

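/*
 * A quick sanity check on the hysteresis value chosen in 2c above (assuming
 * the same 1280ns tick called out in that comment): 250 * 1280ns = 320us,
 * comfortably above the ~100us Broxton wakeup latency plus the interrupt
 * service and context switch overheads discussed there.
 */
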
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6_mode;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/
	if (INTEL_GEN(dev_priv) >= 10) {
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
		I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
	} else if (IS_SKYLAKE(dev_priv)) {
		/*
		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
		 * when CPG is enabled
		 */
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	} else {
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	}

	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from intel_pstate is that we
	 * do not want the enable hysteresis to be less than the wakeup latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it around 10us for Broadwell (and other
	 * big core) and around 40us for Broxton (and other low power cores).
	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
	 * However, the wakeup latency on Broxton is closer to 100us. To be
	 * conservative, we have to factor in a context switch on top (due
	 * to ksoftirqd).
	 */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);

	/* 3a: Enable RC6 */
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* WaRsUseTimeoutMode:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	else
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE |
		   GEN6_RC_CTL_RC6_ENABLE |
		   rc6_mode);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE,
			   GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */

	/* 3: Enable RC6 */
	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE |
		   GEN7_RC_CTL_TO_MODE |
		   GEN6_RC_CTL_RC6_ENABLE);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* 1 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(rps->rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(rps->rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   rps->max_freq_softlimit << 24 |
		   rps->min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask;
	u32 gtfifodbg;
	int ret;

	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* We don't use those on Haswell */
	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	if (HAS_RC6p(dev_priv))
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
	if (HAS_RC6pp(dev_priv))
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				     &rc6vids, NULL);
	if (IS_GEN(dev_priv, 6) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const int min_freq = 15;
	const int scaling_factor = 180;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	struct cpufreq_policy *policy;

	lockdep_assert_held(&rps->lock);

	if (rps->max_freq <= rps->min_freq)
		return;

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		const int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

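/*
 * An illustrative walk through the Haswell branch above, with invented
 * numbers: with GT frequency expressed in 50MHz units, gpu_freq = 20 means
 * 1000MHz, and the requested ring ratio becomes mult_frac(20, 5, 4) = 25
 * (1250MHz, assuming the same 50MHz units), subject to the min_ring_freq
 * floor derived from the DDR configuration.
 */
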
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
	rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
	       FB_GFX_FREQ_FUSE_MASK);

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->dsm.start +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	resource_size_t pctx_paddr, paddr;
	resource_size_t pctx_size = 32*1024;
	u32 pcbr;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = dev_priv->dsm.end + 1 - pctx_size;
		GEM_BUG_ON(paddr > U32_MAX);

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}

static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	resource_size_t pctx_paddr;
	resource_size_t pctx_size = 24*1024;
	u32 pcbr;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		resource_size_t pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	GEM_BUG_ON(range_overflows_t(u64,
				     dev_priv->dsm.start,
				     pctx->stolen->start,
				     U32_MAX));
	pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;

	pctx = fetch_and_zero(&dev_priv->vlv_pctx);
	if (pctx)
		i915_gem_object_put(pctx);
}

static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->gt_pm.rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->gt_pm.rps.gpll_ref_freq);
}

static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	rps->max_freq = valleyview_rps_max_freq(dev_priv);
	rps->rp0_freq = rps->max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->max_freq),
			 rps->max_freq);

	rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->efficient_freq),
			 rps->efficient_freq);

	rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->rp1_freq),
			 rps->rp1_freq);

	rps->min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->min_freq),
			 rps->min_freq);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	rps->max_freq = cherryview_rps_max_freq(dev_priv);
	rps->rp0_freq = rps->max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->max_freq),
			 rps->max_freq);

	rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->efficient_freq),
			 rps->efficient_freq);

	rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->rp1_freq),
			 rps->rp1_freq);

	rps->min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->min_freq),
			 rps->min_freq);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
		   rps->min_freq) & 1,
		  "Odd GPU freq values\n");
}

static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}

static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, rc6_mode, pcbr;

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us (0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* Allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	rc6_mode = 0;
	if (pcbr >> VLV_PCBR_ADDR_SHIFT)
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* 1: Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	vlv_punit_get(dev_priv);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(dev_priv);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg;

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
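	/*
	 * Note: assuming the same 1.28 us units as the chv TO threshold
	 * above, 0x557 (1367 decimal) would put the RC6 promotion timeout
	 * at roughly 1.75 ms. (Illustrative reading, not taken from Bspec.)
	 */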
	/* Allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	I915_WRITE(GEN6_RC_CONTROL,
		   GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	vlv_punit_get(dev_priv);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(dev_priv);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);
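	/*
	 * The PXVFREQ fields encode the frequency as a 133.333 MHz
	 * reference times 'div', divided by 2^post and by 'pre'. For
	 * example (illustrative values): div = 5, post = 0, pre = 1
	 * decodes to 5 * 133333 = 666665, i.e. roughly 667 MHz assuming
	 * the result is in kHz.
	 */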
	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	lockdep_assert_held(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}
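	/*
	 * Scale the summed event counts into a chipset power estimate:
	 * diff becomes counts-per-ms, which is multiplied by the
	 * platform-specific slope 'm' and offset by 'c' (both from the
	 * cparams table above), then divided by 10. For example
	 * (illustrative numbers only): with m = 301, c = 28664 and 1000
	 * counts over 100 ms, diff = 10 and ret = (301 * 10 + 28664) / 10
	 * = 3167.
	 */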
	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	unsigned long val = 0;

	if (!IS_GEN(dev_priv, 5))
		return 0;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		spin_lock_irq(&mchdev_lock);
		val = __i915_chipset_val(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}

	return val;
}
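/*
 * Descriptive note: in the function below, the slope 'm' and intercept 'b'
 * come from the TSFS fuse register and 'x' is the raw TR1 thermal sensor
 * reading; the linear fit (m * x) / 127 - b converts the sensor value into
 * the figure IPS consumes.
 */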
unsigned long i915_mch_val(struct drm_i915_private *i915)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(&i915->uncore, TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = intel_uncore_read8(&i915->uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	if (!IS_GEN(dev_priv, 5))
		return;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		spin_lock_irq(&mchdev_lock);
		__i915_update_gfx_val(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	unsigned long val = 0;

	if (!IS_GEN(dev_priv, 5))
		return 0;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		spin_lock_irq(&mchdev_lock);
		val = __i915_gfx_val(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}

	return val;
}
static struct drm_i915_private __rcu *i915_mch_dev;
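/*
 * Look up the device the IPS driver should talk to. The RCU read lock
 * keeps the pointer stable while we try to take a reference; if the kref
 * is already zero the device is being torn down and NULL is returned, so
 * callers must check the result and drm_dev_put() it when done.
 */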
static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(i915_mch_dev);
	if (!kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		spin_lock_irq(&mchdev_lock);
		chipset_val = __i915_chipset_val(i915);
		graphics_val = __i915_gfx_val(i915);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;

	i915 = mchdev_get();
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	if (i915->ips.max_delay > i915->ips.fmax)
		i915->ips.max_delay--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;

	i915 = mchdev_get();
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	if (i915->ips.max_delay < i915->ips.min_delay)
		i915->ips.max_delay++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	i915->ips.max_delay = i915->ips.fstart;
	ret = ironlake_set_drps(i915, i915->ips.fstart);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	rcu_assign_pointer(i915_mch_dev, dev_priv);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	rcu_assign_pointer(i915_mch_dev, NULL);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);
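	/*
	 * Each P-state weight computed below scales as V^2 * f (i.e.
	 * dynamic power), normalised so that the highest point maps to
	 * 255: with vid = 127 and freq = 900000 (illustrative values),
	 * vid^2 * (freq / 1000) * 255 / (127 * 127 * 900) evaluates to
	 * exactly 255.
	 */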
	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make
	 * RC6 a requirement.
	 */
	if (!sanitize_rc6(dev_priv)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		pm_runtime_get(&dev_priv->drm.pdev->dev);
	}

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN(dev_priv, 6) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (rps->max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;
	rps->cur_freq = rps->idle_freq;
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!HAS_RC6(dev_priv))
		pm_runtime_put(&dev_priv->drm.pdev->dev);
}
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
	intel_disable_gt_powersave(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_reset_rps_interrupts(dev_priv);
}
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->gt_pm.rps.lock);

	if (!i915->gt_pm.llc_pstate.enabled)
		return;

	/* Currently there is no HW configuration to be done to disable. */

	i915->gt_pm.llc_pstate.enabled = false;
}
static void intel_disable_rc6(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);

	if (!dev_priv->gt_pm.rc6.enabled)
		return;

	if (INTEL_GEN(dev_priv) >= 9)
		gen9_disable_rc6(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_disable_rc6(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_disable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_disable_rc6(dev_priv);

	dev_priv->gt_pm.rc6.enabled = false;
}
static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);

	if (!dev_priv->gt_pm.rps.enabled)
		return;

	if (INTEL_GEN(dev_priv) >= 9)
		gen9_disable_rps(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_disable_rps(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_disable_rps(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_disable_rps(dev_priv);
	else if (IS_IRONLAKE_M(dev_priv))
		ironlake_disable_drps(dev_priv);

	dev_priv->gt_pm.rps.enabled = false;
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->gt_pm.rps.lock);

	intel_disable_rc6(dev_priv);
	intel_disable_rps(dev_priv);
	if (HAS_LLC(dev_priv))
		intel_disable_llc_pstate(dev_priv);

	mutex_unlock(&dev_priv->gt_pm.rps.lock);
}
static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->gt_pm.rps.lock);

	if (i915->gt_pm.llc_pstate.enabled)
		return;

	gen6_update_ring_freq(i915);

	i915->gt_pm.llc_pstate.enabled = true;
}
static void intel_enable_rc6(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);

	if (dev_priv->gt_pm.rc6.enabled)
		return;

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_enable_rc6(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 9)
		gen9_enable_rc6(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		gen8_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_enable_rc6(dev_priv);

	dev_priv->gt_pm.rc6.enabled = true;
}
static void intel_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	lockdep_assert_held(&rps->lock);

	if (rps->enabled)
		return;

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rps(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(rps->max_freq < rps->min_freq);
	WARN_ON(rps->idle_freq > rps->max_freq);

	WARN_ON(rps->efficient_freq < rps->min_freq);
	WARN_ON(rps->efficient_freq > rps->max_freq);

	rps->enabled = true;
}
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->gt_pm.rps.lock);

	if (HAS_RC6(dev_priv))
		intel_enable_rc6(dev_priv);
	if (HAS_RPS(dev_priv))
		intel_enable_rps(dev_priv);
	if (HAS_LLC(dev_priv))
		intel_enable_llc_pstate(dev_priv);

	mutex_unlock(&dev_priv->gt_pm.rps.lock);
}
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	u32 val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* This is not a Wa. Enable to reduce Sampler power */
	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);

	/* WaEnable32PlaneMode:icl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	cnp_init_clock_gating(dev_priv);

	/* This is not a Wa. Enable for better image quality */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* WaEnableChickenDCPR:cnl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcWakeMemOn:cnl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
	/* ReadHitWriteOnlyDisable:cnl */
	val |= RCCUNIT_CLKGATE_DIS;
	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		val |= SARBUNIT_CLKGATE_DIS;
	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);

	/* Wa_2201832410:cnl */
	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
	val |= GWUNIT_CLKGATE_DIS;
	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);

	/* WaDisableVFclkgate:cnl */
	/* WaVFUnitClockGatingDisable:cnl */
	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
	val |= VFUNIT_CLKGATE_DIS;
	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WaFbcNukeOnHostModify:cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* The GTT cache must be disabled if the system is using 2M pages. */
	bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
						 I915_GTT_PAGE_SIZE_2M);
	enum pipe pipe;

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaGttCachingOffByDefault:bdw */
	I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
	intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
	intel_uncore_write16(uncore, DEUC, 0);
	intel_uncore_write(uncore,
			   MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	intel_uncore_write(uncore,
			   CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 11))
		dev_priv->display.init_clock_gating = icl_init_clock_gating;
	else if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN(dev_priv, 6))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN(dev_priv, 5))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN(dev_priv, 3))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN(dev_priv, 2))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN(dev_priv, 5))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_GEN(dev_priv, 4)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN(dev_priv, 3)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN(dev_priv, 2)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}
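/*
 * Worked example (illustrative gpll_ref_freq only): with a GPLL reference
 * of 5400 kHz and val = 0xc7, N = 0xc7 - 0xb7 = 16 and the result is
 * DIV_ROUND_CLOSEST(5400 * 16, 1000) = 86 MHz. byt_freq_opcode() below is
 * the inverse mapping, from MHz back to the punit opcode.
 */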
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}
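/*
 * Note on units: on gen6-gen8 one RPS step is GT_FREQUENCY_MULTIPLIER
 * (50) MHz, while gen9+ divides that by GEN9_FREQ_SCALER (3), i.e.
 * 50/3 MHz steps. For example, val = 18 reads back as 900 MHz on Haswell
 * but as 300 MHz on Skylake. intel_freq_opcode() below performs the
 * reverse conversion.
 */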
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->gt_pm.rps.lock);
	mutex_init(&dev_priv->gt_pm.rps.power.mutex);

	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);

	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/*
	 * The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	lockdep_assert_held(&dev_priv->uncore.lock);

	/*
	 * vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);
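	/*
	 * The high-range window exposes bits [39:8] of the 40-bit counter
	 * and the low-range window bits [31:0]; the two views overlap in
	 * bits [31:8], which is why the final value is assembled below as
	 * lower | upper << 8 rather than upper << 32.
	 */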
	/*
	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * now.
	 */

	return lower | (u64)upper << 8;
}
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u64 time_hw, prev_hw, overflow_hw;
	unsigned int fw_domains;
	unsigned long flags;
	unsigned int i;
	u32 mul, div;

	if (!HAS_RC6(dev_priv))
		return 0;

	/*
	 * Store previous hw counter values for counter wrap-around handling.
	 *
	 * There are only four interesting registers and they live next to each
	 * other so we can use the relative address, compared to the smallest
	 * one as the index into driver storage.
	 */
	i = (i915_mmio_reg_offset(reg) -
	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
		return 0;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		mul = 1000000;
		div = dev_priv->czclk_freq;
		overflow_hw = BIT_ULL(40);
		time_hw = vlv_residency_raw(dev_priv, reg);
	} else {
		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
		if (IS_GEN9_LP(dev_priv)) {
			mul = 10000;
			div = 12;
		} else {
			mul = 1280;
			div = 1;
		}

		overflow_hw = BIT_ULL(32);
		time_hw = intel_uncore_read_fw(uncore, reg);
	}
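	/*
	 * At this point time_hw is still in raw counter units; the final
	 * mul_u64_u32_div() below converts it to nanoseconds. E.g. with
	 * czclk_freq = 320000 kHz (illustrative value), each VLV/CHV count
	 * is 1000000 / 320000 = 3.125 ns, while the fixed-unit platforms
	 * simply scale by 1280/1 or 10000/12 ns per count.
	 */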
	/*
	 * Counter wrap handling. This relies on queries happening often
	 * enough; otherwise the counters can still wrap undetected.
	 */
	prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
	dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;

	/* RC6 delta from last sample. */
	if (time_hw >= prev_hw)
		time_hw -= prev_hw;
	else
		time_hw += overflow_hw - prev_hw;

	/* Add delta to RC6 extended raw driver copy. */
	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return mul_u64_u32_div(time_hw, mul, div);
}
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}
u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
{
	u32 cagf;

	if (INTEL_GEN(dev_priv) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	return cagf;
}