/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
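
/*
 * Illustrative example (not a statement about any specific platform):
 * a mask of INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE would permit RC6 and
 * deep RC6 but keep the deepest RC6pp state off, trading some power
 * savings for lower wakeup latency.
 */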
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533: dev_priv->fsb_freq = 533; break; /* 133*4 */
	case CLKCFG_FSB_800: dev_priv->fsb_freq = 800; break; /* 200*4 */
	case CLKCFG_FSB_667: dev_priv->fsb_freq = 667; break; /* 167*4 */
	case CLKCFG_FSB_400: dev_priv->fsb_freq = 400; break; /* 100*4 */
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533: dev_priv->mem_freq = 533; break;
	case CLKCFG_MEM_667: dev_priv->mem_freq = 667; break;
	case CLKCFG_MEM_800: dev_priv->mem_freq = 800; break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc: dev_priv->mem_freq = 800; break;
	case 0x10: dev_priv->mem_freq = 1066; break;
	case 0x14: dev_priv->mem_freq = 1333; break;
	case 0x18: dev_priv->mem_freq = 1600; break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c: dev_priv->fsb_freq = 3200; break;
	case 0x00e: dev_priv->fsb_freq = 3733; break;
	case 0x010: dev_priv->fsb_freq = 4266; break;
	case 0x012: dev_priv->fsb_freq = 4800; break;
	case 0x014: dev_priv->fsb_freq = 5333; break;
	case 0x016: dev_priv->fsb_freq = 5866; break;
	case 0x018: dev_priv->fsb_freq = 6400; break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
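
/*
 * For example, FW_WM(wm, SR) expands to
 * ((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK, i.e. the value shifted into
 * the SR field of a DSPFW register and clamped to that field's mask.
 */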
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
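
/*
 * Illustrative numbers (not from Bspec): a 148.5 MHz pixel clock at
 * 4 bytes per pixel drains the FIFO at ~594 MB/s, so during a 5 us
 * stall roughly 148500 kHz * 4 B * 5 us = 2970 bytes (about 47
 * 64-byte cachelines) are consumed; the watermark must leave at least
 * that much in the FIFO when the fetch is issued.
 */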
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
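
/*
 * Worked example with illustrative values: dsparb = 0x000000ff,
 * dsparb2 = 0x1, lo_shift = 0, hi_shift = 0 takes the low byte 0xff
 * from DSPARB and bit 8 from DSPARB2, yielding a 9-bit FIFO start of
 * 0x1ff (511).
 */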
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0: size = sprite0_start; break;
	case 1: size = sprite1_start - sprite0_start; break;
	case 2: size = 512 - 1 - sprite1_start; break;
	default: return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
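
/*
 * Worked example for intel_calculate_wm(), with illustrative numbers
 * only: clock_in_khz = 100000 (100 MHz), cpp = 4 and latency_ns = 5000
 * give (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes, i.e.
 * DIV_ROUND_UP(2000, 64) = 32 FIFO entries for a 64-byte cacheline;
 * with fifo_size = 96 and guard_size = 2 the returned watermark level
 * would be 96 - (32 + 2) = 62.
 */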
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}
enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
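
/*
 * Worked example with illustrative numbers: pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4, latency = 330
 * (33 us in 0.1us units). (330 * 148500) / (2200 * 10000) = 2 whole
 * lines drained during the latency, rounded up to 3 by the "+ 1";
 * 3 * 1920 * 4 = 23040 bytes, i.e. DIV_ROUND_UP(23040, 64) = 360
 * 64-byte FIFO entries.
 */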
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
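
/*
 * is_power_of_2() is true only for a mask with exactly one bit set:
 * enabled = 1 << PIPE_A qualifies, (1 << PIPE_A) | (1 << PIPE_B) does
 * not, and neither does an empty mask (is_power_of_2(0) is false).
 */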
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev) || IS_I945GM(dev))
			cpp = 4;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev))
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
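
/*
 * Worked example for the two methods, with illustrative numbers:
 * pixel_rate = 148500 kHz, cpp = 4, latency = 7 (0.7 us). Method 1:
 * DIV_ROUND_UP(148500 * 4 * 7, 64 * 10000) + 2 = 7 + 2 = 9 entries.
 * Method 2 with pipe_htotal = 2200 and horiz_pixels = 1920:
 * (7 * 148500) / (2200 * 10000) = 0 whole lines, so
 * (0 + 1) * 1920 * 4 / 64 + 2 = 122 entries. Callers take
 * min(method1, method2) where both apply.
 */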
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
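
/*
 * Illustrative example: pri_val = 360 64-byte entries is 23040 bytes;
 * for a 1920 pixel wide plane at cpp = 4 (7680 bytes per line) that is
 * DIV_ROUND_UP(23040, 7680) = 3 lines, so the FBC watermark comes out
 * as 3 + 2 = 5.
 */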
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->base.visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk == 0))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
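
/*
 * Illustrative example: crtc_htotal = 2200 at crtc_clock = 148500 kHz
 * is 2200 / 148500 ≈ 14.8 us per row, which the formula above encodes
 * as DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 eighth-of-a-us
 * units; ips_linetime is the same calculation against cdclk.
 */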
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us then
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
2147 for (level = 1; level <= max_level; level++)
2151 for (i = level + 1; i <= max_level; i++)
2156 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2157 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2159 wm[0] = (sskpd >> 56) & 0xFF;
2161 wm[0] = sskpd & 0xF;
2162 wm[1] = (sskpd >> 4) & 0xFF;
2163 wm[2] = (sskpd >> 12) & 0xFF;
2164 wm[3] = (sskpd >> 20) & 0x1FF;
2165 wm[4] = (sskpd >> 32) & 0x1FF;
2166 } else if (INTEL_INFO(dev)->gen >= 6) {
2167 uint32_t sskpd = I915_READ(MCH_SSKPD);
2169 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2170 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2171 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2172 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2173 } else if (INTEL_INFO(dev)->gen >= 5) {
2174 uint32_t mltr = I915_READ(MLTR_ILK);
2176 /* ILK primary LP0 latency is 700 ns */
2178 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2179 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
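/*
 * Decoding example with a hypothetical first-set mailbox value of
 * 0x00100a04: the four 8-bit fields yield wm[0] = 0x04, wm[1] = 0x0a,
 * wm[2] = 0x10 and wm[3] = 0x00 (all in us). The WaWmMemoryReadLatency
 * fixup above then bumps wm[0] to 6 and wm[1]/wm[2] to 12/18, while the
 * zero at level 3 disables levels 3 and up.
 */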
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_INFO(dev)->gen >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(&dev_priv->drm);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
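/*
 * Worked example for the SNB quirk above: with min = 12, WM0 (in 0.1 us
 * units) is raised to at least 1.2 us, and the WM1+ levels (in 0.5 us
 * units) are raised to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us.
 */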
static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->base.visible;
		pipe_wm->sprites_scaled = sprstate->base.visible &&
			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(dev);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
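/*
 * Example of the LP->level mapping above: on a platform with 5 levels
 * (HSW/BDW) where wm[4] is enabled, LP1/LP2/LP3 are programmed from
 * levels 1/3/4; if wm[4] is disabled they map to levels 1/2/3.
 */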
static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = &dev_priv->drm;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */
#define BXT_DDB_SIZE		512

/*
 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
 * other universal planes are in indices 1..n. Note that this may leave unused
 * indices between the top "sprite" plane and the cursor.
 */
static unsigned int
skl_wm_plane_id(const struct intel_plane *plane)
{
	switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
	case DRM_PLANE_TYPE_CURSOR:
		return PLANE_CURSOR;
	case DRM_PLANE_TYPE_OVERLAY:
		return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
		return plane->plane;
	}
}
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;
	int pipe = to_intel_crtc(for_crtc)->pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	if (IS_BROXTON(dev))
		ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged. Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}
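/*
 * Worked example: two active pipes on SKL leave ddb_size = 896 - 4 = 892
 * blocks, so pipe_size = 892 / 2 = 446 and the pipes get [0, 446) and
 * [446, 892) respectively (illustrative numbers only).
 */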
static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;

	if (entry->end)
		entry->end += 1;
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	memset(ddb, 0, sizeof(*ddb));

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);

		intel_display_power_put(dev_priv, power_domain);
	}
}
/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint32_t
skl_plane_downscale_amount(const struct intel_plane_state *pstate)
{
	uint32_t downscale_h, downscale_w;
	uint32_t src_w, src_h, dst_w, dst_h;

	if (WARN_ON(!pstate->base.visible))
		return DRM_PLANE_HELPER_NO_SCALING;

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	src_w = drm_rect_width(&pstate->base.src);
	src_h = drm_rect_height(&pstate->base.src);
	dst_w = drm_rect_width(&pstate->base.dst);
	dst_h = drm_rect_height(&pstate->base.dst);
	if (intel_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);

	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

	/* Provide result in 16.16 fixed point */
	return (uint64_t)downscale_w * downscale_h >> 16;
}
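/*
 * Worked example: a 4096-wide source (stored as 4096 << 16 in the 16.16
 * src rect) scanned out onto a 2048-wide destination gives downscale_w =
 * 2 << 16 and downscale_h = 1 << 16, so the function returns 2 << 16,
 * i.e. a total downscale factor of 2.0 in 16.16 fixed point.
 */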
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t down_scale_amount, data_rate;
	uint32_t width = 0, height = 0;
	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;

	if (!intel_pstate->base.visible)
		return 0;
	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)	/* y-plane data rate */
			data_rate = width * height *
				drm_format_plane_cpp(format, 0);
		else	/* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				drm_format_plane_cpp(format, 1);
	} else {
		/* for packed formats */
		data_rate = width * height * drm_format_plane_cpp(format, 0);
	}

	down_scale_amount = skl_plane_downscale_amount(intel_pstate);

	return (uint64_t)data_rate * down_scale_amount >> 16;
}
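/*
 * Worked example: a visible 1920x1080 XRGB8888 plane with no downscaling
 * contributes 1920 * 1080 * 4 = 8294400 to the relative data rate; the
 * uv-plane pass (y == 0) of an NV12 plane of the same size contributes
 * (1920 / 2) * (1080 / 2) * 2 = 1036800.
 */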
/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_plane *plane;
	const struct intel_plane *intel_plane;
	struct drm_plane_state *pstate;
	unsigned int rate, total_data_rate = 0;
	int id;
	int i;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	for_each_plane_in_state(state, plane, pstate, i) {
		id = skl_wm_plane_id(to_intel_plane(plane));
		intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe != intel_crtc->pipe)
			continue;

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		intel_cstate->wm.skl.plane_data_rate[id] = rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
	}

	/* Calculate CRTC's total data rate from cached values */
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		int id = skl_wm_plane_id(intel_plane);

		/* packed/uv */
		total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
	}

	WARN_ON(cstate->plane_mask && total_data_rate == 0);

	return total_data_rate;
}
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->pixel_format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
		return 8;

	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);

	/* Halve UV plane width and height for NV12 */
	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
	else
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);

	if (intel_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}
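/*
 * Worked example for the return value above: a 3840-wide, unrotated,
 * Y-tiled XRGB8888 plane (plane_bpp = 4, min_scanlines = 8) needs
 * DIV_ROUND_UP(4 * 3840 * 4, 512) * 8 / 4 + 3 = 120 * 2 + 3 = 243 blocks.
 */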
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct drm_plane *plane;
	struct drm_plane_state *pstate;
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
	unsigned int total_data_rate;
	int num_active;
	int id, i;

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}

	cursor_blocks = skl_cursor_allocation(num_active);
	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	alloc_size -= cursor_blocks;

	/* 1. Allocate the minimum required blocks for each active plane */
	for_each_plane_in_state(state, plane, pstate, i) {
		intel_plane = to_intel_plane(plane);
		id = skl_wm_plane_id(intel_plane);

		if (intel_plane->pipe != pipe)
			continue;

		if (!to_intel_plane_state(pstate)->base.visible) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}
		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}

		minimum[id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
	}

	for (i = 0; i < PLANE_CURSOR; i++) {
		alloc_size -= minimum[i];
		alloc_size -= y_minimum[i];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;
		int id = skl_wm_plane_id(intel_plane);

		data_rate = cstate->wm.skl.plane_data_rate[id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][id].start = start;
			ddb->plane[pipe][id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		y_data_rate = cstate->wm.skl.plane_y_data_rate[id];

		y_plane_blocks = y_minimum[id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
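/*
 * Worked example (illustrative numbers): with pixel_rate = 148500 (kHz),
 * cpp = 4 and latency = 15 (us), wm_intermediate_val = 15 * 148500 * 4 /
 * 512 = 17402 and method1 = DIV_ROUND_UP(17402, 1000) = 18 blocks.
 */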
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * cpp;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
		plane_bytes_per_line *= 4;
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line /= 4;
	} else if (tiling == DRM_FORMAT_MOD_NONE) {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
				plane_blocks_per_line;

	return ret;
}
static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!pstate->base.visible))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
	downscale_amount = skl_plane_downscale_amount(pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}
static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;

	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
		*enabled = false;
		return 0;
	}

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 width,
				 cpp,
				 fb->modifier[0],
				 latency);

	plane_bytes_per_line = width * cpp;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;
		if (intel_rotation_90_or_270(pstate->rotation)) {
			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
				drm_format_plane_cpp(fb->pixel_format, 1) :
				drm_format_plane_cpp(fb->pixel_format, 0);

			switch (cpp) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
		y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
				      to_intel_crtc(cstate->base.crtc)->pipe,
				      skl_wm_plane_id(to_intel_plane(pstate->plane)),
				      res_blocks, ddb_allocation, res_lines);

			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}
static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *intel_pstate;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(result, 0, sizeof(*result));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		int i = skl_wm_plane_id(intel_plane);

		plane = &intel_plane->base;
		intel_pstate = NULL;
		if (state)
			intel_pstate =
				intel_atomic_get_existing_plane_state(state,
								      intel_plane);

		/*
		 * Note: If we start supporting multiple pending atomic commits
		 * against the same planes/CRTC's in the future, plane->state
		 * will no longer be the correct pre-state to use for the
		 * calculations here and we'll need to change where we get the
		 * 'unchanged' plane data from.
		 *
		 * For now this is fine because we only allow one queued commit
		 * against a CRTC. Even if the plane isn't modified by this
		 * transaction and we don't have a plane lock, we still have
		 * the CRTC's lock, so we know that no other transactions are
		 * racing with us to update it.
		 */
		if (!intel_pstate)
			intel_pstate = to_intel_plane_state(plane->state);

		WARN_ON(!intel_pstate->base.fb);

		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		ret = skl_compute_plane_wm(dev_priv,
					   cstate,
					   intel_pstate,
					   ddb_blocks,
					   level,
					   &result->plane_res_b[i],
					   &result->plane_res_l[i],
					   &result->plane_en[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;

	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
			    skl_pipe_pixel_rate(cstate));
}

static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;

	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
		int i = skl_wm_plane_id(intel_plane);

		trans_wm->plane_en[i] = false;
	}
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev);
	int ret;

	for (level = 0; level <= max_level; level++) {
		ret = skl_compute_wm_level(dev_priv, ddb, cstate,
					   level, &pipe_wm->wm[level]);
		if (ret)
			return ret;
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);

	return 0;
}
static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];

		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
			temp |= PLANE_WM_EN;

		r->plane[pipe][PLANE_CURSOR][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
		temp |= PLANE_WM_EN;

	r->plane_trans[pipe][PLANE_CURSOR] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}
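/*
 * Packing example (field positions taken from the PLANE_WM_* masks used
 * above): with plane_res_b = 45 blocks, plane_res_l = 4 lines and the
 * level enabled, temp = PLANE_WM_EN | (4 << PLANE_WM_LINES_SHIFT) | 45.
 */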
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}
static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
			continue;
		if (!crtc->active)
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->plane[pipe][PLANE_CURSOR][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe),
			   new->plane_trans[pipe][PLANE_CURSOR]);

		for (i = 0; i < intel_num_planes(crtc); i++) {
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);
			skl_ddb_entry_write(dev_priv,
					    PLANE_NV12_BUF_CFG(pipe, i),
					    &new->ddb.y_plane[pipe][i]);
		}

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.plane[pipe][PLANE_CURSOR]);
	}
}
/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strictly included into their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}
static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}
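/*
 * Example: shrinking a pipe from [0, 448) to [100, 300) satisfies all
 * three conditions above, so the pipe is flushed in the first pass and
 * we wait for its vblank before handing the freed space to anyone else.
 */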
static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes that got more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      struct skl_ddb_allocation *ddb, /* out */
			      struct skl_pipe_wm *pipe_wm, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}

static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *intel_crtc;
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	uint32_t realloc_pipes = pipes_modified(state);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us. Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm)
		intel_state->active_pipe_changes = ~0;

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit. Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceed its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);

		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;
	}

	return 0;
}
static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_wm_values *results = &intel_state->wm_results;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation. Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME: Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
					 &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
		skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
	}

	return 0;
}
static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;

	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
		return;

	intel_crtc->wm.active.skl = *pipe_wm;

	mutex_lock(&dev_priv->wm.wm_mutex);

	skl_write_wm_values(dev_priv, results);
	skl_flush_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
			active->wm[level].plane_res_b[PLANE_CURSOR] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[PLANE_CURSOR] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
			active->trans_wm.plane_res_b[PLANE_CURSOR] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[PLANE_CURSOR] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
				I915_READ(PLANE_WM(pipe, i, level));
		hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc->active)
		return;

	hw->dirty_pipes |= drm_crtc_mask(crtc);

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
		}
		temp = hw->plane[pipe][PLANE_CURSOR][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->plane_trans[pipe][PLANE_CURSOR];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);

	intel_crtc->wm.active.skl = *active;
}
4224 void skl_wm_get_hw_state(struct drm_device *dev)
4226 struct drm_i915_private *dev_priv = to_i915(dev);
4227 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4228 struct drm_crtc *crtc;
4230 skl_ddb_get_hw_state(dev_priv, ddb);
4231 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
4232 skl_pipe_wm_get_hw_state(crtc);
4234 if (dev_priv->active_crtcs) {
4235 /* Fully recompute DDB on first atomic commit */
4236 dev_priv->wm.distrust_bios_wm = true;
4238 /* Easy/common case; just sanitize DDB now if everything off */
4239 memset(ddb, 0, sizeof(*ddb));
4243 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4245 struct drm_device *dev = crtc->dev;
4246 struct drm_i915_private *dev_priv = to_i915(dev);
4247 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4248 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4249 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4250 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4251 enum pipe pipe = intel_crtc->pipe;
4252 static const i915_reg_t wm0_pipe_reg[] = {
4253 [PIPE_A] = WM0_PIPEA_ILK,
4254 [PIPE_B] = WM0_PIPEB_ILK,
4255 [PIPE_C] = WM0_PIPEC_IVB,
4258 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4259 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4260 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4262 memset(active, 0, sizeof(*active));
4264 active->pipe_enabled = intel_crtc->active;
4266 if (active->pipe_enabled) {
4267 u32 tmp = hw->wm_pipe[pipe];
4270 * For active pipes LP0 watermark is marked as
4271 * enabled, and LP1+ watermaks as disabled since
4272 * we can't really reverse compute them in case
4273 * multiple pipes are active.
4275 active->wm[0].enable = true;
4276 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4277 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4278 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4279 active->linetime = hw->wm_linetime[pipe];
4281 int level, max_level = ilk_wm_max_level(dev);
4284 * For inactive pipes, all watermark levels
4285 * should be marked as enabled but zeroed,
4286 * which is what we'd compute them to.
4288 for (level = 0; level <= max_level; level++)
4289 active->wm[level].enable = true;
4292 intel_crtc->wm.active.ilk = *active;
4295 #define _FW_WM(value, plane) \
4296 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4297 #define _FW_WM_VLV(value, plane) \
4298 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
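/*
 * Illustrative sketch (not part of the driver): after preprocessing, the
 * _FW_WM() macro above reduces to a plain mask-and-shift field extraction.
 * For example, _FW_WM(tmp, SR) expands to exactly this:
 */
static inline u32 demo_fw_wm_sr(u32 tmp)
{
	return (tmp & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT;
}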
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
wm->ddl[pipe].primary =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
wm->ddl[pipe].cursor =
(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
wm->ddl[pipe].sprite[0] =
(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
wm->ddl[pipe].sprite[1] =
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
tmp = I915_READ(DSPFW2);
wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
tmp = I915_READ(DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
tmp = I915_READ(DSPFW7_CHV);
wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
tmp = I915_READ(DSPFW8_CHV);
wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
tmp = I915_READ(DSPFW9_CHV);
wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
tmp = I915_READ(DSPFW7);
wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;

void vlv_wm_get_hw_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_values *wm = &dev_priv->wm.vlv;
struct intel_plane *plane;
vlv_read_wm_values(dev_priv, wm);
for_each_intel_plane(dev, plane) {
switch (plane->base.type) {
case DRM_PLANE_TYPE_CURSOR:
plane->wm.fifo_size = 63;
case DRM_PLANE_TYPE_PRIMARY:
plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
* If DDR DVFS is disabled in the BIOS, Punit
* will never ack the request. So if that happens
* assume we don't have to enable/disable DDR DVFS
* dynamically. To test that just set the REQ_ACK
* bit to poke the Punit, but don't change the
* HIGH/LOW bits so that we don't actually change
* the current state.
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
mutex_unlock(&dev_priv->rps.hw_lock);
for_each_pipe(dev_priv, pipe)
DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);

void ilk_wm_get_hw_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
for_each_crtc(dev, crtc)
ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
if (INTEL_INFO(dev)->gen >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);

* intel_update_watermarks - update FIFO watermark values based on current modes
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
* There are several cases to deal with here:
* - normal (i.e. non-self-refresh)
* - self-refresh (SR) mode
* - lines are large relative to FIFO size (buffer can hold up to 2)
* - lines are small relative to FIFO size (buffer can hold more than 2
* lines), so need to account for TLB latency
* The normal calculation is:
* watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent (we assume pessimal
* values here).
* The SR calculation is:
* watermark = (trunc(latency/line time)+1) * surface width *
* bytes per pixel
* where
* line time = htotal / dotclock
* surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
* The final value programmed to the register should always be rounded up,
* and include an extra 2 entries to account for clock crossings.
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
void intel_update_watermarks(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
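/*
 * Illustrative sketch (not part of the driver): a worked instance of the
 * self-refresh watermark formula documented above, using assumed inputs
 * (12us latency, a 1920x2200 mode at a 148500kHz dotclock, 4 bytes per
 * pixel). Integer math only; rounding is simplified to the "+2 entries"
 * clock-crossing allowance noted above.
 */
static inline unsigned int demo_sr_watermark(void)
{
	unsigned int latency_ns = 12000;	/* assumed platform latency */
	unsigned int htotal = 2200, hdisplay = 1920;
	unsigned int dotclock_khz = 148500;
	unsigned int bytes_per_pixel = 4;

	/* line time = htotal / dotclock, computed here in nanoseconds */
	unsigned int line_time_ns = htotal * 1000000 / dotclock_khz;

	/* (trunc(latency/line time) + 1) * surface width * bytes per pixel */
	unsigned int entries = (latency_ns / line_time_ns + 1) *
			       hdisplay * bytes_per_pixel;

	/* include the extra 2 entries for clock crossings */
	return entries + 2;
}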
* Lock protecting IPS related data structures
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
assert_spin_locked(&mchdev_lock);
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
return false; /* still busy with another command */
rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
I915_WRITE16(MEMSWCTL, rgvswctl);
POSTING_READ16(MEMSWCTL);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE16(MEMSWCTL, rgvswctl);

static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
u8 fmax, fmin, fstart, vstart;
spin_lock_irq(&mchdev_lock);
rgvmodectl = I915_READ(MEMMODECTL);
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
/* Set max/min thresholds to 90ms and 80ms respectively */
I915_WRITE(RCBMAXAVG, 90000);
I915_WRITE(RCBMINAVG, 80000);
I915_WRITE(MEMIHYST, 1);
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
dev_priv->ips.fstart = fstart;
dev_priv->ips.max_delay = fstart;
dev_priv->ips.min_delay = fmin;
dev_priv->ips.cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
* Interrupts will be enabled in ironlake_irq_postinstall
I915_WRITE(VIDSTART, vstart);
POSTING_READ(VIDSTART);
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
ironlake_set_drps(dev_priv, fstart);
dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(DDREC) + I915_READ(CSIEC);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
dev_priv->ips.last_count2 = I915_READ(GFXEC);
dev_priv->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock);

static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
spin_lock_irq(&mchdev_lock);
rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
spin_unlock_irq(&mchdev_lock);

/* There's a funny hw issue where the hw returns all 0 when reading from
* GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
/* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
if (IS_GEN9(dev_priv)) {
limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
limits = dev_priv->rps.max_freq_softlimit << 24;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= dev_priv->rps.min_freq_softlimit << 16;
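/*
 * Illustrative sketch (not part of the driver): the limits word built by
 * intel_rps_limits() above packs the softlimits into generation-specific
 * bitfields. A standalone version, with the platform check passed in as a
 * flag (an assumption for demonstration):
 */
static inline u32 demo_rps_limits(u8 max_soft, u8 min_soft, u8 val, bool gen9)
{
	u32 limits;

	if (gen9) {
		limits = (u32)max_soft << 23;
		if (val <= min_soft)
			limits |= (u32)min_soft << 14;
	} else {
		limits = (u32)max_soft << 24;
		if (val <= min_soft)
			limits |= (u32)min_soft << 16;
	}

	return limits;
}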
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
if (val > dev_priv->rps.efficient_freq + 1 &&
val > dev_priv->rps.cur_freq)
new_power = BETWEEN;
if (val <= dev_priv->rps.efficient_freq &&
val < dev_priv->rps.cur_freq)
new_power = LOW_POWER;
else if (val >= dev_priv->rps.rp0_freq &&
val > dev_priv->rps.cur_freq)
new_power = HIGH_POWER;
if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
val < dev_priv->rps.cur_freq)
new_power = BETWEEN;
/* Max/min bins are special */
if (val <= dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
if (val >= dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
/* Upclock if more than 95% busy over 16ms */
/* Downclock if less than 85% busy over 32ms */
threshold_down = 85;
/* Upclock if more than 90% busy over 13ms */
/* Downclock if less than 75% busy over 32ms */
threshold_down = 75;
/* Upclock if more than 85% busy over 10ms */
/* Downclock if less than 60% busy over 32ms */
threshold_down = 60;
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
GT_INTERVAL_FROM_US(dev_priv,
ei_up * threshold_up / 100));
I915_WRITE(GEN6_RP_DOWN_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_down));
I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
GT_INTERVAL_FROM_US(dev_priv,
ei_down * threshold_down / 100));
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
dev_priv->rps.last_adj = 0;
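/*
 * Illustrative sketch (not part of the driver): on the platforms whose
 * counters tick in the ~1.28us units noted above, converting an evaluation
 * interval given in microseconds and taking a busyness percentage of it
 * reduces to plain integer arithmetic. This ignores the per-platform
 * scaling that GT_INTERVAL_FROM_US() performs, so treat it as an
 * approximation under that assumption.
 */
static inline u32 demo_ei_to_hw_units(u32 ei_us, u32 threshold_pct)
{
	u32 interval = ei_us * 1000 / 1280;	/* 1 unit = 1280ns */

	return interval * threshold_pct / 100;	/* e.g. 85% of a 32ms EI */
}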
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
if (val > dev_priv->rps.min_freq_softlimit)
mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
mask &= dev_priv->pm_rps_events;
return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);

/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
/* min/max delay may still have been modified so be sure to
* write the limits value.
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
if (IS_GEN9(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(val) |
GEN6_AGGRESSIVE_TURBO);
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
"Odd GPU freq value\n"))
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
if (val != dev_priv->rps.cur_freq) {
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (!IS_CHERRYVIEW(dev_priv))
gen6_set_rps_thresholds(dev_priv, val);
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
* * If Gfx is Idle, then
* 1. Forcewake Media well.
* 2. Request idle freq.
* 3. Release Forcewake of Media well.
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
u32 val = dev_priv->rps.idle_freq;
if (dev_priv->rps.cur_freq <= val)
/* Wake up the media well, as that takes a lot less
* power than the Render well. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);

void gen6_rps_busy(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
gen6_enable_rps_interrupts(dev_priv);
/* Ensure we start at the user's desired frequency */
intel_set_rps(dev_priv,
clamp(dev_priv->rps.cur_freq,
dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit));
mutex_unlock(&dev_priv->rps.hw_lock);

void gen6_rps_idle(struct drm_i915_private *dev_priv)
/* Flush our bottom-half so that it does not race with us
* setting the idle frequency and so that it is bounded by
* our rpm wakeref. And then disable the interrupts to stop any
* further RPS reclocking whilst we are asleep.
gen6_disable_rps_interrupts(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
mutex_unlock(&dev_priv->rps.hw_lock);
spin_lock(&dev_priv->rps.client_lock);
while (!list_empty(&dev_priv->rps.clients))
list_del_init(dev_priv->rps.clients.next);
spin_unlock(&dev_priv->rps.client_lock);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted)
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
if (!(dev_priv->gt.awake &&
dev_priv->rps.enabled &&
dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
/* Force a RPS boost (and don't count it against the client) if
* the GPU is severely congested.
if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
spin_lock(&dev_priv->rps.client_lock);
if (rps == NULL || list_empty(&rps->link)) {
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.client_boost = true;
schedule_work(&dev_priv->rps.work);
spin_unlock_irq(&dev_priv->irq_lock);
list_add(&rps->link, &dev_priv->rps.clients);
dev_priv->rps.boosts++;
spin_unlock(&dev_priv->rps.client_lock);

void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
valleyview_set_rps(dev_priv, val);
gen6_set_rps(dev_priv, val);

static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN9_PG_ENABLE, 0);

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_CONTROL, 0);

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
/* We're doing forcewake before disabling RC6;
* this is what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
if (HAS_RC6p(dev_priv))
DRM_DEBUG_DRIVER("Enabling RC6 states: "
"RC6 %s RC6p %s RC6pp %s\n",
onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
onoff(mode & GEN6_RC_CTL_RC6_ENABLE));

static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
rc_ctl = I915_READ(GEN6_RC_CONTROL);
rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
RC_SW_TARGET_STATE_SHIFT;
DRM_DEBUG_DRIVER("BIOS enabled RC states: "
"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
rc_sw_target);
if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
* The exact context size is not known for BXT, so assume a page size
* for it
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
ggtt->stolen_reserved_size))) {
DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
!I915_READ(GEN8_PUSHBUS_ENABLE) ||
!I915_READ(GEN8_PUSHBUS_SHIFT)) {
DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
if (!I915_READ(GEN6_GFXPAUSE)) {
DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
if (!I915_READ(GEN8_MISC_CTRL0)) {
DRM_DEBUG_DRIVER("GPM control not setup properly.\n");

int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
/* No RC6 before Ironlake and code is gone for ilk. */
if (INTEL_INFO(dev_priv)->gen < 6)
if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
DRM_INFO("RC6 disabled by BIOS\n");
/* Respect the kernel parameter if it is set */
if (enable_rc6 >= 0) {
if (HAS_RC6p(dev_priv))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
mask = INTEL_RC6_ENABLE;
if ((enable_rc6 & mask) != enable_rc6)
DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
"(requested %d, valid %d)\n",
enable_rc6 & mask, enable_rc6, mask);
return enable_rc6 & mask;
if (IS_IVYBRIDGE(dev_priv))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
return INTEL_RC6_ENABLE;
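/*
 * Illustrative sketch (not part of the driver): the kernel-parameter
 * handling in sanitize_rc6_option() above boils down to masking the
 * requested bits against the states the platform supports:
 */
static inline int demo_sanitize_rc6(int requested, bool has_rc6p)
{
	int mask = has_rc6p ?
		   (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE) :
		   INTEL_RC6_ENABLE;

	return requested & mask;	/* drop unsupported state bits */
}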
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_BROXTON(dev_priv)) {
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
dev_priv->rps.efficient_freq =
((ddcc_status >> 8) & 0xff),
dev_priv->rps.min_freq,
dev_priv->rps.max_freq);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
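/*
 * Illustrative sketch (not part of the driver): RP0/RP1/RPn occupy
 * different bytes of the RP_STATE_CAP register on BXT versus the other
 * platforms, as read above. Values are in 50MHz units before the
 * GEN9_FREQ_SCALER adjustment.
 */
static inline void demo_decode_rp_state_cap(u32 cap, bool is_bxt,
					    u8 *rp0, u8 *rp1, u8 *rpn)
{
	if (is_bxt) {
		*rp0 = (cap >> 16) & 0xff;
		*rp1 = (cap >> 8) & 0xff;
		*rpn = (cap >> 0) & 0xff;
	} else {
		*rp0 = (cap >> 0) & 0xff;
		*rp1 = (cap >> 8) & 0xff;
		*rpn = (cap >> 16) & 0xff;
	}
}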
static void reset_rps(struct drm_i915_private *dev_priv,
void (*set)(struct drm_i915_private *, u8))
u8 freq = dev_priv->rps.cur_freq;
dev_priv->rps.power = -1;
dev_priv->rps.cur_freq = -1;
set(dev_priv, freq);

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
* BIOS could leave the Hw Turbo enabled, so need to explicitly
* clear out the Control register just to avoid inconsistency
* with debugfs interface, which will show Turbo as enabled
* only and that is not expected by the User after adding the
* WaGsvDisableTurbo. Apart from this there is no problem even
* if the Turbo is left enabled in the Control register, as the
* Up/Down interrupts would remain masked.
gen9_disable_rps(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
/* 1 second timeout */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
GT_INTERVAL_FROM_US(dev_priv, 1000000));
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
/* Leaning on the below call to gen6_set_rps to program/setup the
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds. */
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* 2c: Program Coarse Power Gating Policies. */
I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds. */
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev_priv, rc6_mask);
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
/* 4 Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(dev_priv->rps.rp1_freq));
I915_WRITE(GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(dev_priv->rps.rp1_freq));
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->rps.max_freq_softlimit << 24 |
dev_priv->rps.min_freq_softlimit << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
/* 6: Ring frequency + overclocking (our driver does this later) */
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
u32 rc6vids, rc6_mask = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
* Perhaps there might be some value in exposing these to
* userspace...
I915_WRITE(GEN6_RC_STATE, 0);
/* Clear the DBG now so we don't confuse earlier errors */
gtfifodbg = I915_READ(GTFIFODBG);
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
rc6_mode = intel_enable_rc6();
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
/* We don't use those on Haswell */
if (!IS_HASWELL(dev_priv)) {
if (rc6_mode & INTEL_RC6p_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
if (rc6_mode & INTEL_RC6pp_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
intel_print_rc6_info(dev_priv, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL,
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
/* Power down if completely idle for over 50ms */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
reset_rps(dev_priv, gen6_set_rps);
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev_priv) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
unsigned int max_gpu_freq, min_gpu_freq;
int scaling_factor = 180;
struct cpufreq_policy *policy;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
policy = cpufreq_cpu_get(0);
max_ia_freq = policy->cpuinfo.max_freq;
cpufreq_cpu_put(policy);
* Default to measured freq if none found, PCU will ensure we
* don't go over
max_ia_freq = tsc_khz;
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
min_ring_freq = I915_READ(DCLK) & 0xf;
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
min_gpu_freq = dev_priv->rps.min_freq;
max_gpu_freq = dev_priv->rps.max_freq;
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
ring_freq = gpu_freq;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev_priv)) {
ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
/* On older processors, there is no separate ring
* clock domain, so in order to boost the bandwidth
* of the ring, we need to upclock the CPU (ia_freq).
* For GPU frequencies less than 750MHz,
* just use the lowest ring freq.
if (gpu_freq < min_freq)
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
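/*
 * Illustrative sketch (not part of the driver): one entry of the ring
 * frequency table under the Haswell rule above, i.e. ring = max(DDR floor,
 * 1.25 * GT), with the GT frequency in 50MHz units. Inputs are assumed
 * demonstration values.
 */
static inline unsigned int demo_hsw_ring_freq(unsigned int gpu_freq,
					      unsigned int min_ring_freq)
{
	/* mult_frac(gpu_freq, 5, 4), i.e. 1.25 * GT */
	unsigned int ring_freq = gpu_freq * 5 / 4;

	return ring_freq > min_ring_freq ? ring_freq : min_ring_freq;
}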
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
switch (INTEL_INFO(dev_priv)->eu_total) {
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
/* (2 * 6) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
/* (2 * 8) config */
/* Setting (2 * 8) Min RP0 for any other combination */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
rp0 = min_t(u32, rp0, 0xea);

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
* a BYT-M B0 the above register contains 0xbf. Moreover when setting
* a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
* to make sure it matches what Punit accepts.
return max_t(u32, val, 0xc0);

/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
dev_priv->vlv_pctx->stolen->start);

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
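/*
 * Illustrative sketch (not part of the driver): the power context base in
 * VLV_PCBR is 4KiB aligned, so both checks above mask off the low 12 bits
 * before looking at the address:
 */
static inline unsigned long demo_pcbr_to_addr(u32 pcbr)
{
	return pcbr & ~4095UL;	/* drop the low 12 bits (page offset) */
}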
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
int pctx_size = 32*1024;
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(ggtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));

static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
int pctx_size = 24*1024;
pcbr = I915_READ(VLV_PCBR);
/* BIOS set it up already, grab the pre-alloc'd space */
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
I915_GTT_OFFSET_NONE,
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
* proper allocation within Gfx stolen memory. For example, this
* register should be programmed such that the PCBR range does not
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
I915_WRITE(VLV_PCBR, pctx_paddr);
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;

static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
dev_priv->rps.gpll_ref_freq =
vlv_get_cck_clock(dev_priv, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
dev_priv->czclk_freq);
DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
dev_priv->rps.gpll_ref_freq);

static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
dev_priv->mem_freq = 800;
dev_priv->mem_freq = 1066;
dev_priv->mem_freq = 1333;
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);

static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
cherryview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->sb_lock);
switch ((val >> 2) & 0x7) {
dev_priv->mem_freq = 2000;
dev_priv->mem_freq = 1600;
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
/* PUnit validated range is only [RPe, RP0] */
dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
WARN_ONCE((dev_priv->rps.max_freq |
dev_priv->rps.efficient_freq |
dev_priv->rps.rp1_freq |
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");

static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_cleanup_pctx(dev_priv);
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
GT_FIFO_FREE_ENTRIES_CHV);
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
I915_WRITE(GTFIFODBG, gtfifodbg);
cherryview_check_pctx(dev_priv);
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2a: Program RC6 thresholds. */
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us (0x186 * 1.28 us) */
I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
/* 4 Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
CHV_BIAS_CPU_50_SOC_50;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
reset_rps(dev_priv, valleyview_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
valleyview_check_pctx(dev_priv);
gtfifodbg = I915_READ(GTFIFODBG);
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
I915_WRITE(GTFIFODBG, gtfifodbg);
/* If VLV, Forcewake all wells, else re-direct to regular path */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
intel_print_rc6_info(dev_priv, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
VLV_BIAS_CPU_125_SOC_875;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
reset_rps(dev_priv, valleyview_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

static unsigned long intel_pxfreq(u32 vidfreq)
int div = (vidfreq & 0x3f0000) >> 16;
int post = (vidfreq & 0x3000) >> 12;
int pre = (vidfreq & 0x7);
freq = ((div * 133333) / ((1<<post) * pre));
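/*
 * Illustrative sketch (not part of the driver): intel_pxfreq() above
 * decodes a PXVFREQ entry as div * 133333kHz, scaled down by a
 * power-of-two post divider and a pre divider. With assumed field values
 * div = 16, post = 1, pre = 1 this yields 16 * 133333 / 2 = 1066664kHz.
 */
static inline unsigned long demo_pxfreq(unsigned int div, unsigned int post,
					unsigned int pre)
{
	if (pre == 0)
		return 0;	/* mirror the divide-by-zero guard */

	return (div * 133333UL) / ((1UL << post) * pre);
}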
static const struct cparams {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
{ 0, 1333, 276, 27605 },
{ 0, 1066, 276, 27605 },
{ 0, 800, 231, 23784 },

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
assert_spin_locked(&mchdev_lock);
diff1 = now - dev_priv->ips.last_time1;
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
return dev_priv->ips.chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
if (total_count < dev_priv->ips.last_count1) {
diff = ~0UL - dev_priv->ips.last_count1;
diff += total_count;
diff = total_count - dev_priv->ips.last_count1;
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == dev_priv->ips.c_m &&
cparams[i].t == dev_priv->ips.r_t) {
diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
ret = div_u64(ret, 10);
dev_priv->ips.last_count1 = total_count;
dev_priv->ips.last_time1 = now;
dev_priv->ips.chipset_power = ret;
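/*
 * Illustrative sketch (not part of the driver): the energy counters read
 * above can wrap, so the delta since the last sample is computed
 * wrap-safely, matching the idiom used in __i915_chipset_val():
 */
static inline u64 demo_counter_delta(u64 last, u64 now)
{
	if (now < last)
		return (~0UL - last) + now;	/* counter wrapped around */

	return now - last;
}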
6087 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6091 if (INTEL_INFO(dev_priv)->gen != 5)
6094 spin_lock_irq(&mchdev_lock);
6096 val = __i915_chipset_val(dev_priv);
6098 spin_unlock_irq(&mchdev_lock);
6103 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6105 unsigned long m, x, b;
6108 tsfs = I915_READ(TSFS);
6110 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6111 x = I915_READ8(TR1);
6113 b = tsfs & TSFS_INTR_MASK;
6115 return ((m * x) / 127) - b;
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
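/*
 * Worked example of the two helpers above (pxvid chosen arbitrarily):
 * pxvid = 16 lies in [8, 31) and is clamped to 31, so
 * vd = (31 + 2) * 125 = 4125 and vm = 4125 - 1125 = 3000; a mobile part
 * reports 3000 while a non-mobile part reports the raw vd of 4125.
 */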
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
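/*
 * Worked example with invented numbers: if GFXEC advanced by 10000 counts
 * over diffms = 100 ms, then diff = 10000 * 1181 / (100 * 10) = 11810
 * becomes the new ips.gfx_power sample.
 */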
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (i915_mch_dev)
		ret = i915_mch_dev->gt.awake;
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;

	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dev_priv->rps.min_freq_softlimit =
			max_t(int,
			      dev_priv->rps.efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (dev_priv->rps.max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			dev_priv->rps.max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_autoenable_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.enabled = true; /* force disabling */
	intel_disable_gt_powersave(dev_priv);

	gen6_reset_rps_interrupts(dev_priv);
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.enabled))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_disable_rc6(dev_priv);
		gen9_disable_rps(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_disable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_disable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_disable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	}

	dev_priv->rps.enabled = false;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* We shouldn't be disabling as we submit, so this should be less
	 * racy than it appears!
	 */
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	if (READ_ONCE(dev_priv->rps.enabled))
		goto out;

	rcs = &dev_priv->engine[RCS];
	if (rcs->last_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request_no_flush(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out:
	intel_runtime_pm_put(dev_priv);
}
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_init_emon(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_TRICKLE_FEED_DISABLE));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
static void kabylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

static void nop_init_clock_gating(struct drm_device *dev)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	return 0;
}
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}
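/*
 * Worked round-trip example, assuming (for illustration only) a GPLL
 * reference of gpll_ref_freq = 27000 kHz: opcode 0xc7 encodes
 * N = 0xc7 - 0xb7 = 16, so byt_gpu_freq() returns 27000 * 16 / 1000
 * = 432 MHz, and byt_freq_opcode(432) maps back to
 * 1000 * 432 / 27000 + 0xb7 = 0xc7.
 */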
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
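/*
 * Worked example with the same illustrative 27000 kHz GPLL reference:
 * opcode val = 64 gives chv_gpu_freq() = 27000 * 64 / (2 * 2 * 1000)
 * = 432 MHz, and chv_freq_opcode(432) = round(2 * 1000 * 432 / 27000) * 2
 * = 32 * 2 = 64; the trailing "* 2" is what keeps the opcode even.
 */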
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
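/*
 * Worked example of the gen9 scaling above, assuming the usual
 * GT_FREQUENCY_MULTIPLIER = 50 and GEN9_FREQ_SCALER = 3 (i.e. 16.667 MHz
 * units): intel_gpu_freq(dev_priv, 24) = 24 * 50 / 3 = 400 MHz, and
 * intel_freq_opcode(dev_priv, 400) = 400 * 3 / 50 = 24.
 */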
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};
static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}