/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * the voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
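/*
 * Illustrative only (not code from this file): a caller that wanted plain
 * RC6 plus deep RC6, but not RC6pp, would compose the flags as a mask:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * The hypothetical rc6_mask above merely shows how the bits combine.
 */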
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
}
static void skl_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	if (INTEL_REVID(dev) == SKL_REVID_A0) {
		/*
		 * WaDisableSDEUnitClockGating:skl
		 * WaSetGAPSunitClckGateDisable:skl
		 */
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
	}

	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);

		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   I915_READ(FF_SLICE_CS_CHICKEN2) |
			   GEN9_TSG_BARRIER_ACK_DISABLE);
	}

	if (INTEL_REVID(dev) <= SKL_REVID_E0)
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
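/*
 * Illustrative lookup (values taken from the table above): a desktop part
 * (is_desktop = 1) with DDR3, an 800 MHz FSB and 667 MHz memory matches the
 * {1, 1, 800, 667, 6420, 36420, 6873, 36873} row, so the caller would see
 * display_sr = 6420 and cursor_sr = 6873.
 */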
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
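/*
 * Rough feel for the number (illustrative arithmetic, not from bspec): at a
 * 100000 kHz pixel clock and 4 bytes per pixel, 5000 ns of latency covers
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes of scanout, i.e. roughly
 * 32 cachelines of 64 bytes -- the headroom intel_calculate_wm() reserves.
 */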
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
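/*
 * Illustrative decode (hypothetical register value): if the low field of
 * DSPARB reads 0x20 and the CSTART field reads 0x30, plane A gets
 * 0x20 = 32 FIFO entries and plane B gets 0x30 - 0x20 = 16 entries.
 */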
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * the default).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
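/*
 * Worked example for intel_calculate_wm() (illustrative numbers): a
 * 148500 kHz mode at 4 bytes/pixel with latency_ns = 5000 needs
 * (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes in integer math, i.e.
 * DIV_ROUND_UP(2960, 64) = 47 FIFO entries; with a 96 entry FIFO and a
 * guard of 2 the watermark level comes out as 96 - (47 + 2) = 47.
 */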
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	struct drm_device *dev = crtc->dev;
	int entries;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	if (IS_CHERRYVIEW(dev))
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
					       DRAIN_LATENCY_PRECISION_16;
	else
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
					       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}
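/*
 * Worked example (illustrative): at a 148500 kHz pixel clock and 4 bytes
 * per pixel, entries = DIV_ROUND_UP(148500, 1000) * 4 = 596. On VLV that
 * selects DRAIN_LATENCY_PRECISION_64, so the drain latency comes out as
 * (64 * 64 * 4) / 596 = 27, clamped to DRAIN_LATENCY_MASK if larger.
 */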
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
			     DDL_PLANE_PRECISION_HIGH :
			     DDL_PLANE_PRECISION_LOW;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
			     DDL_CURSOR_PRECISION_HIGH :
			     DDL_CURSOR_PRECISION_LOW;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
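/*
 * is_power_of_2() is true only when exactly one bit is set, so e.g. an
 * enabled mask of (1 << PIPE_B) counts as a single enabled plane, while
 * (1 << PIPE_A) | (1 << PIPE_B) does not.
 */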
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
			     DDL_SPRITE_PRECISION_HIGH(sprite) :
			     DDL_SPRITE_PRECISION_LOW(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;

		pipe_w = intel_crtc->config->pipe_src_w;
		pipe_h = intel_crtc->config->pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
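/*
 * Worked example (illustrative): with pixel_rate = 148500 (kHz),
 * 4 bytes/pixel and latency = 50 (5.0 us in 0.1us units), method 1 gives
 * DIV_ROUND_UP(148500ULL * 4 * 50, 64 * 10000) + 2 = 47 + 2 = 49 blocks of
 * 64 bytes. Method 2 instead rounds the latency up to whole scanlines
 * before converting to blocks, and callers take the minimum of the two.
 */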
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
struct skl_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate; /* in KHz */
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
	struct intel_plane_wm_parameters cursor;
};

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The WMs are computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
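/*
 * Worked example (illustrative): for a mode with crtc_htotal = 2200 and
 * crtc_clock = 148500 kHz, linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8,
 * 148500) = 119, i.e. ~14.8 us per scanline expressed in 1/8 us units.
 */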
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us in that
		 *   case.
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
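/*
 * Example of the gen9 sanitization in intel_read_wm_latency() above
 * (illustrative values): if the punit returned latencies {3, 5, 0, 8} for
 * WM0..WM3, the result would be {5, 7, 0, 0} -- WM0 and WM1 get the 2us
 * fixup, and WM3 is zeroed because the 0 at WM2 marks that level and every
 * level above it as disabled.
 */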
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_GEN9(dev))
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}
static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc_active(crtc))
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
	p->cur.bytes_per_pixel = 4;
	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->cursor_width;
	/* TODO: for now, assume primary and cursor planes are always enabled. */
	p->pri.enabled = true;
	p->cur.enabled = true;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return false;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level wm = {};

		ilk_compute_wm_level(dev_priv, level, params, &wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return true;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
* Merge all low power watermarks for all active pipes.
*/
2060 static void ilk_wm_merge(struct drm_device *dev,
2061 const struct intel_wm_config *config,
2062 const struct ilk_wm_maximums *max,
2063 struct intel_pipe_wm *merged)
2065 int level, max_level = ilk_wm_max_level(dev);
2066 int last_enabled_level = max_level;
2068 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2069 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2070 config->num_pipes_active > 1)
2073 /* ILK: FBC WM must be disabled always */
2074 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2076 /* merge each WM1+ level */
2077 for (level = 1; level <= max_level; level++) {
2078 struct intel_wm_level *wm = &merged->wm[level];
2080 ilk_merge_wm_level(dev, level, wm);
2082 if (level > last_enabled_level)
2084 else if (!ilk_validate_wm_level(level, max, wm))
2085 /* make sure all following levels get disabled */
2086 last_enabled_level = level - 1;
/*
* The spec says it is preferred to disable
* FBC WMs instead of disabling a WM level.
*/
2092 if (wm->fbc_val > max->fbc) {
2094 merged->fbc_wm_enabled = false;
2099 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2101 * FIXME this is racy. FBC might get enabled later.
2102 * What we should check here is whether FBC can be
2103 * enabled sometime later.
2105 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2106 for (level = 2; level <= max_level; level++) {
2107 struct intel_wm_level *wm = &merged->wm[level];
2114 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2116 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2117 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
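/*
* Illustrative sketch (not driver code): the LP index to level mapping
* above, tabulated. LP1 always maps to level 1; LP2/LP3 shift up by one
* when the level-4 watermark is enabled, giving 1,2,3 or 1,3,4, exactly
* as the expression computes.
*/
#if 0
static int example_wm_lp_to_level(int wm_lp, bool wm4_enabled)
{
	/* wm_lp in 1..3; mirrors ilk_wm_lp_to_level() above */
	return wm_lp + (wm_lp >= 2 && wm4_enabled);
}
#endif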
2120 /* The value we need to program into the WM_LPx latency field */
2121 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2123 struct drm_i915_private *dev_priv = dev->dev_private;
2125 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2128 return dev_priv->wm.pri_latency[level];
2131 static void ilk_compute_wm_results(struct drm_device *dev,
2132 const struct intel_pipe_wm *merged,
2133 enum intel_ddb_partitioning partitioning,
2134 struct ilk_wm_values *results)
2136 struct intel_crtc *intel_crtc;
2139 results->enable_fbc_wm = merged->fbc_wm_enabled;
2140 results->partitioning = partitioning;
2142 /* LP1+ register values */
2143 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2144 const struct intel_wm_level *r;
2146 level = ilk_wm_lp_to_level(wm_lp, merged);
2148 r = &merged->wm[level];
2151 * Maintain the watermark values even if the level is
2152 * disabled. Doing otherwise could cause underruns.
2154 results->wm_lp[wm_lp - 1] =
2155 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2156 (r->pri_val << WM1_LP_SR_SHIFT) |
2160 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2162 if (INTEL_INFO(dev)->gen >= 8)
2163 results->wm_lp[wm_lp - 1] |=
2164 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2166 results->wm_lp[wm_lp - 1] |=
2167 r->fbc_val << WM1_LP_FBC_SHIFT;
2170 * Always set WM1S_LP_EN when spr_val != 0, even if the
2171 * level is disabled. Doing otherwise could cause underruns.
2173 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2174 WARN_ON(wm_lp != 1);
2175 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2177 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2180 /* LP0 register values */
2181 for_each_intel_crtc(dev, intel_crtc) {
2182 enum pipe pipe = intel_crtc->pipe;
2183 const struct intel_wm_level *r =
2184 &intel_crtc->wm.active.wm[0];
2186 if (WARN_ON(!r->enable))
2189 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2191 results->wm_pipe[pipe] =
2192 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2193 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2198 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2199 * case both are at the same level. Prefer r1 in case they're the same. */
2200 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2201 struct intel_pipe_wm *r1,
2202 struct intel_pipe_wm *r2)
2204 int level, max_level = ilk_wm_max_level(dev);
2205 int level1 = 0, level2 = 0;
2207 for (level = 1; level <= max_level; level++) {
2208 if (r1->wm[level].enable)
2210 if (r2->wm[level].enable)
2214 if (level1 == level2) {
2215 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2219 } else if (level1 > level2) {
2226 /* dirty bits used to track which watermarks need changes */
2227 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2228 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2229 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2230 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2231 #define WM_DIRTY_FBC (1 << 24)
2232 #define WM_DIRTY_DDB (1 << 25)
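/*
* Illustrative sketch (not driver code): how the dirty bits above share
* one unsigned int. Bits 0-7 carry per-pipe WM0 changes, bits 8-15 the
* per-pipe linetime changes, bits 16-18 the LP1-LP3 watermarks, and
* bits 24/25 the FBC and DDB partitioning toggles.
*/
#if 0
static void example_wm_dirty_layout(void)
{
	unsigned int pipe_a = WM_DIRTY_PIPE(0);     /* 0x00000001 */
	unsigned int line_b = WM_DIRTY_LINETIME(1); /* 0x00000200 */
	unsigned int lp1    = WM_DIRTY_LP(1);       /* 0x00010000 */
	unsigned int lp_all = WM_DIRTY_LP_ALL;      /* 0x00070000 */
}
#endif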
2234 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2235 const struct ilk_wm_values *old,
2236 const struct ilk_wm_values *new)
2238 unsigned int dirty = 0;
2242 for_each_pipe(dev_priv, pipe) {
2243 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2244 dirty |= WM_DIRTY_LINETIME(pipe);
2245 /* Must disable LP1+ watermarks too */
2246 dirty |= WM_DIRTY_LP_ALL;
2249 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2250 dirty |= WM_DIRTY_PIPE(pipe);
2251 /* Must disable LP1+ watermarks too */
2252 dirty |= WM_DIRTY_LP_ALL;
2256 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2257 dirty |= WM_DIRTY_FBC;
2258 /* Must disable LP1+ watermarks too */
2259 dirty |= WM_DIRTY_LP_ALL;
2262 if (old->partitioning != new->partitioning) {
2263 dirty |= WM_DIRTY_DDB;
2264 /* Must disable LP1+ watermarks too */
2265 dirty |= WM_DIRTY_LP_ALL;
2268 /* LP1+ watermarks already deemed dirty, no need to continue */
2269 if (dirty & WM_DIRTY_LP_ALL)
2272 /* Find the lowest numbered LP1+ watermark in need of an update... */
2273 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2274 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2275 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2279 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2280 for (; wm_lp <= 3; wm_lp++)
2281 dirty |= WM_DIRTY_LP(wm_lp);
2286 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2289 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2290 bool changed = false;
2292 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2293 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2294 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2297 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2298 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2299 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2302 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2303 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2304 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2309 * Don't touch WM1S_LP_EN here.
2310 * Doing so could cause underruns.
* The spec says we shouldn't write when we don't need to, because every write
2318 * causes WMs to be re-evaluated, expending some power.
2320 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2321 struct ilk_wm_values *results)
2323 struct drm_device *dev = dev_priv->dev;
2324 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2328 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2332 _ilk_disable_lp_wm(dev_priv, dirty);
2334 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2335 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2336 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2337 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2338 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2339 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2341 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2342 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2343 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2344 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2345 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2346 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2348 if (dirty & WM_DIRTY_DDB) {
2349 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2350 val = I915_READ(WM_MISC);
2351 if (results->partitioning == INTEL_DDB_PART_1_2)
2352 val &= ~WM_MISC_DATA_PARTITION_5_6;
2354 val |= WM_MISC_DATA_PARTITION_5_6;
2355 I915_WRITE(WM_MISC, val);
2357 val = I915_READ(DISP_ARB_CTL2);
2358 if (results->partitioning == INTEL_DDB_PART_1_2)
2359 val &= ~DISP_DATA_PARTITION_5_6;
2361 val |= DISP_DATA_PARTITION_5_6;
2362 I915_WRITE(DISP_ARB_CTL2, val);
2366 if (dirty & WM_DIRTY_FBC) {
2367 val = I915_READ(DISP_ARB_CTL);
2368 if (results->enable_fbc_wm)
2369 val &= ~DISP_FBC_WM_DIS;
2371 val |= DISP_FBC_WM_DIS;
2372 I915_WRITE(DISP_ARB_CTL, val);
2375 if (dirty & WM_DIRTY_LP(1) &&
2376 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2377 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2379 if (INTEL_INFO(dev)->gen >= 7) {
2380 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2381 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2382 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2383 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2386 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2387 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2388 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2389 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2390 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2391 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2393 dev_priv->wm.hw = *results;
2396 static bool ilk_disable_lp_wm(struct drm_device *dev)
2398 struct drm_i915_private *dev_priv = dev->dev_private;
2400 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2404 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2405 * different active planes.
2408 #define SKL_DDB_SIZE 896 /* in blocks */
2411 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2412 struct drm_crtc *for_crtc,
2413 const struct intel_wm_config *config,
2414 const struct skl_pipe_wm_parameters *params,
2415 struct skl_ddb_entry *alloc /* out */)
2417 struct drm_crtc *crtc;
2418 unsigned int pipe_size, ddb_size;
2419 int nth_active_pipe;
2421 if (!params->active) {
2427 ddb_size = SKL_DDB_SIZE;
2429 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2431 nth_active_pipe = 0;
2432 for_each_crtc(dev, crtc) {
2433 if (!intel_crtc_active(crtc))
2436 if (crtc == for_crtc)
2442 pipe_size = ddb_size / config->num_pipes_active;
2443 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
2444 alloc->end = alloc->start + pipe_size;
2447 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
2449 if (config->num_pipes_active == 1)
2455 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2457 entry->start = reg & 0x3ff;
2458 entry->end = (reg >> 16) & 0x3ff;
2463 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2464 struct skl_ddb_allocation *ddb /* out */)
2466 struct drm_device *dev = dev_priv->dev;
2471 for_each_pipe(dev_priv, pipe) {
2472 for_each_plane(pipe, plane) {
2473 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2474 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2478 val = I915_READ(CUR_BUF_CFG(pipe));
2479 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
2484 skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
2486 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
/*
* We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
* an 8192x4096@32bpp framebuffer:
*   3 * 4096 * 8192 * 4 < 2^32
*/
2495 skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
2496 const struct skl_pipe_wm_parameters *params)
2498 unsigned int total_data_rate = 0;
2501 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2502 const struct intel_plane_wm_parameters *p;
2504 p = ¶ms->plane[plane];
2508 total_data_rate += skl_plane_relative_data_rate(p);
2511 return total_data_rate;
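/*
* Illustrative sketch (not driver code): a worked instance of the bound
* above. Three hypothetical 8192x4096@32bpp planes sum to
* 3 * 8192 * 4096 * 4 = 402,653,184, comfortably below 2^32.
*/
#if 0
static unsigned int example_total_data_rate(void)
{
	unsigned int total = 0;
	int plane;

	for (plane = 0; plane < 3; plane++)
		total += 8192 * 4096 * 4;	/* per-plane relative rate */

	return total;				/* 402653184 */
}
#endif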
2515 skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2516 const struct intel_wm_config *config,
2517 const struct skl_pipe_wm_parameters *params,
2518 struct skl_ddb_allocation *ddb /* out */)
2520 struct drm_device *dev = crtc->dev;
2521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2522 enum pipe pipe = intel_crtc->pipe;
2523 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2524 uint16_t alloc_size, start, cursor_blocks;
2525 unsigned int total_data_rate;
2528 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
2529 alloc_size = skl_ddb_entry_size(alloc);
2530 if (alloc_size == 0) {
2531 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2532 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
2536 cursor_blocks = skl_cursor_allocation(config);
2537 ddb->cursor[pipe].start = alloc->end - cursor_blocks;
2538 ddb->cursor[pipe].end = alloc->end;
2540 alloc_size -= cursor_blocks;
2541 alloc->end -= cursor_blocks;
* Each active plane gets a portion of the remaining space, in
2545 * proportion to the amount of data they need to fetch from memory.
2547 * FIXME: we may not allocate every single block here.
2549 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
2551 start = alloc->start;
2552 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2553 const struct intel_plane_wm_parameters *p;
2554 unsigned int data_rate;
2555 uint16_t plane_blocks;
2557 p = ¶ms->plane[plane];
2561 data_rate = skl_plane_relative_data_rate(p);
/*
* Promote the expression to 64 bits to avoid overflowing; the
* result is always less than alloc_size since
* data_rate / total_data_rate < 1.
*/
2567 plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
2570 ddb->plane[pipe][plane].start = start;
2571 ddb->plane[pipe][plane].end = start + plane_blocks;
2573 start += plane_blocks;
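/*
* Illustrative sketch (not driver code) of the proportional split above,
* with made-up numbers: 512 blocks split between two planes whose relative
* data rates stand at 3:1 gives them 384 and 128 blocks respectively.
*/
#if 0
static void example_ddb_split(void)
{
	const uint16_t alloc_size = 512;
	const unsigned int rate[2] = { 300, 100 };	/* hypothetical */
	unsigned int total = rate[0] + rate[1];
	uint16_t start = 0;
	int plane;

	for (plane = 0; plane < 2; plane++) {
		/* same promotion to 64 bits as the div_u64() above */
		uint16_t blocks = (uint64_t)alloc_size * rate[plane] / total;

		/* plane 0: [0, 384), plane 1: [384, 512) */
		start += blocks;
	}
}
#endif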
2578 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
2580 /* TODO: Take into account the scalers once we support them */
2581 return config->base.adjusted_mode.crtc_clock;
2585 * The max latency should be 257 (max the punit can code is 255 and we add 2us
2586 * for the read latency) and bytes_per_pixel should always be <= 8, so that
2587 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
2588 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
2590 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2593 uint32_t wm_intermediate_val, ret;
2598 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
2599 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
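/*
* Illustrative sketch (not driver code): method 1 with made-up numbers,
* assuming pixel_rate in kHz and latency in microseconds as elsewhere in
* this file. 148500 kHz at 4 bytes per pixel over a 15 us latency fetches
* DIV_ROUND_UP(15 * 148500 * 4, 1000) = 8910 bytes.
*/
#if 0
static uint32_t example_wm_method1(void)
{
	uint32_t pixel_rate = 148500;	/* kHz, hypothetical */
	uint32_t latency = 15;		/* us, hypothetical */
	uint8_t bytes_per_pixel = 4;

	/* bytes in flight during the latency window, rounded up */
	return (latency * pixel_rate * bytes_per_pixel + 999) / 1000;
}
#endif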
2604 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2605 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2608 uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
2613 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
2614 wm_intermediate_val = latency * pixel_rate;
2615 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
2616 plane_bytes_per_line;
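/*
* Illustrative sketch (not driver code): method 2 with the same made-up
* numbers. The latency spans DIV_ROUND_UP(15 * 148500, 2200 * 1000) = 2
* lines; at 1920 * 4 = 7680 bytes per line that is 15360 bytes.
*/
#if 0
static uint32_t example_wm_method2(void)
{
	uint32_t pixel_rate = 148500;	/* kHz, hypothetical */
	uint32_t pipe_htotal = 2200;	/* hypothetical */
	uint32_t horiz_pixels = 1920;
	uint8_t bytes_per_pixel = 4;
	uint32_t latency = 15;		/* us, hypothetical */
	uint32_t bytes_per_line = horiz_pixels * bytes_per_pixel;
	uint32_t denom = pipe_htotal * 1000;

	/* whole lines crossed during the latency, times the line size */
	return ((latency * pixel_rate + denom - 1) / denom) * bytes_per_line;
}
#endif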
2621 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
2622 const struct intel_crtc *intel_crtc)
2624 struct drm_device *dev = intel_crtc->base.dev;
2625 struct drm_i915_private *dev_priv = dev->dev_private;
2626 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
2627 enum pipe pipe = intel_crtc->pipe;
2629 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
2630 sizeof(new_ddb->plane[pipe])))
2633 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
2634 sizeof(new_ddb->cursor[pipe])))
2640 static void skl_compute_wm_global_parameters(struct drm_device *dev,
2641 struct intel_wm_config *config)
2643 struct drm_crtc *crtc;
2644 struct drm_plane *plane;
2646 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2647 config->num_pipes_active += intel_crtc_active(crtc);
2649 /* FIXME: I don't think we need those two global parameters on SKL */
2650 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2651 struct intel_plane *intel_plane = to_intel_plane(plane);
2653 config->sprites_enabled |= intel_plane->wm.enabled;
2654 config->sprites_scaled |= intel_plane->wm.scaled;
2658 static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2659 struct skl_pipe_wm_parameters *p)
2661 struct drm_device *dev = crtc->dev;
2662 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2663 enum pipe pipe = intel_crtc->pipe;
2664 struct drm_plane *plane;
2665 int i = 1; /* Index for sprite planes start */
2667 p->active = intel_crtc_active(crtc);
2669 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2670 p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
2673 * For now, assume primary and cursor planes are always enabled.
2675 p->plane[0].enabled = true;
2676 p->plane[0].bytes_per_pixel =
2677 crtc->primary->fb->bits_per_pixel / 8;
2678 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
2679 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
2681 p->cursor.enabled = true;
2682 p->cursor.bytes_per_pixel = 4;
2683 p->cursor.horiz_pixels = intel_crtc->cursor_width ?
2684 intel_crtc->cursor_width : 64;
2687 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2688 struct intel_plane *intel_plane = to_intel_plane(plane);
2690 if (intel_plane->pipe == pipe &&
2691 plane->type == DRM_PLANE_TYPE_OVERLAY)
2692 p->plane[i++] = intel_plane->wm;
2696 static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
2697 struct intel_plane_wm_parameters *p_params,
2698 uint16_t ddb_allocation,
2700 uint16_t *out_blocks, /* out */
2701 uint8_t *out_lines /* out */)
2703 uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
2704 uint32_t result_bytes;
2706 if (mem_value == 0 || !p->active || !p_params->enabled)
2709 method1 = skl_wm_method1(p->pixel_rate,
2710 p_params->bytes_per_pixel,
2712 method2 = skl_wm_method2(p->pixel_rate,
2714 p_params->horiz_pixels,
2715 p_params->bytes_per_pixel,
2718 plane_bytes_per_line = p_params->horiz_pixels *
2719 p_params->bytes_per_pixel;
/* For now, handle only X-tiled and linear surfaces */
2722 if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
2723 result_bytes = min(method1, method2);
2725 result_bytes = method1;
2727 res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
2728 res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
2730 if (res_blocks > ddb_allocation || res_lines > 31)
2733 *out_blocks = res_blocks;
2734 *out_lines = res_lines;
2739 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
2740 struct skl_ddb_allocation *ddb,
2741 struct skl_pipe_wm_parameters *p,
2745 struct skl_wm_level *result)
2747 uint16_t latency = dev_priv->wm.skl_latency[level];
2748 uint16_t ddb_blocks;
2751 for (i = 0; i < num_planes; i++) {
2752 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
2754 result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
2757 &result->plane_res_b[i],
2758 &result->plane_res_l[i]);
2761 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
2762 result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
2763 latency, &result->cursor_res_b,
2764 &result->cursor_res_l);
2768 skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
2770 if (!intel_crtc_active(crtc))
2773 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
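/*
* Illustrative sketch (not driver code): the value above is the line time
* in 0.125 us units, rounded up. With a hypothetical htotal of 2200 and
* pixel_rate of 148500 kHz: DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119,
* i.e. roughly 14.8 us per line.
*/
#if 0
static uint32_t example_linetime_wm(void)
{
	uint32_t pipe_htotal = 2200, pixel_rate = 148500;	/* hypothetical */

	return (8 * pipe_htotal * 1000 + pixel_rate - 1) / pixel_rate;
}
#endif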
2777 static void skl_compute_transition_wm(struct drm_crtc *crtc,
2778 struct skl_pipe_wm_parameters *params,
2779 struct skl_wm_level *trans_wm /* out */)
2781 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2784 if (!params->active)
2787 /* Until we know more, just disable transition WMs */
2788 for (i = 0; i < intel_num_planes(intel_crtc); i++)
2789 trans_wm->plane_en[i] = false;
2790 trans_wm->cursor_en = false;
2793 static void skl_compute_pipe_wm(struct drm_crtc *crtc,
2794 struct skl_ddb_allocation *ddb,
2795 struct skl_pipe_wm_parameters *params,
2796 struct skl_pipe_wm *pipe_wm)
2798 struct drm_device *dev = crtc->dev;
2799 const struct drm_i915_private *dev_priv = dev->dev_private;
2800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2801 int level, max_level = ilk_wm_max_level(dev);
2803 for (level = 0; level <= max_level; level++) {
2804 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
2805 level, intel_num_planes(intel_crtc),
2806 &pipe_wm->wm[level]);
2808 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
2810 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
2813 static void skl_compute_wm_results(struct drm_device *dev,
2814 struct skl_pipe_wm_parameters *p,
2815 struct skl_pipe_wm *p_wm,
2816 struct skl_wm_values *r,
2817 struct intel_crtc *intel_crtc)
2819 int level, max_level = ilk_wm_max_level(dev);
2820 enum pipe pipe = intel_crtc->pipe;
2824 for (level = 0; level <= max_level; level++) {
2825 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
2828 temp |= p_wm->wm[level].plane_res_l[i] <<
2829 PLANE_WM_LINES_SHIFT;
2830 temp |= p_wm->wm[level].plane_res_b[i];
2831 if (p_wm->wm[level].plane_en[i])
2832 temp |= PLANE_WM_EN;
2834 r->plane[pipe][i][level] = temp;
2839 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
2840 temp |= p_wm->wm[level].cursor_res_b;
2842 if (p_wm->wm[level].cursor_en)
2843 temp |= PLANE_WM_EN;
2845 r->cursor[pipe][level] = temp;
2849 /* transition WMs */
2850 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
2852 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
2853 temp |= p_wm->trans_wm.plane_res_b[i];
2854 if (p_wm->trans_wm.plane_en[i])
2855 temp |= PLANE_WM_EN;
2857 r->plane_trans[pipe][i] = temp;
2861 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
2862 temp |= p_wm->trans_wm.cursor_res_b;
2863 if (p_wm->trans_wm.cursor_en)
2864 temp |= PLANE_WM_EN;
2866 r->cursor_trans[pipe] = temp;
2868 r->wm_linetime[pipe] = p_wm->linetime;
2871 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
2872 const struct skl_ddb_entry *entry)
2875 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
2880 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
2881 const struct skl_wm_values *new)
2883 struct drm_device *dev = dev_priv->dev;
2884 struct intel_crtc *crtc;
2886 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
2887 int i, level, max_level = ilk_wm_max_level(dev);
2888 enum pipe pipe = crtc->pipe;
2890 if (!new->dirty[pipe])
2893 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
2895 for (level = 0; level <= max_level; level++) {
2896 for (i = 0; i < intel_num_planes(crtc); i++)
2897 I915_WRITE(PLANE_WM(pipe, i, level),
2898 new->plane[pipe][i][level]);
2899 I915_WRITE(CUR_WM(pipe, level),
2900 new->cursor[pipe][level]);
2902 for (i = 0; i < intel_num_planes(crtc); i++)
2903 I915_WRITE(PLANE_WM_TRANS(pipe, i),
2904 new->plane_trans[pipe][i]);
2905 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
2907 for (i = 0; i < intel_num_planes(crtc); i++)
2908 skl_ddb_entry_write(dev_priv,
2909 PLANE_BUF_CFG(pipe, i),
2910 &new->ddb.plane[pipe][i]);
2912 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
2913 &new->ddb.cursor[pipe]);
2918 * When setting up a new DDB allocation arrangement, we need to correctly
2919 * sequence the times at which the new allocations for the pipes are taken into
* account or we'll have pipes fetching from space previously allocated to
* another pipe.
*
2923 * Roughly the sequence looks like:
2924 * 1. re-allocate the pipe(s) with the allocation being reduced and not
2925 * overlapping with a previous light-up pipe (another way to put it is:
* pipes with their new allocation strictly included into their old ones).
2927 * 2. re-allocate the other pipes that get their allocation reduced
2928 * 3. allocate the pipes having their allocation increased
2930 * Steps 1. and 2. are here to take care of the following case:
* - Initially DDB looks like this:
*     |   B    |   C    |
* - enable pipe A.
* - pipe B has a reduced DDB allocation that overlaps with the old pipe C
*   allocation
*     |  A  |  B  |  C  |
*
2938 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
2942 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
2944 struct drm_device *dev = dev_priv->dev;
2947 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
2949 for_each_plane(pipe, plane) {
2950 I915_WRITE(PLANE_SURF(pipe, plane),
2951 I915_READ(PLANE_SURF(pipe, plane)));
2953 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
2957 skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
2958 const struct skl_ddb_allocation *new,
2961 uint16_t old_size, new_size;
2963 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
2964 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
2966 return old_size != new_size &&
2967 new->pipe[pipe].start >= old->pipe[pipe].start &&
2968 new->pipe[pipe].end <= old->pipe[pipe].end;
2971 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
2972 struct skl_wm_values *new_values)
2974 struct drm_device *dev = dev_priv->dev;
2975 struct skl_ddb_allocation *cur_ddb, *new_ddb;
2976 bool reallocated[I915_MAX_PIPES] = {false, false, false};
2977 struct intel_crtc *crtc;
2980 new_ddb = &new_values->ddb;
2981 cur_ddb = &dev_priv->wm.skl_hw.ddb;
* First pass: flush the pipes with the new allocation contained into
* the old space.
*
2987 * We'll wait for the vblank on those pipes to ensure we can safely
2988 * re-allocate the freed space without this pipe fetching from it.
2990 for_each_intel_crtc(dev, crtc) {
2996 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
2999 skl_wm_flush_pipe(dev_priv, pipe, 1);
3000 intel_wait_for_vblank(dev, pipe);
3002 reallocated[pipe] = true;
3007 * Second pass: flush the pipes that are having their allocation
3008 * reduced, but overlapping with a previous allocation.
3010 * Here as well we need to wait for the vblank to make sure the freed
3011 * space is not used anymore.
3013 for_each_intel_crtc(dev, crtc) {
3019 if (reallocated[pipe])
3022 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3023 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3024 skl_wm_flush_pipe(dev_priv, pipe, 2);
3025 intel_wait_for_vblank(dev, pipe);
3026 reallocated[pipe] = true;
3031 * Third pass: flush the pipes that got more space allocated.
3033 * We don't need to actively wait for the update here, next vblank
3034 * will just get more DDB space with the correct WM values.
3036 for_each_intel_crtc(dev, crtc) {
* At this point, only the pipes with more space allocated than
* before are left to re-allocate.
3046 if (reallocated[pipe])
3049 skl_wm_flush_pipe(dev_priv, pipe, 3);
3053 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3054 struct skl_pipe_wm_parameters *params,
3055 struct intel_wm_config *config,
3056 struct skl_ddb_allocation *ddb, /* out */
3057 struct skl_pipe_wm *pipe_wm /* out */)
3059 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3061 skl_compute_wm_pipe_parameters(crtc, params);
3062 skl_allocate_pipe_ddb(crtc, config, params, ddb);
3063 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
3065 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
3068 intel_crtc->wm.skl_active = *pipe_wm;
3072 static void skl_update_other_pipe_wm(struct drm_device *dev,
3073 struct drm_crtc *crtc,
3074 struct intel_wm_config *config,
3075 struct skl_wm_values *r)
3077 struct intel_crtc *intel_crtc;
3078 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3081 * If the WM update hasn't changed the allocation for this_crtc (the
3082 * crtc we are currently computing the new WM values for), other
3083 * enabled crtcs will keep the same allocation and we don't need to
3084 * recompute anything for them.
3086 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3090 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3091 * other active pipes need new DDB allocation and WM values.
3093 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3095 struct skl_pipe_wm_parameters params = {};
3096 struct skl_pipe_wm pipe_wm = {};
3099 if (this_crtc->pipe == intel_crtc->pipe)
3102 if (!intel_crtc->active)
3105 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
3110 * If we end up re-computing the other pipe WM values, it's
* because it was really needed, so we expect the WM values to
* be different.
*/
3114 WARN_ON(!wm_changed);
3116 skl_compute_wm_results(dev, ¶ms, &pipe_wm, r, intel_crtc);
3117 r->dirty[intel_crtc->pipe] = true;
3121 static void skl_update_wm(struct drm_crtc *crtc)
3123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3124 struct drm_device *dev = crtc->dev;
3125 struct drm_i915_private *dev_priv = dev->dev_private;
3126 struct skl_pipe_wm_parameters params = {};
3127 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3128 struct skl_pipe_wm pipe_wm = {};
3129 struct intel_wm_config config = {};
3131 memset(results, 0, sizeof(*results));
3133 skl_compute_wm_global_parameters(dev, &config);
3135 if (!skl_update_pipe_wm(crtc, ¶ms, &config,
3136 &results->ddb, &pipe_wm))
3139 skl_compute_wm_results(dev, ¶ms, &pipe_wm, results, intel_crtc);
3140 results->dirty[intel_crtc->pipe] = true;
3142 skl_update_other_pipe_wm(dev, crtc, &config, results);
3143 skl_write_wm_values(dev_priv, results);
3144 skl_flush_wm_values(dev_priv, results);
3146 /* store the new configuration */
3147 dev_priv->wm.skl_hw = *results;
3151 skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3152 uint32_t sprite_width, uint32_t sprite_height,
3153 int pixel_size, bool enabled, bool scaled)
3155 struct intel_plane *intel_plane = to_intel_plane(plane);
3157 intel_plane->wm.enabled = enabled;
3158 intel_plane->wm.scaled = scaled;
3159 intel_plane->wm.horiz_pixels = sprite_width;
3160 intel_plane->wm.vert_pixels = sprite_height;
3161 intel_plane->wm.bytes_per_pixel = pixel_size;
3163 skl_update_wm(crtc);
3166 static void ilk_update_wm(struct drm_crtc *crtc)
3168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3169 struct drm_device *dev = crtc->dev;
3170 struct drm_i915_private *dev_priv = dev->dev_private;
3171 struct ilk_wm_maximums max;
3172 struct ilk_pipe_wm_parameters params = {};
3173 struct ilk_wm_values results = {};
3174 enum intel_ddb_partitioning partitioning;
3175 struct intel_pipe_wm pipe_wm = {};
3176 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3177 struct intel_wm_config config = {};
3179 ilk_compute_wm_parameters(crtc, ¶ms);
3181 intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm);
3183 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3186 intel_crtc->wm.active = pipe_wm;
3188 ilk_compute_wm_config(dev, &config);
3190 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3191 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3193 /* 5/6 split only in single pipe config on IVB+ */
3194 if (INTEL_INFO(dev)->gen >= 7 &&
3195 config.num_pipes_active == 1 && config.sprites_enabled) {
3196 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3197 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3199 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3201 best_lp_wm = &lp_wm_1_2;
3204 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3205 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3207 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3209 ilk_write_wm_values(dev_priv, &results);
3213 ilk_update_sprite_wm(struct drm_plane *plane,
3214 struct drm_crtc *crtc,
3215 uint32_t sprite_width, uint32_t sprite_height,
3216 int pixel_size, bool enabled, bool scaled)
3218 struct drm_device *dev = plane->dev;
3219 struct intel_plane *intel_plane = to_intel_plane(plane);
3221 intel_plane->wm.enabled = enabled;
3222 intel_plane->wm.scaled = scaled;
3223 intel_plane->wm.horiz_pixels = sprite_width;
3224 intel_plane->wm.vert_pixels = sprite_width;
3225 intel_plane->wm.bytes_per_pixel = pixel_size;
3228 * IVB workaround: must disable low power watermarks for at least
3229 * one frame before enabling scaling. LP watermarks can be re-enabled
3230 * when scaling is disabled.
3232 * WaCxSRDisabledForSpriteScaling:ivb
3234 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
3235 intel_wait_for_vblank(dev, intel_plane->pipe);
3237 ilk_update_wm(crtc);
3240 static void skl_pipe_wm_active_state(uint32_t val,
3241 struct skl_pipe_wm *active,
3247 bool is_enabled = (val & PLANE_WM_EN) != 0;
3251 active->wm[level].plane_en[i] = is_enabled;
3252 active->wm[level].plane_res_b[i] =
3253 val & PLANE_WM_BLOCKS_MASK;
3254 active->wm[level].plane_res_l[i] =
3255 (val >> PLANE_WM_LINES_SHIFT) &
3256 PLANE_WM_LINES_MASK;
3258 active->wm[level].cursor_en = is_enabled;
3259 active->wm[level].cursor_res_b =
3260 val & PLANE_WM_BLOCKS_MASK;
3261 active->wm[level].cursor_res_l =
3262 (val >> PLANE_WM_LINES_SHIFT) &
3263 PLANE_WM_LINES_MASK;
3267 active->trans_wm.plane_en[i] = is_enabled;
3268 active->trans_wm.plane_res_b[i] =
3269 val & PLANE_WM_BLOCKS_MASK;
3270 active->trans_wm.plane_res_l[i] =
3271 (val >> PLANE_WM_LINES_SHIFT) &
3272 PLANE_WM_LINES_MASK;
3274 active->trans_wm.cursor_en = is_enabled;
3275 active->trans_wm.cursor_res_b =
3276 val & PLANE_WM_BLOCKS_MASK;
3277 active->trans_wm.cursor_res_l =
3278 (val >> PLANE_WM_LINES_SHIFT) &
3279 PLANE_WM_LINES_MASK;
3284 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3286 struct drm_device *dev = crtc->dev;
3287 struct drm_i915_private *dev_priv = dev->dev_private;
3288 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3290 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
3291 enum pipe pipe = intel_crtc->pipe;
3292 int level, i, max_level;
3295 max_level = ilk_wm_max_level(dev);
3297 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3299 for (level = 0; level <= max_level; level++) {
3300 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3301 hw->plane[pipe][i][level] =
3302 I915_READ(PLANE_WM(pipe, i, level));
3303 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
3306 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3307 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3308 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
3310 if (!intel_crtc_active(crtc))
3313 hw->dirty[pipe] = true;
3315 active->linetime = hw->wm_linetime[pipe];
3317 for (level = 0; level <= max_level; level++) {
3318 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3319 temp = hw->plane[pipe][i][level];
3320 skl_pipe_wm_active_state(temp, active, false,
3323 temp = hw->cursor[pipe][level];
3324 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3327 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3328 temp = hw->plane_trans[pipe][i];
3329 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3332 temp = hw->cursor_trans[pipe];
3333 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3336 void skl_wm_get_hw_state(struct drm_device *dev)
3338 struct drm_i915_private *dev_priv = dev->dev_private;
3339 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3340 struct drm_crtc *crtc;
3342 skl_ddb_get_hw_state(dev_priv, ddb);
3343 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3344 skl_pipe_wm_get_hw_state(crtc);
3347 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3349 struct drm_device *dev = crtc->dev;
3350 struct drm_i915_private *dev_priv = dev->dev_private;
3351 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3352 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3353 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3354 enum pipe pipe = intel_crtc->pipe;
3355 static const unsigned int wm0_pipe_reg[] = {
3356 [PIPE_A] = WM0_PIPEA_ILK,
3357 [PIPE_B] = WM0_PIPEB_ILK,
3358 [PIPE_C] = WM0_PIPEC_IVB,
3361 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3362 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3363 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3365 active->pipe_enabled = intel_crtc_active(crtc);
3367 if (active->pipe_enabled) {
3368 u32 tmp = hw->wm_pipe[pipe];
3371 * For active pipes LP0 watermark is marked as
* enabled, and LP1+ watermarks as disabled since
3373 * we can't really reverse compute them in case
3374 * multiple pipes are active.
3376 active->wm[0].enable = true;
3377 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3378 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3379 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3380 active->linetime = hw->wm_linetime[pipe];
3382 int level, max_level = ilk_wm_max_level(dev);
3385 * For inactive pipes, all watermark levels
3386 * should be marked as enabled but zeroed,
3387 * which is what we'd compute them to.
3389 for (level = 0; level <= max_level; level++)
3390 active->wm[level].enable = true;
3394 void ilk_wm_get_hw_state(struct drm_device *dev)
3396 struct drm_i915_private *dev_priv = dev->dev_private;
3397 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3398 struct drm_crtc *crtc;
3400 for_each_crtc(dev, crtc)
3401 ilk_pipe_wm_get_hw_state(crtc);
3403 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
3404 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
3405 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
3407 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
3408 if (INTEL_INFO(dev)->gen >= 7) {
3409 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3410 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3413 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3414 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3415 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3416 else if (IS_IVYBRIDGE(dev))
3417 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
3418 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3421 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3425 * intel_update_watermarks - update FIFO watermark values based on current modes
3427 * Calculate watermark values for the various WM regs based on current mode
3428 * and plane configuration.
3430 * There are several cases to deal with here:
3431 * - normal (i.e. non-self-refresh)
3432 * - self-refresh (SR) mode
3433 * - lines are large relative to FIFO size (buffer can hold up to 2)
3434 * - lines are small relative to FIFO size (buffer can hold more than 2
3435 * lines), so need to account for TLB latency
3437 * The normal calculation is:
3438 * watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent (we assume pessimal
* values).
*
* The SR calculation is:
*   watermark = (trunc(latency/line time)+1) * surface width *
*     bytes per pixel
* where
*   line time = htotal / dotclock
3447 * surface width = hdisplay for normal plane and 64 for cursor
3448 * and latency is assumed to be high, as above.
3450 * The final value programmed to the register should always be rounded up,
3451 * and include an extra 2 entries to account for clock crossings.
3453 * We don't use the sprite, so we can ignore that. And on Crestline we have
3454 * to set the non-SR watermarks to 8.
3456 void intel_update_watermarks(struct drm_crtc *crtc)
3458 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3460 if (dev_priv->display.update_wm)
3461 dev_priv->display.update_wm(crtc);
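/*
* Illustrative sketch (not driver code): the "normal" formula from the
* comment above with made-up numbers. A 148.5 MHz dotclock at 4 bytes per
* pixel and a pessimal 12 us latency has to cover
* 148500000 * 4 * 12 / 1000000 = 7128 bytes, before the caller rounds up
* and adds the 2 extra entries for clock crossings.
*/
#if 0
static unsigned int example_normal_wm_bytes(void)
{
	unsigned long long dotclock = 148500000ULL;	/* Hz, hypothetical */
	unsigned int bytes_per_pixel = 4;
	unsigned int latency_us = 12;			/* hypothetical */

	/* watermark = dotclock * bytes per pixel * latency */
	return dotclock * bytes_per_pixel * latency_us / 1000000;
}
#endif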
3464 void intel_update_sprite_watermarks(struct drm_plane *plane,
3465 struct drm_crtc *crtc,
3466 uint32_t sprite_width,
3467 uint32_t sprite_height,
3469 bool enabled, bool scaled)
3471 struct drm_i915_private *dev_priv = plane->dev->dev_private;
3473 if (dev_priv->display.update_sprite_wm)
3474 dev_priv->display.update_sprite_wm(plane, crtc,
3475 sprite_width, sprite_height,
3476 pixel_size, enabled, scaled);
3479 static struct drm_i915_gem_object *
3480 intel_alloc_context_page(struct drm_device *dev)
3482 struct drm_i915_gem_object *ctx;
3485 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3487 ctx = i915_gem_alloc_object(dev, 4096);
3489 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3493 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
3495 DRM_ERROR("failed to pin power context: %d\n", ret);
3499 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3501 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3508 i915_gem_object_ggtt_unpin(ctx);
3510 drm_gem_object_unreference(&ctx->base);
/*
* Lock protecting IPS related data structures
*/
3517 DEFINE_SPINLOCK(mchdev_lock);
/* Global for IPS driver to get at the current i915 device. Protected by
* mchdev_lock. */
3521 static struct drm_i915_private *i915_mch_dev;
3523 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3525 struct drm_i915_private *dev_priv = dev->dev_private;
3528 assert_spin_locked(&mchdev_lock);
3530 rgvswctl = I915_READ16(MEMSWCTL);
3531 if (rgvswctl & MEMCTL_CMD_STS) {
3532 DRM_DEBUG("gpu busy, RCS change rejected\n");
3533 return false; /* still busy with another command */
3536 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3537 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3538 I915_WRITE16(MEMSWCTL, rgvswctl);
3539 POSTING_READ16(MEMSWCTL);
3541 rgvswctl |= MEMCTL_CMD_STS;
3542 I915_WRITE16(MEMSWCTL, rgvswctl);
3547 static void ironlake_enable_drps(struct drm_device *dev)
3549 struct drm_i915_private *dev_priv = dev->dev_private;
3550 u32 rgvmodectl = I915_READ(MEMMODECTL);
3551 u8 fmax, fmin, fstart, vstart;
3553 spin_lock_irq(&mchdev_lock);
3555 /* Enable temp reporting */
3556 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3557 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3559 /* 100ms RC evaluation intervals */
3560 I915_WRITE(RCUPEI, 100000);
3561 I915_WRITE(RCDNEI, 100000);
3563 /* Set max/min thresholds to 90ms and 80ms respectively */
3564 I915_WRITE(RCBMAXAVG, 90000);
3565 I915_WRITE(RCBMINAVG, 80000);
3567 I915_WRITE(MEMIHYST, 1);
3569 /* Set up min, max, and cur for interrupt handling */
3570 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3571 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3572 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3573 MEMMODE_FSTART_SHIFT;
3575 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3578 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3579 dev_priv->ips.fstart = fstart;
3581 dev_priv->ips.max_delay = fstart;
3582 dev_priv->ips.min_delay = fmin;
3583 dev_priv->ips.cur_delay = fstart;
3585 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3586 fmax, fmin, fstart);
3588 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3591 * Interrupts will be enabled in ironlake_irq_postinstall
3594 I915_WRITE(VIDSTART, vstart);
3595 POSTING_READ(VIDSTART);
3597 rgvmodectl |= MEMMODE_SWMODE_EN;
3598 I915_WRITE(MEMMODECTL, rgvmodectl);
3600 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3601 DRM_ERROR("stuck trying to change perf mode\n");
3604 ironlake_set_drps(dev, fstart);
3606 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3608 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3609 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3610 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3612 spin_unlock_irq(&mchdev_lock);
3615 static void ironlake_disable_drps(struct drm_device *dev)
3617 struct drm_i915_private *dev_priv = dev->dev_private;
3620 spin_lock_irq(&mchdev_lock);
3622 rgvswctl = I915_READ16(MEMSWCTL);
3624 /* Ack interrupts, disable EFC interrupt */
3625 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3626 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3627 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3628 I915_WRITE(DEIIR, DE_PCU_EVENT);
3629 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3631 /* Go back to the starting frequency */
3632 ironlake_set_drps(dev, dev_priv->ips.fstart);
3634 rgvswctl |= MEMCTL_CMD_STS;
3635 I915_WRITE(MEMSWCTL, rgvswctl);
3638 spin_unlock_irq(&mchdev_lock);
3641 /* There's a funny hw issue where the hw returns all 0 when reading from
3642 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
* ourselves, instead of doing an rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at the moment).
3646 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3650 /* Only set the down limit when we've reached the lowest level to avoid
3651 * getting more interrupts, otherwise leave this clear. This prevents a
3652 * race in the hw when coming out of rc6: There's a tiny window where
3653 * the hw runs at the minimal clock before selecting the desired
3654 * frequency, if the down threshold expires in that window we will not
3655 * receive a down interrupt. */
3656 limits = dev_priv->rps.max_freq_softlimit << 24;
3657 if (val <= dev_priv->rps.min_freq_softlimit)
3658 limits |= dev_priv->rps.min_freq_softlimit << 16;
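/*
* Illustrative sketch (not driver code): the computed limits pack the max
* softlimit into bits 31:24 and, only once we've hit the floor, the min
* softlimit into bits 23:16. E.g. max = 0x16, min = 0x08, val = 0x08
* yields 0x16080000.
*/
#if 0
static u32 example_rps_limits(u8 val, u8 min, u8 max)
{
	u32 limits = (u32)max << 24;

	/* advertise the down limit only once we've actually reached it */
	if (val <= min)
		limits |= (u32)min << 16;

	return limits;
}
#endif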
3663 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3667 new_power = dev_priv->rps.power;
3668 switch (dev_priv->rps.power) {
3670 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3671 new_power = BETWEEN;
3675 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3676 new_power = LOW_POWER;
3677 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3678 new_power = HIGH_POWER;
3682 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3683 new_power = BETWEEN;
3686 /* Max/min bins are special */
3687 if (val == dev_priv->rps.min_freq_softlimit)
3688 new_power = LOW_POWER;
3689 if (val == dev_priv->rps.max_freq_softlimit)
3690 new_power = HIGH_POWER;
3691 if (new_power == dev_priv->rps.power)
3694 /* Note the units here are not exactly 1us, but 1280ns. */
3695 switch (new_power) {
3697 /* Upclock if more than 95% busy over 16ms */
3698 I915_WRITE(GEN6_RP_UP_EI, 12500);
3699 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3701 /* Downclock if less than 85% busy over 32ms */
3702 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3703 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3705 I915_WRITE(GEN6_RP_CONTROL,
3706 GEN6_RP_MEDIA_TURBO |
3707 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3708 GEN6_RP_MEDIA_IS_GFX |
3710 GEN6_RP_UP_BUSY_AVG |
3711 GEN6_RP_DOWN_IDLE_AVG);
3715 /* Upclock if more than 90% busy over 13ms */
3716 I915_WRITE(GEN6_RP_UP_EI, 10250);
3717 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3719 /* Downclock if less than 75% busy over 32ms */
3720 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3721 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3723 I915_WRITE(GEN6_RP_CONTROL,
3724 GEN6_RP_MEDIA_TURBO |
3725 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3726 GEN6_RP_MEDIA_IS_GFX |
3728 GEN6_RP_UP_BUSY_AVG |
3729 GEN6_RP_DOWN_IDLE_AVG);
3733 /* Upclock if more than 85% busy over 10ms */
3734 I915_WRITE(GEN6_RP_UP_EI, 8000);
3735 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3737 /* Downclock if less than 60% busy over 32ms */
3738 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3739 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3741 I915_WRITE(GEN6_RP_CONTROL,
3742 GEN6_RP_MEDIA_TURBO |
3743 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3744 GEN6_RP_MEDIA_IS_GFX |
3746 GEN6_RP_UP_BUSY_AVG |
3747 GEN6_RP_DOWN_IDLE_AVG);
3751 dev_priv->rps.power = new_power;
3752 dev_priv->rps.last_adj = 0;
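/*
* Illustrative sketch (not driver code): how the EI/threshold pairs above
* encode the advertised percentages. The counters tick every 1280 ns, so
* for the low-power bin: up EI = 12500 ticks = 16 ms with a threshold of
* 11800/12500 = 94.4%, and down EI = 25000 ticks = 32 ms with a threshold
* of 21250/25000 = 85%.
*/
#if 0
static u32 example_rps_threshold_ticks(u32 ei_ticks, u32 busy_percent)
{
	/* example_rps_threshold_ticks(12500, 94) ~= 11750 */
	return ei_ticks * busy_percent / 100;
}
#endif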
3755 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3759 if (val > dev_priv->rps.min_freq_softlimit)
3760 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3761 if (val < dev_priv->rps.max_freq_softlimit)
3762 mask |= GEN6_PM_RP_UP_THRESHOLD;
3764 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3765 mask &= dev_priv->pm_rps_events;
3767 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
3770 /* gen6_set_rps is called to update the frequency request, but should also be
3771 * called when the range (min_delay and max_delay) is modified so that we can
3772 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3773 static void gen6_set_rps(struct drm_device *dev, u8 val)
3775 struct drm_i915_private *dev_priv = dev->dev_private;
3777 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3778 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3779 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3781 /* min/max delay may still have been modified so be sure to
3782 * write the limits value.
3784 if (val != dev_priv->rps.cur_freq) {
3785 gen6_set_rps_thresholds(dev_priv, val);
3787 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3788 I915_WRITE(GEN6_RPNSWREQ,
3789 HSW_FREQUENCY(val));
3791 I915_WRITE(GEN6_RPNSWREQ,
3792 GEN6_FREQUENCY(val) |
3794 GEN6_AGGRESSIVE_TURBO);
3797 /* Make sure we continue to get interrupts
3798 * until we hit the minimum or maximum frequencies.
3800 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3801 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3803 POSTING_READ(GEN6_RPNSWREQ);
3805 dev_priv->rps.cur_freq = val;
3806 trace_intel_gpu_freq_change(val * 50);
3809 static void valleyview_set_rps(struct drm_device *dev, u8 val)
3811 struct drm_i915_private *dev_priv = dev->dev_private;
3813 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3814 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3815 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3817 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3818 "Odd GPU freq value\n"))
3821 if (val != dev_priv->rps.cur_freq)
3822 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3824 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3826 dev_priv->rps.cur_freq = val;
3827 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
3830 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
*
* If Gfx is idle, then:
* 1. Mask Turbo interrupts
* 2. Bring up Gfx clock
* 3. Change the freq to Rpn and wait till P-Unit updates freq
* 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
* 5. Unmask Turbo interrupts
*/
3839 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3841 struct drm_device *dev = dev_priv->dev;
3843 /* CHV and latest VLV don't need to force the gfx clock */
3844 if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
3845 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
/* When we are idle, drop to the minimum voltage state. */
3853 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
/* Mask turbo interrupts so that none arrive in between */
3857 I915_WRITE(GEN6_PMINTRMSK,
3858 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
3860 vlv_force_gfx_clock(dev_priv, true);
3862 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3864 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3865 dev_priv->rps.min_freq_softlimit);
3867 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3868 & GENFREQSTATUS) == 0, 100))
3869 DRM_ERROR("timed out waiting for Punit\n");
3871 vlv_force_gfx_clock(dev_priv, false);
3873 I915_WRITE(GEN6_PMINTRMSK,
3874 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3877 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3879 struct drm_device *dev = dev_priv->dev;
3881 mutex_lock(&dev_priv->rps.hw_lock);
3882 if (dev_priv->rps.enabled) {
3883 if (IS_VALLEYVIEW(dev))
3884 vlv_set_rps_idle(dev_priv);
3886 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3887 dev_priv->rps.last_adj = 0;
3889 mutex_unlock(&dev_priv->rps.hw_lock);
3892 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3894 mutex_lock(&dev_priv->rps.hw_lock);
3895 if (dev_priv->rps.enabled) {
3896 intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3897 dev_priv->rps.last_adj = 0;
3899 mutex_unlock(&dev_priv->rps.hw_lock);
3902 void intel_set_rps(struct drm_device *dev, u8 val)
3904 if (IS_VALLEYVIEW(dev))
3905 valleyview_set_rps(dev, val);
3907 gen6_set_rps(dev, val);
3910 static void gen9_disable_rps(struct drm_device *dev)
3912 struct drm_i915_private *dev_priv = dev->dev_private;
3914 I915_WRITE(GEN6_RC_CONTROL, 0);
3915 I915_WRITE(GEN9_PG_ENABLE, 0);
3918 static void gen6_disable_rps(struct drm_device *dev)
3920 struct drm_i915_private *dev_priv = dev->dev_private;
3922 I915_WRITE(GEN6_RC_CONTROL, 0);
3923 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3926 static void cherryview_disable_rps(struct drm_device *dev)
3928 struct drm_i915_private *dev_priv = dev->dev_private;
3930 I915_WRITE(GEN6_RC_CONTROL, 0);
3933 static void valleyview_disable_rps(struct drm_device *dev)
3935 struct drm_i915_private *dev_priv = dev->dev_private;
/* We're doing forcewake before disabling RC6;
* this is what the BIOS expects when going into suspend */
3939 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3941 I915_WRITE(GEN6_RC_CONTROL, 0);
3943 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
3946 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3948 if (IS_VALLEYVIEW(dev)) {
3949 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3950 mode = GEN6_RC_CTL_RC6_ENABLE;
3955 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
3956 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3957 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3958 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3961 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
3962 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
3965 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3967 /* No RC6 before Ironlake */
3968 if (INTEL_INFO(dev)->gen < 5)
/* RC6 is only on Ironlake mobile, not on desktop */
3972 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3975 /* Respect the kernel parameter if it is set */
3976 if (enable_rc6 >= 0) {
3980 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3983 mask = INTEL_RC6_ENABLE;
3985 if ((enable_rc6 & mask) != enable_rc6)
3986 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3987 enable_rc6 & mask, enable_rc6, mask);
3989 return enable_rc6 & mask;
3992 /* Disable RC6 on Ironlake */
3993 if (INTEL_INFO(dev)->gen == 5)
3996 if (IS_IVYBRIDGE(dev))
3997 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3999 return INTEL_RC6_ENABLE;
4002 int intel_enable_rc6(const struct drm_device *dev)
4004 return i915.enable_rc6;
4007 static void gen6_init_rps_frequencies(struct drm_device *dev)
4009 struct drm_i915_private *dev_priv = dev->dev_private;
4010 uint32_t rp_state_cap;
4011 u32 ddcc_status = 0;
4014 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4015 /* All of these values are in units of 50MHz */
4016 dev_priv->rps.cur_freq = 0;
4017 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4018 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4019 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4020 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4021 /* hw_max = RP0 until we check for overclocking */
4022 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4024 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4025 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4026 ret = sandybridge_pcode_read(dev_priv,
4027 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4030 dev_priv->rps.efficient_freq =
4032 ((ddcc_status >> 8) & 0xff),
4033 dev_priv->rps.min_freq,
4034 dev_priv->rps.max_freq);
4037 /* Preserve min/max settings in case of re-init */
4038 if (dev_priv->rps.max_freq_softlimit == 0)
4039 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4041 if (dev_priv->rps.min_freq_softlimit == 0) {
4042 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4043 dev_priv->rps.min_freq_softlimit =
4044 /* max(RPe, 450 MHz) */
4045 max(dev_priv->rps.efficient_freq, (u8) 9);
4047 dev_priv->rps.min_freq_softlimit =
4048 dev_priv->rps.min_freq;
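/*
* Illustrative sketch (not driver code): since these values are in 50 MHz
* units (see the comment above), the (u8)9 floor works out to
* 9 * 50 MHz = 450 MHz, matching the "max(RPe, 450 MHz)" note.
*/
#if 0
static unsigned int example_rps_units_to_mhz(u8 val)
{
	return val * 50;	/* 9 -> 450 MHz */
}
#endif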
4052 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
4053 static void gen9_enable_rps(struct drm_device *dev)
4055 struct drm_i915_private *dev_priv = dev->dev_private;
4057 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4059 gen6_init_rps_frequencies(dev);
4061 I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
4062 I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
4064 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
4065 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
4066 I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
4067 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
4068 I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
4069 I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
4070 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
4071 I915_WRITE(GEN6_PMINTRMSK, 0x6);
4072 I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
4073 GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
4074 GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
4075 GEN6_RP_DOWN_IDLE_AVG);
4077 gen6_enable_rps_interrupts(dev);
4079 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4082 static void gen9_enable_rc6(struct drm_device *dev)
4084 struct drm_i915_private *dev_priv = dev->dev_private;
4085 struct intel_engine_cs *ring;
4086 uint32_t rc6_mask = 0;
4089 /* 1a: Software RC state - RC0 */
4090 I915_WRITE(GEN6_RC_STATE, 0);
4092 /* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
4094 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4096 /* 2a: Disable RC states. */
4097 I915_WRITE(GEN6_RC_CONTROL, 0);
4099 /* 2b: Program RC6 thresholds.*/
4100 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4101 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4102 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4103 for_each_ring(ring, dev_priv, unused)
4104 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4105 I915_WRITE(GEN6_RC_SLEEP, 0);
4106 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4108 /* 2c: Program Coarse Power Gating Policies. */
4109 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4110 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4112 /* 3a: Enable RC6 */
4113 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4114 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4115 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4117 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4118 GEN6_RC_CTL_EI_MODE(1) |
4119 rc6_mask);
4121 /* 3b: Enable Coarse Power Gating only when RC6 is enabled */
4122 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 3 : 0);
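/*
 * The literal 3 here sets bits 0 and 1 of GEN9_PG_ENABLE, which
 * (as the Gen9 PM guide describes them) enable render and media
 * coarse power gating respectively; both stay off unless RC6
 * itself was enabled above.
 */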
4124 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4128 static void gen8_enable_rps(struct drm_device *dev)
4130 struct drm_i915_private *dev_priv = dev->dev_private;
4131 struct intel_engine_cs *ring;
4132 uint32_t rc6_mask = 0;
4135 /* 1a: Software RC state - RC0 */
4136 I915_WRITE(GEN6_RC_STATE, 0);
4138 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4139 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4140 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4142 /* 2a: Disable RC states. */
4143 I915_WRITE(GEN6_RC_CONTROL, 0);
4145 /* Initialize rps frequencies */
4146 gen6_init_rps_frequencies(dev);
4148 /* 2b: Program RC6 thresholds.*/
4149 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4150 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4151 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4152 for_each_ring(ring, dev_priv, unused)
4153 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4154 I915_WRITE(GEN6_RC_SLEEP, 0);
4155 if (IS_BROADWELL(dev))
4156 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4157 else
4158 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4161 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4162 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4163 intel_print_rc6_info(dev, rc6_mask);
4164 if (IS_BROADWELL(dev))
4165 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4166 GEN7_RC_CTL_TO_MODE |
4167 rc6_mask);
4168 else
4169 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4170 GEN6_RC_CTL_EI_MODE(1) |
4171 rc6_mask);
4173 /* 4 Program defaults and thresholds for RPS*/
4174 I915_WRITE(GEN6_RPNSWREQ,
4175 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4176 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4177 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4178 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4179 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4181 /* Docs recommend 900 MHz and 300 MHz respectively */
4182 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4183 dev_priv->rps.max_freq_softlimit << 24 |
4184 dev_priv->rps.min_freq_softlimit << 16);
4186 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4187 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
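/*
 * The RP thresholds and timeouts count in 1.28us hardware units,
 * hence the /128 scaling of the 10ns-style literals above:
 * 7600000 / 128 = 59375 units, and 59375 * 1.28us = 76ms, exactly
 * the busyness figure quoted in the comment.
 */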
4188 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4189 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4191 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4194 I915_WRITE(GEN6_RP_CONTROL,
4195 GEN6_RP_MEDIA_TURBO |
4196 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4197 GEN6_RP_MEDIA_IS_GFX |
4198 GEN6_RP_ENABLE |
4199 GEN6_RP_UP_BUSY_AVG |
4200 GEN6_RP_DOWN_IDLE_AVG);
4202 /* 6: Ring frequency + overclocking (our driver does this later) */
4204 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4205 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
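/*
 * Seeding rps.power with HIGH_POWER first makes gen6_set_rps()
 * treat this request as a power-state change, so the up/down
 * thresholds get (re)programmed instead of being skipped as a
 * no-op.
 */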
4207 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4210 static void gen6_enable_rps(struct drm_device *dev)
4212 struct drm_i915_private *dev_priv = dev->dev_private;
4213 struct intel_engine_cs *ring;
4214 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4215 u32 gtfifodbg;
4216 int rc6_mode;
4217 int i, ret;
4219 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4221 /* Here begins a magic sequence of register writes to enable
4222 * auto-downclocking.
4224 * Perhaps there might be some value in exposing these to
4225 * userspace... */
4227 I915_WRITE(GEN6_RC_STATE, 0);
4229 /* Clear the DBG now so we don't confuse earlier errors */
4230 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4231 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4232 I915_WRITE(GTFIFODBG, gtfifodbg);
4235 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4237 /* Initialize rps frequencies */
4238 gen6_init_rps_frequencies(dev);
4240 /* disable the counters and set deterministic thresholds */
4241 I915_WRITE(GEN6_RC_CONTROL, 0);
4243 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4244 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4245 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4246 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4247 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4249 for_each_ring(ring, dev_priv, i)
4250 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4252 I915_WRITE(GEN6_RC_SLEEP, 0);
4253 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4254 if (IS_IVYBRIDGE(dev))
4255 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4257 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4258 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4259 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4261 /* Check if we are enabling RC6 */
4262 rc6_mode = intel_enable_rc6(dev_priv->dev);
4263 if (rc6_mode & INTEL_RC6_ENABLE)
4264 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4266 /* We don't use those on Haswell */
4267 if (!IS_HASWELL(dev)) {
4268 if (rc6_mode & INTEL_RC6p_ENABLE)
4269 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4271 if (rc6_mode & INTEL_RC6pp_ENABLE)
4272 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4275 intel_print_rc6_info(dev, rc6_mask);
4277 I915_WRITE(GEN6_RC_CONTROL,
4278 rc6_mask |
4279 GEN6_RC_CTL_EI_MODE(1) |
4280 GEN6_RC_CTL_HW_ENABLE);
4282 /* Power down if completely idle for over 50ms */
4283 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4284 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4286 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4287 if (ret)
4288 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4290 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4291 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4292 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
4293 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
4294 (pcu_mbox & 0xff) * 50);
4295 dev_priv->rps.max_freq = pcu_mbox & 0xff;
4298 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4299 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4302 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4303 if (IS_GEN6(dev) && ret) {
4304 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
4305 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
4306 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
4307 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
4308 rc6vids &= 0xffff00;
4309 rc6vids |= GEN6_ENCODE_RC6_VID(450);
4310 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
4311 if (ret)
4312 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4315 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4318 static void __gen6_update_ring_freq(struct drm_device *dev)
4320 struct drm_i915_private *dev_priv = dev->dev_private;
4321 int min_freq = 15;
4322 unsigned int gpu_freq;
4323 unsigned int max_ia_freq, min_ring_freq;
4324 int scaling_factor = 180;
4325 struct cpufreq_policy *policy;
4327 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4329 policy = cpufreq_cpu_get(0);
4330 if (policy) {
4331 max_ia_freq = policy->cpuinfo.max_freq;
4332 cpufreq_cpu_put(policy);
4333 } else {
4335 * Default to measured freq if none found, PCU will ensure we
4336 * don't go over the specified limit
4338 max_ia_freq = tsc_khz;
4339 }
4341 /* Convert from kHz to MHz */
4342 max_ia_freq /= 1000;
4344 min_ring_freq = I915_READ(DCLK) & 0xf;
4345 /* convert DDR frequency from units of 266.6MHz to bandwidth */
4346 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
4349 * For each potential GPU frequency, load a ring frequency we'd like
4350 * to use for memory access. We do this by specifying the IA frequency
4351 * the PCU should use as a reference to determine the ring frequency.
4353 for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
4354 gpu_freq--) {
4355 int diff = dev_priv->rps.max_freq - gpu_freq;
4356 unsigned int ia_freq = 0, ring_freq = 0;
4358 if (INTEL_INFO(dev)->gen >= 8) {
4359 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4360 ring_freq = max(min_ring_freq, gpu_freq);
4361 } else if (IS_HASWELL(dev)) {
4362 ring_freq = mult_frac(gpu_freq, 5, 4);
4363 ring_freq = max(min_ring_freq, ring_freq);
4364 /* leave ia_freq as the default, chosen by cpufreq */
4366 /* On older processors, there is no separate ring
4367 * clock domain, so in order to boost the bandwidth
4368 * of the ring, we need to upclock the CPU (ia_freq).
4370 * For GPU frequencies less than 750MHz,
4371 * just use the lowest ring freq.
4373 if (gpu_freq < min_freq)
4374 ia_freq = 800;
4375 else
4376 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
4377 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
4380 sandybridge_pcode_write(dev_priv,
4381 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
4382 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
4383 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
4384 gpu_freq);
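/*
 * Each pcode write thus loads one table entry packing a
 * (gpu_freq, ring_freq, ia_freq) triple into a single mailbox
 * word.  The min_freq cutoff of 15 is 750 MHz in 50MHz units, and
 * ia_freq has already been scaled to 100MHz units by the
 * DIV_ROUND_CLOSEST(ia_freq, 100) above.
 */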
4388 void gen6_update_ring_freq(struct drm_device *dev)
4390 struct drm_i915_private *dev_priv = dev->dev_private;
4392 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
4393 return;
4395 mutex_lock(&dev_priv->rps.hw_lock);
4396 __gen6_update_ring_freq(dev);
4397 mutex_unlock(&dev_priv->rps.hw_lock);
4400 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
4402 struct drm_device *dev = dev_priv->dev;
4405 if (dev->pdev->revision >= 0x20) {
4406 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
4408 switch (INTEL_INFO(dev)->eu_total) {
4409 case 8:
4410 /* (2 * 4) config */
4411 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
4412 break;
4413 case 12:
4414 /* (2 * 6) config */
4415 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
4416 break;
4417 case 16:
4418 /* (2 * 8) config */
4419 default:
4420 /* Setting (2 * 8) Min RP0 for any other combination */
4421 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
4422 break;
4423 }
4424 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
4425 } else {
4426 /* For pre-production hardware */
4427 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4428 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
4429 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4430 }
4432 return rp0;
4434 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4438 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
4439 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
4444 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
4446 struct drm_device *dev = dev_priv->dev;
4449 if (dev->pdev->revision >= 0x20) {
4450 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
4451 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
4452 } else {
4453 /* For pre-production hardware */
4454 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4455 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
4456 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
4461 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
4463 struct drm_device *dev = dev_priv->dev;
4466 if (dev->pdev->revision >= 0x20) {
4467 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
4468 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
4469 FB_GFX_FREQ_FUSE_MASK);
4470 } else { /* For pre-production hardware */
4471 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4472 rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
4473 PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
4479 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
4483 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4485 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
4490 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
4494 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4496 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
4498 rp0 = min_t(u32, rp0, 0xea);
4503 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4507 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
4508 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
4509 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
4510 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
4515 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
4517 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
4520 /* Check that the pctx buffer wasn't moved under us. */
4521 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
4523 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4525 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
4526 dev_priv->vlv_pctx->stolen->start);
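/*
 * VLV_PCBR carries a physical address into stolen memory; the
 * ~4095 mask strips the low bits so only the 4KiB-aligned base is
 * compared against where we think the power context lives.
 */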
4530 /* Check that the pcbr address is not empty. */
4531 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
4533 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4535 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
4538 static void cherryview_setup_pctx(struct drm_device *dev)
4540 struct drm_i915_private *dev_priv = dev->dev_private;
4541 unsigned long pctx_paddr, paddr;
4542 struct i915_gtt *gtt = &dev_priv->gtt;
4544 int pctx_size = 32*1024;
4546 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4548 pcbr = I915_READ(VLV_PCBR);
4549 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
4550 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
4551 paddr = (dev_priv->mm.stolen_base +
4552 (gtt->stolen_size - pctx_size));
4554 pctx_paddr = (paddr & (~4095));
4555 I915_WRITE(VLV_PCBR, pctx_paddr);
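/*
 * i.e. if the BIOS left PCBR unprogrammed, the 32KiB power context
 * is carved page-aligned out of the very top of stolen memory.
 */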
4558 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
4561 static void valleyview_setup_pctx(struct drm_device *dev)
4563 struct drm_i915_private *dev_priv = dev->dev_private;
4564 struct drm_i915_gem_object *pctx;
4565 unsigned long pctx_paddr;
4567 int pctx_size = 24*1024;
4569 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4571 pcbr = I915_READ(VLV_PCBR);
4572 if (pcbr) {
4573 /* BIOS set it up already, grab the pre-alloc'd space */
4576 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
4577 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
4578 pcbr_offset,
4579 I915_GTT_OFFSET_NONE,
4580 pctx_size);
4581 goto out;
4582 }
4584 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
4587 * From the Gunit register HAS:
4588 * The Gfx driver is expected to program this register and ensure
4589 * proper allocation within Gfx stolen memory. For example, this
4590 * register should be programmed such that the PCBR range does not
4591 * overlap with other ranges, such as the frame buffer, protected
4592 * memory, or any other relevant ranges.
4594 pctx = i915_gem_object_create_stolen(dev, pctx_size);
4595 if (!pctx) {
4596 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
4597 return;
4598 }
4600 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
4601 I915_WRITE(VLV_PCBR, pctx_paddr);
4604 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
4605 dev_priv->vlv_pctx = pctx;
4608 static void valleyview_cleanup_pctx(struct drm_device *dev)
4610 struct drm_i915_private *dev_priv = dev->dev_private;
4612 if (WARN_ON(!dev_priv->vlv_pctx))
4613 return;
4615 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
4616 dev_priv->vlv_pctx = NULL;
4619 static void valleyview_init_gt_powersave(struct drm_device *dev)
4621 struct drm_i915_private *dev_priv = dev->dev_private;
4624 valleyview_setup_pctx(dev);
4626 mutex_lock(&dev_priv->rps.hw_lock);
4628 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4629 switch ((val >> 6) & 3) {
4630 case 0:
4631 case 1:
4632 dev_priv->mem_freq = 800;
4633 break;
4634 case 2:
4635 dev_priv->mem_freq = 1066;
4636 break;
4637 case 3:
4638 dev_priv->mem_freq = 1333;
4639 break;
4640 }
4641 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4643 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
4644 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4645 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4646 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4647 dev_priv->rps.max_freq);
4649 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
4650 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4651 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4652 dev_priv->rps.efficient_freq);
4654 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
4655 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
4656 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4657 dev_priv->rps.rp1_freq);
4659 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
4660 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4661 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4662 dev_priv->rps.min_freq);
4664 /* Preserve min/max settings in case of re-init */
4665 if (dev_priv->rps.max_freq_softlimit == 0)
4666 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4668 if (dev_priv->rps.min_freq_softlimit == 0)
4669 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4671 mutex_unlock(&dev_priv->rps.hw_lock);
4674 static void cherryview_init_gt_powersave(struct drm_device *dev)
4676 struct drm_i915_private *dev_priv = dev->dev_private;
4679 cherryview_setup_pctx(dev);
4681 mutex_lock(&dev_priv->rps.hw_lock);
4683 mutex_lock(&dev_priv->dpio_lock);
4684 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
4685 mutex_unlock(&dev_priv->dpio_lock);
4687 switch ((val >> 2) & 0x7) {
4688 case 0:
4689 case 1:
4690 dev_priv->rps.cz_freq = 200;
4691 dev_priv->mem_freq = 1600;
4692 break;
4693 case 2:
4694 dev_priv->rps.cz_freq = 267;
4695 dev_priv->mem_freq = 1600;
4696 break;
4697 case 3:
4698 dev_priv->rps.cz_freq = 333;
4699 dev_priv->mem_freq = 2000;
4700 break;
4701 case 4:
4702 dev_priv->rps.cz_freq = 320;
4703 dev_priv->mem_freq = 1600;
4704 break;
4705 case 5:
4706 dev_priv->rps.cz_freq = 400;
4707 dev_priv->mem_freq = 1600;
4708 break;
4709 }
4710 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4712 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4713 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4714 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4715 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4716 dev_priv->rps.max_freq);
4718 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4719 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4720 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4721 dev_priv->rps.efficient_freq);
4723 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4724 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4725 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4726 dev_priv->rps.rp1_freq);
4728 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4729 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4730 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4731 dev_priv->rps.min_freq);
4733 WARN_ONCE((dev_priv->rps.max_freq |
4734 dev_priv->rps.efficient_freq |
4735 dev_priv->rps.rp1_freq |
4736 dev_priv->rps.min_freq) & 1,
4737 "Odd GPU freq values\n");
4739 /* Preserve min/max settings in case of re-init */
4740 if (dev_priv->rps.max_freq_softlimit == 0)
4741 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4743 if (dev_priv->rps.min_freq_softlimit == 0)
4744 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4746 mutex_unlock(&dev_priv->rps.hw_lock);
4749 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
4751 valleyview_cleanup_pctx(dev);
4754 static void cherryview_enable_rps(struct drm_device *dev)
4756 struct drm_i915_private *dev_priv = dev->dev_private;
4757 struct intel_engine_cs *ring;
4758 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
4761 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4763 gtfifodbg = I915_READ(GTFIFODBG);
4764 if (gtfifodbg) {
4765 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4766 gtfifodbg);
4767 I915_WRITE(GTFIFODBG, gtfifodbg);
4770 cherryview_check_pctx(dev_priv);
4772 /* 1a & 1b: Get forcewake during program sequence. Although the driver
4773 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4774 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4776 /* Disable RC states. */
4777 I915_WRITE(GEN6_RC_CONTROL, 0);
4779 /* 2a: Program RC6 thresholds.*/
4780 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4781 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4782 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4784 for_each_ring(ring, dev_priv, i)
4785 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4786 I915_WRITE(GEN6_RC_SLEEP, 0);
4788 /* TO threshold set to 1750 us (0x557 * 1.28 us) */
4789 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4791 /* allows RC6 residency counter to work */
4792 I915_WRITE(VLV_COUNTER_CONTROL,
4793 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4794 VLV_MEDIA_RC6_COUNT_EN |
4795 VLV_RENDER_RC6_COUNT_EN));
4797 /* For now we assume BIOS is allocating and populating the PCBR */
4798 pcbr = I915_READ(VLV_PCBR);
4801 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4802 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4803 rc6_mode = GEN7_RC_CTL_TO_MODE;
4805 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4807 /* 4 Program defaults and thresholds for RPS*/
4808 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
4809 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4810 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4811 I915_WRITE(GEN6_RP_UP_EI, 66000);
4812 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4814 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4817 I915_WRITE(GEN6_RP_CONTROL,
4818 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4819 GEN6_RP_MEDIA_IS_GFX |
4820 GEN6_RP_ENABLE |
4821 GEN6_RP_UP_BUSY_AVG |
4822 GEN6_RP_DOWN_IDLE_AVG);
4824 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4826 /* RPS code assumes GPLL is used */
4827 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
4829 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
4830 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4832 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4833 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4834 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4835 dev_priv->rps.cur_freq);
4837 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4838 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4839 dev_priv->rps.efficient_freq);
4841 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4843 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4846 static void valleyview_enable_rps(struct drm_device *dev)
4848 struct drm_i915_private *dev_priv = dev->dev_private;
4849 struct intel_engine_cs *ring;
4850 u32 gtfifodbg, val, rc6_mode = 0;
4853 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4855 valleyview_check_pctx(dev_priv);
4857 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4858 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4860 I915_WRITE(GTFIFODBG, gtfifodbg);
4863 /* If VLV, Forcewake all wells, else re-direct to regular path */
4864 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4866 /* Disable RC states. */
4867 I915_WRITE(GEN6_RC_CONTROL, 0);
4869 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
4870 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4871 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4872 I915_WRITE(GEN6_RP_UP_EI, 66000);
4873 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4875 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4877 I915_WRITE(GEN6_RP_CONTROL,
4878 GEN6_RP_MEDIA_TURBO |
4879 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4880 GEN6_RP_MEDIA_IS_GFX |
4881 GEN6_RP_ENABLE |
4882 GEN6_RP_UP_BUSY_AVG |
4883 GEN6_RP_DOWN_IDLE_CONT);
4885 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
4886 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4887 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4889 for_each_ring(ring, dev_priv, i)
4890 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4892 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4894 /* allows RC6 residency counter to work */
4895 I915_WRITE(VLV_COUNTER_CONTROL,
4896 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4897 VLV_RENDER_RC0_COUNT_EN |
4898 VLV_MEDIA_RC6_COUNT_EN |
4899 VLV_RENDER_RC6_COUNT_EN));
4901 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4902 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4904 intel_print_rc6_info(dev, rc6_mode);
4906 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4908 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4910 /* RPS code assumes GPLL is used */
4911 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
4913 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
4914 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4916 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4917 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4918 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4919 dev_priv->rps.cur_freq);
4921 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4922 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4923 dev_priv->rps.efficient_freq);
4925 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4927 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4930 void ironlake_teardown_rc6(struct drm_device *dev)
4932 struct drm_i915_private *dev_priv = dev->dev_private;
4934 if (dev_priv->ips.renderctx) {
4935 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
4936 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4937 dev_priv->ips.renderctx = NULL;
4940 if (dev_priv->ips.pwrctx) {
4941 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
4942 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4943 dev_priv->ips.pwrctx = NULL;
4947 static void ironlake_disable_rc6(struct drm_device *dev)
4949 struct drm_i915_private *dev_priv = dev->dev_private;
4951 if (I915_READ(PWRCTXA)) {
4952 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4953 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4954 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4955 50);
4957 I915_WRITE(PWRCTXA, 0);
4958 POSTING_READ(PWRCTXA);
4960 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4961 POSTING_READ(RSTDBYCTL);
4965 static int ironlake_setup_rc6(struct drm_device *dev)
4967 struct drm_i915_private *dev_priv = dev->dev_private;
4969 if (dev_priv->ips.renderctx == NULL)
4970 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4971 if (!dev_priv->ips.renderctx)
4972 return -ENOMEM;
4974 if (dev_priv->ips.pwrctx == NULL)
4975 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4976 if (!dev_priv->ips.pwrctx) {
4977 ironlake_teardown_rc6(dev);
4978 return -ENOMEM;
4979 }
4981 return 0;
4984 static void ironlake_enable_rc6(struct drm_device *dev)
4986 struct drm_i915_private *dev_priv = dev->dev_private;
4987 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
4988 bool was_interruptible;
4991 /* rc6 disabled by default due to repeated reports of hanging during
4992 * boot and resume. */
4994 if (!intel_enable_rc6(dev))
4995 return;
4997 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4999 ret = ironlake_setup_rc6(dev);
5000 if (ret)
5001 return;
5003 was_interruptible = dev_priv->mm.interruptible;
5004 dev_priv->mm.interruptible = false;
5007 * GPU can automatically power down the render unit if given a page
5008 * to save state. */
5010 ret = intel_ring_begin(ring, 6);
5011 if (ret) {
5012 ironlake_teardown_rc6(dev);
5013 dev_priv->mm.interruptible = was_interruptible;
5014 return;
5015 }
5017 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
5018 intel_ring_emit(ring, MI_SET_CONTEXT);
5019 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
5020 MI_MM_SPACE_GTT |
5021 MI_SAVE_EXT_STATE_EN |
5022 MI_RESTORE_EXT_STATE_EN |
5023 MI_RESTORE_INHIBIT);
5024 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
5025 intel_ring_emit(ring, MI_NOOP);
5026 intel_ring_emit(ring, MI_FLUSH);
5027 intel_ring_advance(ring);
5030 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
5031 * does an implicit flush; combined with the MI_FLUSH above, it should be
5032 * safe to assume that renderctx is valid.
5033 */
5034 ret = intel_ring_idle(ring);
5035 dev_priv->mm.interruptible = was_interruptible;
5037 DRM_ERROR("failed to enable ironlake power savings\n");
5038 ironlake_teardown_rc6(dev);
5042 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
5043 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
5045 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
5048 static unsigned long intel_pxfreq(u32 vidfreq)
5049 {
5050 unsigned long freq;
5051 int div = (vidfreq & 0x3f0000) >> 16;
5052 int post = (vidfreq & 0x3000) >> 12;
5053 int pre = (vidfreq & 0x7);
5055 if (!pre)
5056 return 0;
5058 freq = ((div * 133333) / ((1<<post) * pre));
5060 return freq;
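/*
 * For example, a (hypothetical) vidfreq field with div = 16,
 * post = 1 and pre = 2 yields (16 * 133333) / ((1 << 1) * 2) =
 * 533332, i.e. a ~533 MHz Px state if the 133333 constant is read
 * as the 133.33 MHz reference clock in kHz.
 */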
5063 static const struct cparams {
5064 u16 i;
5065 u16 t;
5066 u16 m;
5067 u16 c;
5068 } cparams[] = {
5069 { 1, 1333, 301, 28664 },
5070 { 1, 1066, 294, 24460 },
5071 { 1, 800, 294, 25192 },
5072 { 0, 1333, 276, 27605 },
5073 { 0, 1066, 276, 27605 },
5074 { 0, 800, 231, 23784 },
5075 };
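/*
 * Each row is keyed by the (i, t) pair matched against ips.c_m and
 * ips.r_t below; the m and c columns are the slope and intercept of
 * the linear power fit evaluated in __i915_chipset_val().
 */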
5077 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5079 u64 total_count, diff, ret;
5080 u32 count1, count2, count3, m = 0, c = 0;
5081 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5084 assert_spin_locked(&mchdev_lock);
5086 diff1 = now - dev_priv->ips.last_time1;
5088 /* Prevent division-by-zero if we are asking too fast.
5089 * Also, we don't get interesting results if we are polling
5090 * faster than once in 10ms, so just return the saved value
5091 * in such cases. */
5093 if (diff1 <= 10)
5094 return dev_priv->ips.chipset_power;
5096 count1 = I915_READ(DMIEC);
5097 count2 = I915_READ(DDREC);
5098 count3 = I915_READ(CSIEC);
5100 total_count = count1 + count2 + count3;
5102 /* FIXME: handle per-counter overflow */
5103 if (total_count < dev_priv->ips.last_count1) {
5104 diff = ~0UL - dev_priv->ips.last_count1;
5105 diff += total_count;
5106 } else {
5107 diff = total_count - dev_priv->ips.last_count1;
5110 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5111 if (cparams[i].i == dev_priv->ips.c_m &&
5112 cparams[i].t == dev_priv->ips.r_t) {
5113 m = cparams[i].m;
5114 c = cparams[i].c;
5115 break;
5116 }
5117 }
5119 diff = div_u64(diff, diff1);
5120 ret = ((m * diff) + c);
5121 ret = div_u64(ret, 10);
5123 dev_priv->ips.last_count1 = total_count;
5124 dev_priv->ips.last_time1 = now;
5126 dev_priv->ips.chipset_power = ret;
5131 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5133 struct drm_device *dev = dev_priv->dev;
5136 if (INTEL_INFO(dev)->gen != 5)
5137 return 0;
5139 spin_lock_irq(&mchdev_lock);
5141 val = __i915_chipset_val(dev_priv);
5143 spin_unlock_irq(&mchdev_lock);
5145 return val;
5148 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5150 unsigned long m, x, b;
5153 tsfs = I915_READ(TSFS);
5155 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5156 x = I915_READ8(TR1);
5158 b = tsfs & TSFS_INTR_MASK;
5160 return ((m * x) / 127) - b;
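/*
 * A fused linear calibration: slope m and intercept b come from
 * the TSFS register, x is the raw TR1 thermal reading, and the
 * /127 rescales the 7-bit slope field into the temperature-like
 * value consumed by __i915_gfx_val().
 */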
5163 static int _pxvid_to_vd(u8 pxvid)
5165 if (pxvid == 0)
5166 return 0;
5168 if (pxvid >= 8 && pxvid < 31)
5169 pxvid = 31;
5171 return (pxvid + 2) * 125;
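/*
 * For example, a (hypothetical) pxvid of 0x28 maps to
 * (0x28 + 2) * 125 = 5250, from which pvid_to_extvid() below
 * subtracts the 1125 baseline on mobile parts.
 */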
5174 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5176 struct drm_device *dev = dev_priv->dev;
5177 const int vd = _pxvid_to_vd(pxvid);
5178 const int vm = vd - 1125;
5180 if (INTEL_INFO(dev)->is_mobile)
5181 return vm > 0 ? vm : 0;
5182 else
5183 return vd;
5186 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5188 u64 now, diff, diffms;
5191 assert_spin_locked(&mchdev_lock);
5193 now = ktime_get_raw_ns();
5194 diffms = now - dev_priv->ips.last_time2;
5195 do_div(diffms, NSEC_PER_MSEC);
5197 /* Don't divide by 0 */
5198 if (!diffms)
5199 return;
5201 count = I915_READ(GFXEC);
5203 if (count < dev_priv->ips.last_count2) {
5204 diff = ~0UL - dev_priv->ips.last_count2;
5205 diff += count;
5206 } else {
5207 diff = count - dev_priv->ips.last_count2;
5210 dev_priv->ips.last_count2 = count;
5211 dev_priv->ips.last_time2 = now;
5213 /* More magic constants... */
5214 diff = diff * 1181;
5215 diff = div_u64(diff, diffms * 10);
5216 dev_priv->ips.gfx_power = diff;
5219 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5221 struct drm_device *dev = dev_priv->dev;
5223 if (INTEL_INFO(dev)->gen != 5)
5224 return;
5226 spin_lock_irq(&mchdev_lock);
5228 __i915_update_gfx_val(dev_priv);
5230 spin_unlock_irq(&mchdev_lock);
5233 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5235 unsigned long t, corr, state1, corr2, state2;
5238 assert_spin_locked(&mchdev_lock);
5240 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
5241 pxvid = (pxvid >> 24) & 0x7f;
5242 ext_v = pvid_to_extvid(dev_priv, pxvid);
5244 state1 = ext_v;
5246 t = i915_mch_val(dev_priv);
5248 /* Revel in the empirically derived constants */
5250 /* Correction factor in 1/100000 units */
5251 if (t > 80)
5252 corr = ((t * 2349) + 135940);
5253 else if (t >= 50)
5254 corr = ((t * 964) + 29317);
5255 else /* < 50 */
5256 corr = ((t * 301) + 1004);
5258 corr = corr * ((150142 * state1) / 10000 - 78642);
5259 corr /= 100000;
5260 corr2 = (corr * dev_priv->ips.corr);
5262 state2 = (corr2 * state1) / 10000;
5263 state2 /= 100; /* convert to mW */
5265 __i915_update_gfx_val(dev_priv);
5267 return dev_priv->ips.gfx_power + state2;
5270 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5272 struct drm_device *dev = dev_priv->dev;
5275 if (INTEL_INFO(dev)->gen != 5)
5276 return 0;
5278 spin_lock_irq(&mchdev_lock);
5280 val = __i915_gfx_val(dev_priv);
5282 spin_unlock_irq(&mchdev_lock);
5284 return val;
5288 * i915_read_mch_val - return value for IPS use
5290 * Calculate and return a value for the IPS driver to use when deciding whether
5291 * we have thermal and power headroom to increase CPU or GPU power budget.
5293 unsigned long i915_read_mch_val(void)
5295 struct drm_i915_private *dev_priv;
5296 unsigned long chipset_val, graphics_val, ret = 0;
5298 spin_lock_irq(&mchdev_lock);
5299 if (!i915_mch_dev)
5300 goto out_unlock;
5301 dev_priv = i915_mch_dev;
5303 chipset_val = __i915_chipset_val(dev_priv);
5304 graphics_val = __i915_gfx_val(dev_priv);
5306 ret = chipset_val + graphics_val;
5308 out_unlock:
5309 spin_unlock_irq(&mchdev_lock);
5311 return ret;
5313 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5316 * i915_gpu_raise - raise GPU frequency limit
5318 * Raise the limit; IPS indicates we have thermal headroom.
5320 bool i915_gpu_raise(void)
5322 struct drm_i915_private *dev_priv;
5323 bool ret = true;
5325 spin_lock_irq(&mchdev_lock);
5326 if (!i915_mch_dev) {
5327 ret = false;
5328 goto out_unlock;
5329 }
5330 dev_priv = i915_mch_dev;
5332 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5333 dev_priv->ips.max_delay--;
5335 out_unlock:
5336 spin_unlock_irq(&mchdev_lock);
5338 return ret;
5340 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5343 * i915_gpu_lower - lower GPU frequency limit
5345 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5346 * frequency maximum.
5348 bool i915_gpu_lower(void)
5350 struct drm_i915_private *dev_priv;
5351 bool ret = true;
5353 spin_lock_irq(&mchdev_lock);
5354 if (!i915_mch_dev) {
5355 ret = false;
5356 goto out_unlock;
5357 }
5358 dev_priv = i915_mch_dev;
5360 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5361 dev_priv->ips.max_delay++;
5363 out_unlock:
5364 spin_unlock_irq(&mchdev_lock);
5366 return ret;
5368 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5371 * i915_gpu_busy - indicate GPU business to IPS
5373 * Tell the IPS driver whether or not the GPU is busy.
5375 bool i915_gpu_busy(void)
5377 struct drm_i915_private *dev_priv;
5378 struct intel_engine_cs *ring;
5379 bool ret = false;
5380 int i;
5382 spin_lock_irq(&mchdev_lock);
5383 if (!i915_mch_dev)
5384 goto out_unlock;
5385 dev_priv = i915_mch_dev;
5387 for_each_ring(ring, dev_priv, i)
5388 ret |= !list_empty(&ring->request_list);
5390 out_unlock:
5391 spin_unlock_irq(&mchdev_lock);
5393 return ret;
5395 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5398 * i915_gpu_turbo_disable - disable graphics turbo
5400 * Disable graphics turbo by resetting the max frequency and setting the
5401 * current frequency to the default.
5403 bool i915_gpu_turbo_disable(void)
5405 struct drm_i915_private *dev_priv;
5406 bool ret = true;
5408 spin_lock_irq(&mchdev_lock);
5409 if (!i915_mch_dev) {
5410 ret = false;
5411 goto out_unlock;
5412 }
5413 dev_priv = i915_mch_dev;
5415 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5417 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5418 ret = false;
5420 out_unlock:
5421 spin_unlock_irq(&mchdev_lock);
5423 return ret;
5425 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5428 * Tells the intel_ips driver that the i915 driver is now loaded, if
5429 * IPS got loaded first.
5431 * This awkward dance is so that neither module has to depend on the
5432 * other in order for IPS to do the appropriate communication of
5433 * GPU turbo limits to i915.
5436 ips_ping_for_i915_load(void)
5438 void (*link)(void);
5440 link = symbol_get(ips_link_to_i915_driver);
5441 if (link) {
5442 link();
5443 symbol_put(ips_link_to_i915_driver);
5444 }
5447 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5449 /* We only register the i915 ips part with intel-ips once everything is
5450 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5451 spin_lock_irq(&mchdev_lock);
5452 i915_mch_dev = dev_priv;
5453 spin_unlock_irq(&mchdev_lock);
5455 ips_ping_for_i915_load();
5458 void intel_gpu_ips_teardown(void)
5460 spin_lock_irq(&mchdev_lock);
5461 i915_mch_dev = NULL;
5462 spin_unlock_irq(&mchdev_lock);
5465 static void intel_init_emon(struct drm_device *dev)
5467 struct drm_i915_private *dev_priv = dev->dev_private;
5472 /* Disable to program */
5473 I915_WRITE(ECR, 0);
5476 /* Program energy weights for various events */
5477 I915_WRITE(SDEW, 0x15040d00);
5478 I915_WRITE(CSIEW0, 0x007f0000);
5479 I915_WRITE(CSIEW1, 0x1e220004);
5480 I915_WRITE(CSIEW2, 0x04000004);
5482 for (i = 0; i < 5; i++)
5483 I915_WRITE(PEW + (i * 4), 0);
5484 for (i = 0; i < 3; i++)
5485 I915_WRITE(DEW + (i * 4), 0);
5487 /* Program P-state weights to account for frequency power adjustment */
5488 for (i = 0; i < 16; i++) {
5489 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5490 unsigned long freq = intel_pxfreq(pxvidfreq);
5491 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5492 PXVFREQ_PX_SHIFT;
5493 unsigned long val;
5495 val = vid * vid;
5496 val *= (freq / 1000);
5497 val *= 255;
5498 val /= (127*127*900);
5499 if (val > 0xff)
5500 DRM_ERROR("bad pxval: %ld\n", val);
5501 pxw[i] = val;
5502 }
5503 /* Render standby states get 0 weight */
5504 pxw[14] = 0;
5505 pxw[15] = 0;
5507 for (i = 0; i < 4; i++) {
5508 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5509 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5510 I915_WRITE(PXW + (i * 4), val);
5513 /* Adjust magic regs to magic values (more experimental results) */
5514 I915_WRITE(OGW0, 0);
5515 I915_WRITE(OGW1, 0);
5516 I915_WRITE(EG0, 0x00007f00);
5517 I915_WRITE(EG1, 0x0000000e);
5518 I915_WRITE(EG2, 0x000e0000);
5519 I915_WRITE(EG3, 0x68000300);
5520 I915_WRITE(EG4, 0x42000000);
5521 I915_WRITE(EG5, 0x00140031);
5522 I915_WRITE(EG6, 0);
5523 I915_WRITE(EG7, 0);
5525 for (i = 0; i < 8; i++)
5526 I915_WRITE(PXWL + (i * 4), 0);
5528 /* Enable PMON + select events */
5529 I915_WRITE(ECR, 0x80000019);
5531 lcfuse = I915_READ(LCFUSE02);
5533 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
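/*
 * The EMON weights programmed above drive the GFXEC energy counter
 * that __i915_update_gfx_val() reads, while ips.corr caches the
 * per-part correction fuse consumed by __i915_gfx_val().
 */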
5536 void intel_init_gt_powersave(struct drm_device *dev)
5538 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
5540 if (IS_CHERRYVIEW(dev))
5541 cherryview_init_gt_powersave(dev);
5542 else if (IS_VALLEYVIEW(dev))
5543 valleyview_init_gt_powersave(dev);
5546 void intel_cleanup_gt_powersave(struct drm_device *dev)
5548 if (IS_CHERRYVIEW(dev))
5549 return;
5550 else if (IS_VALLEYVIEW(dev))
5551 valleyview_cleanup_gt_powersave(dev);
5554 static void gen6_suspend_rps(struct drm_device *dev)
5556 struct drm_i915_private *dev_priv = dev->dev_private;
5558 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5561 * TODO: disable RPS interrupts on GEN9+ too once RPS support
5562 * is added for it. */
5564 if (INTEL_INFO(dev)->gen < 9)
5565 gen6_disable_rps_interrupts(dev);
5569 * intel_suspend_gt_powersave - suspend PM work and helper threads
5572 * We don't want to disable RC6 or other features here, we just want
5573 * to make sure any work we've queued has finished and won't bother
5574 * us while we're suspended.
5576 void intel_suspend_gt_powersave(struct drm_device *dev)
5578 struct drm_i915_private *dev_priv = dev->dev_private;
5580 if (INTEL_INFO(dev)->gen < 6)
5581 return;
5583 gen6_suspend_rps(dev);
5585 /* Force GPU to min freq during suspend */
5586 gen6_rps_idle(dev_priv);
5589 void intel_disable_gt_powersave(struct drm_device *dev)
5591 struct drm_i915_private *dev_priv = dev->dev_private;
5593 if (IS_IRONLAKE_M(dev)) {
5594 ironlake_disable_drps(dev);
5595 ironlake_disable_rc6(dev);
5596 } else if (INTEL_INFO(dev)->gen >= 6) {
5597 intel_suspend_gt_powersave(dev);
5599 mutex_lock(&dev_priv->rps.hw_lock);
5600 if (INTEL_INFO(dev)->gen >= 9)
5601 gen9_disable_rps(dev);
5602 else if (IS_CHERRYVIEW(dev))
5603 cherryview_disable_rps(dev);
5604 else if (IS_VALLEYVIEW(dev))
5605 valleyview_disable_rps(dev);
5606 else
5607 gen6_disable_rps(dev);
5609 dev_priv->rps.enabled = false;
5610 mutex_unlock(&dev_priv->rps.hw_lock);
5614 static void intel_gen6_powersave_work(struct work_struct *work)
5616 struct drm_i915_private *dev_priv =
5617 container_of(work, struct drm_i915_private,
5618 rps.delayed_resume_work.work);
5619 struct drm_device *dev = dev_priv->dev;
5621 mutex_lock(&dev_priv->rps.hw_lock);
5624 * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
5625 * added for it. */
5627 if (INTEL_INFO(dev)->gen < 9)
5628 gen6_reset_rps_interrupts(dev);
5630 if (IS_CHERRYVIEW(dev)) {
5631 cherryview_enable_rps(dev);
5632 } else if (IS_VALLEYVIEW(dev)) {
5633 valleyview_enable_rps(dev);
5634 } else if (INTEL_INFO(dev)->gen >= 9) {
5635 gen9_enable_rc6(dev);
5636 gen9_enable_rps(dev);
5637 __gen6_update_ring_freq(dev);
5638 } else if (IS_BROADWELL(dev)) {
5639 gen8_enable_rps(dev);
5640 __gen6_update_ring_freq(dev);
5641 } else {
5642 gen6_enable_rps(dev);
5643 __gen6_update_ring_freq(dev);
5644 }
5645 dev_priv->rps.enabled = true;
5647 if (INTEL_INFO(dev)->gen < 9)
5648 gen6_enable_rps_interrupts(dev);
5650 mutex_unlock(&dev_priv->rps.hw_lock);
5652 intel_runtime_pm_put(dev_priv);
5655 void intel_enable_gt_powersave(struct drm_device *dev)
5657 struct drm_i915_private *dev_priv = dev->dev_private;
5659 /* Powersaving is controlled by the host when inside a VM */
5660 if (intel_vgpu_active(dev))
5661 return;
5663 if (IS_IRONLAKE_M(dev)) {
5664 mutex_lock(&dev->struct_mutex);
5665 ironlake_enable_drps(dev);
5666 ironlake_enable_rc6(dev);
5667 intel_init_emon(dev);
5668 mutex_unlock(&dev->struct_mutex);
5669 } else if (INTEL_INFO(dev)->gen >= 6) {
5671 * PCU communication is slow and this doesn't need to be
5672 * done at any specific time, so do this out of our fast path
5673 * to make resume and init faster.
5675 * We depend on the HW RC6 power context save/restore
5676 * mechanism when entering D3 through runtime PM suspend. So
5677 * disable RPM until RPS/RC6 is properly setup. We can only
5678 * get here via the driver load/system resume/runtime resume
5679 * paths, so the _noresume version is enough (and in case of
5680 * runtime resume it's necessary).
5682 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
5683 round_jiffies_up_relative(HZ)))
5684 intel_runtime_pm_get_noresume(dev_priv);
5688 void intel_reset_gt_powersave(struct drm_device *dev)
5690 struct drm_i915_private *dev_priv = dev->dev_private;
5692 if (INTEL_INFO(dev)->gen < 6)
5693 return;
5695 gen6_suspend_rps(dev);
5696 dev_priv->rps.enabled = false;
5699 static void ibx_init_clock_gating(struct drm_device *dev)
5701 struct drm_i915_private *dev_priv = dev->dev_private;
5704 * On Ibex Peak and Cougar Point, we need to disable clock
5705 * gating for the panel power sequencer or it will fail to
5706 * start up when no ports are active.
5708 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5711 static void g4x_disable_trickle_feed(struct drm_device *dev)
5713 struct drm_i915_private *dev_priv = dev->dev_private;
5716 for_each_pipe(dev_priv, pipe) {
5717 I915_WRITE(DSPCNTR(pipe),
5718 I915_READ(DSPCNTR(pipe)) |
5719 DISPPLANE_TRICKLE_FEED_DISABLE);
5720 intel_flush_primary_plane(dev_priv, pipe);
5724 static void ilk_init_lp_watermarks(struct drm_device *dev)
5726 struct drm_i915_private *dev_priv = dev->dev_private;
5728 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5729 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5730 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5733 * Don't touch WM1S_LP_EN here.
5734 * Doing so could cause underruns.
5738 static void ironlake_init_clock_gating(struct drm_device *dev)
5740 struct drm_i915_private *dev_priv = dev->dev_private;
5741 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5744 * Required for FBC
5745 * WaFbcDisableDpfcClockGating:ilk
5747 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5748 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5749 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5751 I915_WRITE(PCH_3DCGDIS0,
5752 MARIUNIT_CLOCK_GATE_DISABLE |
5753 SVSMUNIT_CLOCK_GATE_DISABLE);
5754 I915_WRITE(PCH_3DCGDIS1,
5755 VFMUNIT_CLOCK_GATE_DISABLE);
5758 * According to the spec the following bits should be set in
5759 * order to enable memory self-refresh
5760 * The bit 22/21 of 0x42004
5761 * The bit 5 of 0x42020
5762 * The bit 15 of 0x45000
5764 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5765 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5766 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5767 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5768 I915_WRITE(DISP_ARB_CTL,
5769 (I915_READ(DISP_ARB_CTL) |
5770 DISP_FBC_WM_DIS));
5772 ilk_init_lp_watermarks(dev);
5775 * Based on the document from hardware guys the following bits
5776 * should be set unconditionally in order to enable FBC.
5777 * The bit 22 of 0x42000
5778 * The bit 22 of 0x42004
5779 * The bit 7,8,9 of 0x42020.
5781 if (IS_IRONLAKE_M(dev)) {
5782 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
5783 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5784 I915_READ(ILK_DISPLAY_CHICKEN1) |
5785 ILK_FBCQ_DIS);
5786 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5787 I915_READ(ILK_DISPLAY_CHICKEN2) |
5788 ILK_DPARB_GATE);
5789 }
5791 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5793 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5794 I915_READ(ILK_DISPLAY_CHICKEN2) |
5795 ILK_ELPIN_409_SELECT);
5796 I915_WRITE(_3D_CHICKEN2,
5797 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5798 _3D_CHICKEN2_WM_READ_PIPELINED);
5800 /* WaDisableRenderCachePipelinedFlush:ilk */
5801 I915_WRITE(CACHE_MODE_0,
5802 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5804 /* WaDisable_RenderCache_OperationalFlush:ilk */
5805 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5807 g4x_disable_trickle_feed(dev);
5809 ibx_init_clock_gating(dev);
5812 static void cpt_init_clock_gating(struct drm_device *dev)
5814 struct drm_i915_private *dev_priv = dev->dev_private;
5815 int pipe;
5816 uint32_t val;
5819 * On Ibex Peak and Cougar Point, we need to disable clock
5820 * gating for the panel power sequencer or it will fail to
5821 * start up when no ports are active.
5823 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5824 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5825 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5826 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5827 DPLS_EDP_PPS_FIX_DIS);
5828 /* The below fixes a weird display corruption, a few pixels shifted
5829 * downward, seen only on the LVDS panels of some HP laptops with Ivy Bridge.
5830 */
5831 for_each_pipe(dev_priv, pipe) {
5832 val = I915_READ(TRANS_CHICKEN2(pipe));
5833 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5834 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5835 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5836 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5837 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5838 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5839 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5840 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5842 /* WADP0ClockGatingDisable */
5843 for_each_pipe(dev_priv, pipe) {
5844 I915_WRITE(TRANS_CHICKEN1(pipe),
5845 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5849 static void gen6_check_mch_setup(struct drm_device *dev)
5851 struct drm_i915_private *dev_priv = dev->dev_private;
5854 tmp = I915_READ(MCH_SSKPD);
5855 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
5856 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
5860 static void gen6_init_clock_gating(struct drm_device *dev)
5862 struct drm_i915_private *dev_priv = dev->dev_private;
5863 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5865 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5867 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5868 I915_READ(ILK_DISPLAY_CHICKEN2) |
5869 ILK_ELPIN_409_SELECT);
5871 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
5872 I915_WRITE(_3D_CHICKEN,
5873 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5875 /* WaDisable_RenderCache_OperationalFlush:snb */
5876 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5879 * BSpec recommends 8x4 when MSAA is used,
5880 * however in practice 16x4 seems fastest.
5882 * Note that PS/WM thread counts depend on the WIZ hashing
5883 * disable bit, which we don't touch here, but it's good
5884 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5886 I915_WRITE(GEN6_GT_MODE,
5887 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
5889 ilk_init_lp_watermarks(dev);
5891 I915_WRITE(CACHE_MODE_0,
5892 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5894 I915_WRITE(GEN6_UCGCTL1,
5895 I915_READ(GEN6_UCGCTL1) |
5896 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5897 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5899 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5900 * gating disable must be set. Failure to set it results in
5901 * flickering pixels due to Z write ordering failures after
5902 * some amount of runtime in the Mesa "fire" demo, and Unigine
5903 * Sanctuary and Tropics, and apparently anything else with
5904 * alpha test or pixel discard.
5906 * According to the spec, bit 11 (RCCUNIT) must also be set,
5907 * but we didn't debug actual testcases to find it out.
5909 * WaDisableRCCUnitClockGating:snb
5910 * WaDisableRCPBUnitClockGating:snb
5912 I915_WRITE(GEN6_UCGCTL2,
5913 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5914 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5916 /* WaStripsFansDisableFastClipPerformanceFix:snb */
5917 I915_WRITE(_3D_CHICKEN3,
5918 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
5922 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
5923 * 3DSTATE_SF number of SF output attributes is more than 16."
5925 I915_WRITE(_3D_CHICKEN3,
5926 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
5929 * According to the spec the following bits should be
5930 * set in order to enable memory self-refresh and fbc:
5931 * The bit21 and bit22 of 0x42000
5932 * The bit21 and bit22 of 0x42004
5933 * The bit5 and bit7 of 0x42020
5934 * The bit14 of 0x70180
5935 * The bit14 of 0x71180
5937 * WaFbcAsynchFlipDisableFbcQueue:snb
5939 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5940 I915_READ(ILK_DISPLAY_CHICKEN1) |
5941 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5942 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5943 I915_READ(ILK_DISPLAY_CHICKEN2) |
5944 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5945 I915_WRITE(ILK_DSPCLK_GATE_D,
5946 I915_READ(ILK_DSPCLK_GATE_D) |
5947 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5948 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5950 g4x_disable_trickle_feed(dev);
5952 cpt_init_clock_gating(dev);
5954 gen6_check_mch_setup(dev);
5957 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5959 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5962 * WaVSThreadDispatchOverride:ivb,vlv
5964 * This actually overrides the dispatch
5965 * mode for all thread types.
5967 reg &= ~GEN7_FF_SCHED_MASK;
5968 reg |= GEN7_FF_TS_SCHED_HW;
5969 reg |= GEN7_FF_VS_SCHED_HW;
5970 reg |= GEN7_FF_DS_SCHED_HW;
5972 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5975 static void lpt_init_clock_gating(struct drm_device *dev)
5977 struct drm_i915_private *dev_priv = dev->dev_private;
5980 * TODO: this bit should only be enabled when really needed, then
5981 * disabled when not needed anymore in order to save power.
5983 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5984 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5985 I915_READ(SOUTH_DSPCLK_GATE_D) |
5986 PCH_LP_PARTITION_LEVEL_DISABLE);
5988 /* WADPOClockGatingDisable:hsw */
5989 I915_WRITE(_TRANSA_CHICKEN1,
5990 I915_READ(_TRANSA_CHICKEN1) |
5991 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5994 static void lpt_suspend_hw(struct drm_device *dev)
5996 struct drm_i915_private *dev_priv = dev->dev_private;
5998 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5999 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6001 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6002 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6006 static void broadwell_init_clock_gating(struct drm_device *dev)
6008 struct drm_i915_private *dev_priv = dev->dev_private;
6011 I915_WRITE(WM3_LP_ILK, 0);
6012 I915_WRITE(WM2_LP_ILK, 0);
6013 I915_WRITE(WM1_LP_ILK, 0);
6015 /* WaSwitchSolVfFArbitrationPriority:bdw */
6016 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6018 /* WaPsrDPAMaskVBlankInSRD:bdw */
6019 I915_WRITE(CHICKEN_PAR1_1,
6020 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6022 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
6023 for_each_pipe(dev_priv, pipe) {
6024 I915_WRITE(CHICKEN_PIPESL_1(pipe),
6025 I915_READ(CHICKEN_PIPESL_1(pipe)) |
6026 BDW_DPRS_MASK_VBLANK_SRD);
6029 /* WaVSRefCountFullforceMissDisable:bdw */
6030 /* WaDSRefCountFullforceMissDisable:bdw */
6031 I915_WRITE(GEN7_FF_THREAD_MODE,
6032 I915_READ(GEN7_FF_THREAD_MODE) &
6033 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6035 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6036 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6038 /* WaDisableSDEUnitClockGating:bdw */
6039 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6040 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6042 lpt_init_clock_gating(dev);
6045 static void haswell_init_clock_gating(struct drm_device *dev)
6047 struct drm_i915_private *dev_priv = dev->dev_private;
6049 ilk_init_lp_watermarks(dev);
6051 /* L3 caching of data atomics doesn't work -- disable it. */
6052 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6053 I915_WRITE(HSW_ROW_CHICKEN3,
6054 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6056 /* This is required by WaCatErrorRejectionIssue:hsw */
6057 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6058 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6059 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6061 /* WaVSRefCountFullforceMissDisable:hsw */
6062 I915_WRITE(GEN7_FF_THREAD_MODE,
6063 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
6065 /* WaDisable_RenderCache_OperationalFlush:hsw */
6066 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6068 /* enable HiZ Raw Stall Optimization */
6069 I915_WRITE(CACHE_MODE_0_GEN7,
6070 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6072 /* WaDisable4x2SubspanOptimization:hsw */
6073 I915_WRITE(CACHE_MODE_1,
6074 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6077 * BSpec recommends 8x4 when MSAA is used,
6078 * however in practice 16x4 seems fastest.
6080 * Note that PS/WM thread counts depend on the WIZ hashing
6081 * disable bit, which we don't touch here, but it's good
6082 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6084 I915_WRITE(GEN7_GT_MODE,
6085 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6087 /* WaSampleCChickenBitEnable:hsw */
6088 I915_WRITE(HALF_SLICE_CHICKEN3,
6089 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6091 /* WaSwitchSolVfFArbitrationPriority:hsw */
6092 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6094 /* WaRsPkgCStateDisplayPMReq:hsw */
6095 I915_WRITE(CHICKEN_PAR1_1,
6096 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
6098 lpt_init_clock_gating(dev);
6101 static void ivybridge_init_clock_gating(struct drm_device *dev)
6103 struct drm_i915_private *dev_priv = dev->dev_private;
6106 ilk_init_lp_watermarks(dev);
6108 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
6110 /* WaDisableEarlyCull:ivb */
6111 I915_WRITE(_3D_CHICKEN3,
6112 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6114 /* WaDisableBackToBackFlipFix:ivb */
6115 I915_WRITE(IVB_CHICKEN3,
6116 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6117 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6119 /* WaDisablePSDDualDispatchEnable:ivb */
6120 if (IS_IVB_GT1(dev))
6121 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6122 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6124 /* WaDisable_RenderCache_OperationalFlush:ivb */
6125 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6127 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
6128 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
6129 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
6131 /* WaApplyL3ControlAndL3ChickenMode:ivb */
6132 I915_WRITE(GEN7_L3CNTLREG1,
6133 GEN7_WA_FOR_GEN7_L3_CONTROL);
6134 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
6135 GEN7_WA_L3_CHICKEN_MODE);
6136 if (IS_IVB_GT1(dev))
6137 I915_WRITE(GEN7_ROW_CHICKEN2,
6138 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6140 /* must write both registers */
6141 I915_WRITE(GEN7_ROW_CHICKEN2,
6142 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6143 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
6144 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6147 /* WaForceL3Serialization:ivb */
6148 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6149 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6152 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
6153 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
6155 I915_WRITE(GEN6_UCGCTL2,
6156 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6158 /* This is required by WaCatErrorRejectionIssue:ivb */
6159 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6160 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6161 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6163 g4x_disable_trickle_feed(dev);
6165 gen7_setup_fixed_func_scheduler(dev_priv);
6167 if (0) { /* causes HiZ corruption on ivb:gt1 */
6168 /* enable HiZ Raw Stall Optimization */
6169 I915_WRITE(CACHE_MODE_0_GEN7,
6170 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6173 /* WaDisable4x2SubspanOptimization:ivb */
6174 I915_WRITE(CACHE_MODE_1,
6175 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6178 * BSpec recommends 8x4 when MSAA is used,
6179 * however in practice 16x4 seems fastest.
6181 * Note that PS/WM thread counts depend on the WIZ hashing
6182 * disable bit, which we don't touch here, but it's good
6183 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6185 I915_WRITE(GEN7_GT_MODE,
6186 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
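/*
 * Invoke the platform-specific clock gating hook picked in intel_init_pm()
 * below; does nothing if no hook was installed for this device.
 */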
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.init_clock_gating)
		dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		dev_priv->display.init_clock_gating = skl_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
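/*
 * The pcode mailbox handshake, as implemented below: the mailbox must be
 * idle (GEN6_PCODE_READY clear) before use; the caller's value is placed
 * in the data register(s), the command is kicked off by writing
 * GEN6_PCODE_READY | mbox, and completion is signalled by the hardware
 * clearing the READY bit again, at which point the reply can be read
 * back from GEN6_PCODE_DATA. rps.hw_lock serialises all mailbox users.
 */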
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
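/*
 * Same handshake as sandybridge_pcode_read() above, except the data
 * register is only written, never read back.
 */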
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
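/*
 * GPU frequencies on VLV/CHV are opcodes scaled off the CZ (czclk)
 * reference clock. Return the opcode<->MHz divider for a given CZ
 * clock, or -1 for an unrecognised clock so callers can bail out.
 */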
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
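/*
 * CHV differs from BYT in two ways: the CZ clock is taken from
 * rps.cz_freq rather than derived from mem_freq, and frequency opcodes
 * must stay even, hence the extra factor of two in the helpers below.
 */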
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = dev_priv->rps.cz_freq;

	div = vlv_gpu_freq_div(czclk_freq) / 2;
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = dev_priv->rps.cz_freq;

	mul = vlv_gpu_freq_div(czclk_freq) / 2;
	if (mul < 0)
		return mul;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
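/*
 * Convert a GPU frequency opcode to MHz (and back, below). On platforms
 * other than VLV/CHV the opcode is simply a multiple of
 * GT_FREQUENCY_MULTIPLIER (50 MHz), so e.g. opcode 6 maps to 300 MHz.
 */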
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_freq_opcode(dev_priv, val);
	else
		return val / GT_FREQUENCY_MULTIPLIER;
}
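/*
 * One-time setup of the power management bookkeeping: the rps.hw_lock
 * mutex taken around pcode and RPS accesses, and the delayed work used
 * to bring up gen6+ power saving after resume.
 */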
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
}