drm/i915/skl: Implement enable/disable for Display C5 state.
[linux-block.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51
/* DC5 support is only wired up for Skylake among the gen9 platforms here. */
#define GEN9_ENABLE_DC5(dev) (IS_SKYLAKE(dev))
53
/*
 * Iterate over the power wells of @power_domains (in array order) whose
 * domain bitmask intersects @domain_mask.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

/* Same as for_each_power_well() but walks the wells in reverse order. */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
66
67 /*
68  * We should only use the power well if we explicitly asked the hardware to
69  * enable it, so check if it's enabled and also check if we've requested it to
70  * be enabled.
71  */
72 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
73                                    struct i915_power_well *power_well)
74 {
75         return I915_READ(HSW_PWR_WELL_DRIVER) ==
76                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
77 }
78
79 /**
80  * __intel_display_power_is_enabled - unlocked check for a power domain
81  * @dev_priv: i915 device instance
82  * @domain: power domain to check
83  *
84  * This is the unlocked version of intel_display_power_is_enabled() and should
85  * only be used from error capture and recovery code where deadlocks are
86  * possible.
87  *
88  * Returns:
89  * True when the power domain is enabled, false otherwise.
90  */
91 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
92                                       enum intel_display_power_domain domain)
93 {
94         struct i915_power_domains *power_domains;
95         struct i915_power_well *power_well;
96         bool is_enabled;
97         int i;
98
99         if (dev_priv->pm.suspended)
100                 return false;
101
102         power_domains = &dev_priv->power_domains;
103
104         is_enabled = true;
105
106         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
107                 if (power_well->always_on)
108                         continue;
109
110                 if (!power_well->hw_enabled) {
111                         is_enabled = false;
112                         break;
113                 }
114         }
115
116         return is_enabled;
117 }
118
119 /**
120  * intel_display_power_is_enabled - check for a power domain
121  * @dev_priv: i915 device instance
122  * @domain: power domain to check
123  *
124  * This function can be used to check the hw power domain state. It is mostly
125  * used in hardware state readout functions. Everywhere else code should rely
126  * upon explicit power domain reference counting to ensure that the hardware
127  * block is powered up before accessing it.
128  *
129  * Callers must hold the relevant modesetting locks to ensure that concurrent
130  * threads can't disable the power well while the caller tries to read a few
131  * registers.
132  *
133  * Returns:
134  * True when the power domain is enabled, false otherwise.
135  */
136 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
137                                     enum intel_display_power_domain domain)
138 {
139         struct i915_power_domains *power_domains;
140         bool ret;
141
142         power_domains = &dev_priv->power_domains;
143
144         mutex_lock(&power_domains->lock);
145         ret = __intel_display_power_is_enabled(dev_priv, domain);
146         mutex_unlock(&power_domains->lock);
147
148         return ret;
149 }
150
151 /**
152  * intel_display_set_init_power - set the initial power domain state
153  * @dev_priv: i915 device instance
154  * @enable: whether to enable or disable the initial power domain state
155  *
156  * For simplicity our driver load/unload and system suspend/resume code assumes
157  * that all power domains are always enabled. This functions controls the state
158  * of this little hack. While the initial power domain state is enabled runtime
159  * pm is effectively disabled.
160  */
161 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
162                                   bool enable)
163 {
164         if (dev_priv->power_domains.init_power_on == enable)
165                 return;
166
167         if (enable)
168                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
169         else
170                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
171
172         dev_priv->power_domains.init_power_on = enable;
173 }
174
175 /*
176  * Starting with Haswell, we have a "Power Down Well" that can be turned off
177  * when not needed anymore. We have 4 registers that can request the power well
178  * to be enabled, and it will only be disabled if none of the registers is
179  * requesting it to be enabled.
180  */
/* Restore state that is lost while the display power well is powered down. */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* Pipes B and C live in the power well; re-arm their interrupts. */
	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
203
/* Restore state lost while a SKL power well (PG1 or PG2) was down. */
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		/* Pipes B and C are powered by PG2. */
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	/* PG1 powers pipe A and the DDI buffers. */
	if (power_well->data == SKL_DISP_PW_1) {
		intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}
233
/*
 * Program the driver's request bit in HSW_PWR_WELL_DRIVER and, when enabling,
 * wait for the hardware state bit to reflect it before restoring lost state.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			/* The well powers up asynchronously; poll the state bit. */
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			/*
			 * Only drop the driver's request; the well turns off
			 * once no requester (of the 4 request registers) is
			 * left.
			 */
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
265
/* Domains served by SKL power well 2 (PG2): pipes B/C and non-eDP outputs. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
/* PG1 is a superset of PG2 plus pipe A, eDP and DDI A. */
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
/* Per-DDI IO power wells. */
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
/* Everything not covered by one of the wells above is always on. */
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))
323
/* Domains served by BXT power well 2: pipes B/C and the DDI B/C outputs. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
/* BXT power well 1 is a superset of well 2 plus pipe A, eDP and DDI A. */
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
/* Everything not covered by wells 1 and 2 is always on. */
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))
355
/* Sanity-check the preconditions for entering the DC9 state (BXT only). */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
376
/* Sanity-check the preconditions for exiting the DC9 state. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
393
394 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
395 {
396         uint32_t val;
397
398         assert_can_enable_dc9(dev_priv);
399
400         DRM_DEBUG_KMS("Enabling DC9\n");
401
402         val = I915_READ(DC_STATE_EN);
403         val |= DC_STATE_EN_DC9;
404         I915_WRITE(DC_STATE_EN, val);
405         POSTING_READ(DC_STATE_EN);
406 }
407
408 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
409 {
410         uint32_t val;
411
412         assert_can_disable_dc9(dev_priv);
413
414         DRM_DEBUG_KMS("Disabling DC9\n");
415
416         val = I915_READ(DC_STATE_EN);
417         val &= ~DC_STATE_EN_DC9;
418         I915_WRITE(DC_STATE_EN, val);
419         POSTING_READ(DC_STATE_EN);
420 }
421
422 static void gen9_set_dc_state_debugmask_memory_up(
423                         struct drm_i915_private *dev_priv)
424 {
425         uint32_t val;
426
427         /* The below bit doesn't need to be cleared ever afterwards */
428         val = I915_READ(DC_STATE_DEBUG);
429         if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
430                 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
431                 I915_WRITE(DC_STATE_DEBUG, val);
432                 POSTING_READ(DC_STATE_DEBUG);
433         }
434 }
435
436 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
437 {
438         struct drm_device *dev = dev_priv->dev;
439         uint32_t val;
440
441         WARN_ON(!IS_GEN9(dev));
442
443         DRM_DEBUG_KMS("Enabling DC5\n");
444
445         gen9_set_dc_state_debugmask_memory_up(dev_priv);
446
447         val = I915_READ(DC_STATE_EN);
448         val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
449         val |= DC_STATE_EN_UPTO_DC5;
450         I915_WRITE(DC_STATE_EN, val);
451         POSTING_READ(DC_STATE_EN);
452 }
453
454 static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
455 {
456         struct drm_device *dev = dev_priv->dev;
457         uint32_t val;
458
459         WARN_ON(!IS_GEN9(dev));
460
461         DRM_DEBUG_KMS("Disabling DC5\n");
462
463         val = I915_READ(DC_STATE_EN);
464         val &= ~DC_STATE_EN_UPTO_DC5;
465         I915_WRITE(DC_STATE_EN, val);
466         POSTING_READ(DC_STATE_EN);
467 }
468
469 static void skl_set_power_well(struct drm_i915_private *dev_priv,
470                         struct i915_power_well *power_well, bool enable)
471 {
472         struct drm_device *dev = dev_priv->dev;
473         uint32_t tmp, fuse_status;
474         uint32_t req_mask, state_mask;
475         bool is_enabled, enable_requested, check_fuse_status = false;
476
477         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
478         fuse_status = I915_READ(SKL_FUSE_STATUS);
479
480         switch (power_well->data) {
481         case SKL_DISP_PW_1:
482                 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
483                         SKL_FUSE_PG0_DIST_STATUS), 1)) {
484                         DRM_ERROR("PG0 not enabled\n");
485                         return;
486                 }
487                 break;
488         case SKL_DISP_PW_2:
489                 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
490                         DRM_ERROR("PG1 in disabled state\n");
491                         return;
492                 }
493                 break;
494         case SKL_DISP_PW_DDI_A_E:
495         case SKL_DISP_PW_DDI_B:
496         case SKL_DISP_PW_DDI_C:
497         case SKL_DISP_PW_DDI_D:
498         case SKL_DISP_PW_MISC_IO:
499                 break;
500         default:
501                 WARN(1, "Unknown power well %lu\n", power_well->data);
502                 return;
503         }
504
505         req_mask = SKL_POWER_WELL_REQ(power_well->data);
506         enable_requested = tmp & req_mask;
507         state_mask = SKL_POWER_WELL_STATE(power_well->data);
508         is_enabled = tmp & state_mask;
509
510         if (enable) {
511                 if (!enable_requested) {
512                         WARN((tmp & state_mask) &&
513                                 !I915_READ(HSW_PWR_WELL_BIOS),
514                                 "Invalid for power well status to be enabled, unless done by the BIOS, \
515                                 when request is to disable!\n");
516                         if (GEN9_ENABLE_DC5(dev) &&
517                                 power_well->data == SKL_DISP_PW_2)
518                                 gen9_disable_dc5(dev_priv);
519                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
520                 }
521
522                 if (!is_enabled) {
523                         DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
524                         if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
525                                 state_mask), 1))
526                                 DRM_ERROR("%s enable timeout\n",
527                                         power_well->name);
528                         check_fuse_status = true;
529                 }
530         } else {
531                 if (enable_requested) {
532                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
533                         POSTING_READ(HSW_PWR_WELL_DRIVER);
534                         DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
535
536                         if (GEN9_ENABLE_DC5(dev) &&
537                                 power_well->data == SKL_DISP_PW_2) {
538                                 enum csr_state state;
539
540                                 wait_for((state = intel_csr_load_status_get(dev_priv)) !=
541                                                 FW_UNINITIALIZED, 1000);
542                                 if (state != FW_LOADED)
543                                         DRM_ERROR("CSR firmware not ready (%d)\n",
544                                                         state);
545                                 else
546                                         gen9_enable_dc5(dev_priv);
547                         }
548                 }
549         }
550
551         if (check_fuse_status) {
552                 if (power_well->data == SKL_DISP_PW_1) {
553                         if (wait_for((I915_READ(SKL_FUSE_STATUS) &
554                                 SKL_FUSE_PG1_DIST_STATUS), 1))
555                                 DRM_ERROR("PG1 distributing status timeout\n");
556                 } else if (power_well->data == SKL_DISP_PW_2) {
557                         if (wait_for((I915_READ(SKL_FUSE_STATUS) &
558                                 SKL_FUSE_PG2_DIST_STATUS), 1))
559                                 DRM_ERROR("PG2 distributing status timeout\n");
560                 }
561         }
562
563         if (enable && !is_enabled)
564                 skl_power_well_post_enable(dev_priv, power_well);
565 }
566
/* Sync the software refcount with the hardware state at driver takeover. */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over from the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
579
/* Power well ops .enable hook for HSW/BDW. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
585
/* Power well ops .disable hook for HSW/BDW. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
591
592 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
593                                         struct i915_power_well *power_well)
594 {
595         uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
596                 SKL_POWER_WELL_STATE(power_well->data);
597
598         return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
599 }
600
/* Sync the software refcount with the hardware state at driver takeover. */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
609
/* Power well ops .enable hook for SKL. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
615
/* Power well ops .disable hook for SKL. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
621
/* No-op hook for always-on wells: there is no hardware state to change. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
626
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
632
633 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
634                                struct i915_power_well *power_well, bool enable)
635 {
636         enum punit_power_well power_well_id = power_well->data;
637         u32 mask;
638         u32 state;
639         u32 ctrl;
640
641         mask = PUNIT_PWRGT_MASK(power_well_id);
642         state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
643                          PUNIT_PWRGT_PWR_GATE(power_well_id);
644
645         mutex_lock(&dev_priv->rps.hw_lock);
646
647 #define COND \
648         ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
649
650         if (COND)
651                 goto out;
652
653         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
654         ctrl &= ~mask;
655         ctrl |= state;
656         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
657
658         if (wait_for(COND, 100))
659                 DRM_ERROR("timout setting power well state %08x (%08x)\n",
660                           state,
661                           vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
662
663 #undef COND
664
665 out:
666         mutex_unlock(&dev_priv->rps.hw_lock);
667 }
668
/* Sync the software refcount with the hardware state at driver takeover. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
674
/* Power well ops .enable hook for VLV. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
680
/* Power well ops .disable hook for VLV. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
686
687 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
688                                    struct i915_power_well *power_well)
689 {
690         int power_well_id = power_well->data;
691         bool enabled = false;
692         u32 mask;
693         u32 state;
694         u32 ctrl;
695
696         mask = PUNIT_PWRGT_MASK(power_well_id);
697         ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
698
699         mutex_lock(&dev_priv->rps.hw_lock);
700
701         state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
702         /*
703          * We only ever set the power-on and power-gate states, anything
704          * else is unexpected.
705          */
706         WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
707                 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
708         if (state == ctrl)
709                 enabled = true;
710
711         /*
712          * A transient state at this point would mean some unexpected party
713          * is poking at the power controls too.
714          */
715         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
716         WARN_ON(ctrl != state);
717
718         mutex_unlock(&dev_priv->rps.hw_lock);
719
720         return enabled;
721 }
722
/* Enable the DISP2D well and restore the state that was lost while it was down. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	/* Display interrupts live in the well; re-enable them now. */
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}
745
/* Disable the DISP2D well; interrupts must go down before the well does. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	/* The panel power sequencer state is lost with the well. */
	vlv_power_sequencer_reset(dev_priv);
}
759
/*
 * Power up the VLV DPIO common lane well. The register sequence below
 * (CRI clock enable, delay, well power-on, cmnreset de-assert) follows
 * the documented bring-up order and must not be reordered.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b. The other bits such as sfr settings / modesel may all
	 *      be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
789
/*
 * Power down the VLV DPIO common lane well: all PLLs must already be
 * off, then common reset is asserted before the well is gated.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* Taking the well down with a PLL still running would be a bug. */
	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
805
806 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
807                                            struct i915_power_well *power_well)
808 {
809         enum dpio_phy phy;
810
811         WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
812                      power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
813
814         /*
815          * Enable the CRI clock source so we can get at the
816          * display and the reference clock for VGA
817          * hotplug / manual detection.
818          */
819         if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
820                 phy = DPIO_PHY0;
821                 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
822                            DPLL_REFA_CLK_ENABLE_VLV);
823                 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
824                            DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
825         } else {
826                 phy = DPIO_PHY1;
827                 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
828                            DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
829         }
830         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
831         vlv_set_power_well(dev_priv, power_well, true);
832
833         /* Poll for phypwrgood signal */
834         if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
835                 DRM_ERROR("Display PHY %d is not power up\n", phy);
836
837         I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
838                    PHY_COM_LANE_RESET_DEASSERT(phy));
839 }
840
/*
 * Power down a CHV DPIO common lane well: verify the PLLs fed by the
 * corresponding PHY are off, re-assert the common lane reset, then gate
 * the well.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		/* PHY0 serves pipes A and B. */
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		/* PHY1 serves pipe C. */
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
863
/*
 * Read back whether a CHV per-pipe power well is currently on, via the
 * punit DSPFREQ register. Sanity-checks that the HW is not in a
 * transient or unexpected state while doing so.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	/* The control (SSC) field sits 16 bits below the status (SSS) field. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
892
893 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
894                                     struct i915_power_well *power_well,
895                                     bool enable)
896 {
897         enum pipe pipe = power_well->data;
898         u32 state;
899         u32 ctrl;
900
901         state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
902
903         mutex_lock(&dev_priv->rps.hw_lock);
904
905 #define COND \
906         ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
907
908         if (COND)
909                 goto out;
910
911         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
912         ctrl &= ~DP_SSC_MASK(pipe);
913         ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
914         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
915
916         if (wait_for(COND, 100))
917                 DRM_ERROR("timout setting power well state %08x (%08x)\n",
918                           state,
919                           vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
920
921 #undef COND
922
923 out:
924         mutex_unlock(&dev_priv->rps.hw_lock);
925 }
926
927 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
928                                         struct i915_power_well *power_well)
929 {
930         chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
931 }
932
933 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
934                                        struct i915_power_well *power_well)
935 {
936         WARN_ON_ONCE(power_well->data != PIPE_A &&
937                      power_well->data != PIPE_B &&
938                      power_well->data != PIPE_C);
939
940         chv_set_pipe_power_well(dev_priv, power_well, true);
941
942         if (power_well->data == PIPE_A) {
943                 spin_lock_irq(&dev_priv->irq_lock);
944                 valleyview_enable_display_irqs(dev_priv);
945                 spin_unlock_irq(&dev_priv->irq_lock);
946
947                 /*
948                  * During driver initialization/resume we can avoid restoring the
949                  * part of the HW/SW state that will be inited anyway explicitly.
950                  */
951                 if (dev_priv->power_domains.initializing)
952                         return;
953
954                 intel_hpd_init(dev_priv);
955
956                 i915_redisable_vga_power_on(dev_priv->dev);
957         }
958 }
959
/*
 * Disable a CHV pipe power well. For pipe A the display IRQs are shut
 * off before the well goes down and the power sequencer tracking is
 * reset afterwards, mirroring chv_pipe_power_well_enable().
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	/* IRQs must be quiesced before the well is gated. */
	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_disable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	/* SW PPS tracking reset happens after the well is actually down. */
	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}
978
979 /**
980  * intel_display_power_get - grab a power domain reference
981  * @dev_priv: i915 device instance
982  * @domain: power domain to reference
983  *
984  * This function grabs a power domain reference for @domain and ensures that the
985  * power domain and all its parents are powered up. Therefore users should only
986  * grab a reference to the innermost power domain they need.
987  *
988  * Any power domain reference obtained by this function must have a symmetric
989  * call to intel_display_power_put() to release the reference again.
990  */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	/* Keep the device awake while any display power reference is held. */
	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Enable every well serving @domain on its 0 -> 1 refcount edge. */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
1016
1017 /**
1018  * intel_display_power_put - release a power domain reference
1019  * @dev_priv: i915 device instance
1020  * @domain: power domain to reference
1021  *
1022  * This function drops the power domain reference obtained by
1023  * intel_display_power_get() and might power down the corresponding hardware
1024  * block right away if this is the last reference.
1025  */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Unbalanced put — more puts than gets for this domain. */
	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	/* Disable in reverse order of enabling, on each 1 -> 0 edge. */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		/* i915.disable_power_well==0 keeps wells on for debugging. */
		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the runtime PM reference taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}
1054
/* Bitmask covering every power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/*
 * Per-platform domain sets. Domains in an *_ALWAYS_ON_* set are served
 * without toggling any power well; everything else maps to one of the
 * power wells defined further below.
 */
#define HSW_ALWAYS_ON_POWER_DOMAINS (                   \
	BIT(POWER_DOMAIN_PIPE_A) |                      \
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_CRT) |                    \
	BIT(POWER_DOMAIN_PLLS) |                        \
	BIT(POWER_DOMAIN_AUX_A) |                       \
	BIT(POWER_DOMAIN_AUX_B) |                       \
	BIT(POWER_DOMAIN_AUX_C) |                       \
	BIT(POWER_DOMAIN_AUX_D) |                       \
	BIT(POWER_DOMAIN_INIT))
/* HSW "display" well covers everything not always-on. */
#define HSW_DISPLAY_POWER_DOMAINS (                             \
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (                   \
	HSW_ALWAYS_ON_POWER_DOMAINS |                   \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (                             \
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
	BIT(POWER_DOMAIN_INIT))

/* On VLV a single display well serves every domain. */
#define VLV_ALWAYS_ON_POWER_DOMAINS     BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS       POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_PORT_CRT) |            \
	BIT(POWER_DOMAIN_AUX_B) |               \
	BIT(POWER_DOMAIN_AUX_C) |               \
	BIT(POWER_DOMAIN_INIT))

/* Per-lane-pair DPIO TX well domain sets (VLV). */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_B) |               \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_B) |               \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_C) |               \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_C) |               \
	BIT(POWER_DOMAIN_INIT))

/* CHV per-pipe well domain sets. */
#define CHV_PIPE_A_POWER_DOMAINS (      \
	BIT(POWER_DOMAIN_PIPE_A) |      \
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (      \
	BIT(POWER_DOMAIN_PIPE_B) |      \
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (      \
	BIT(POWER_DOMAIN_PIPE_C) |      \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_B) |               \
	BIT(POWER_DOMAIN_AUX_C) |               \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_D) |               \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_D) |               \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
	BIT(POWER_DOMAIN_AUX_D) |               \
	BIT(POWER_DOMAIN_INIT))
1158
/* No-op ops for wells that can never be powered down. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe wells, toggled via the punit DSPFREQ register. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane wells; status/sync shared with VLV. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1179
/* Fallback table for platforms without controllable display wells. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
1188
/* HSW/BDW single display well ops. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* SKL (also reused by BXT) power well ops. */
static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};
1202
/* HSW: one always-on well plus a single "display" well for the rest. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW with slightly different domain split. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
1230
/* VLV DISP2D well: punit controlled, with IRQ/hotplug/VGA side effects. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well, with CRI clock / cmnreset handling. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Plain punit-controlled wells (DPIO TX lanes). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1251
/*
 * VLV power well list. Order matters: wells are enabled lowest index
 * first and disabled in reverse (see intel_power_domains_init()).
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	/*
	 * Each TX lane well lists all B/C lane domains so that any lane
	 * use keeps every TX well (and hence dpio-common below) powered.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
1308
/*
 * CHV power well list. Several wells are compiled out with #if 0 —
 * left in place deliberately as documentation of the intended layout.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this a a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
1417
1418 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1419                                                  enum punit_power_well power_well_id)
1420 {
1421         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1422         struct i915_power_well *power_well;
1423         int i;
1424
1425         for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1426                 if (power_well->data == power_well_id)
1427                         return power_well;
1428         }
1429
1430         return NULL;
1431 }
1432
/*
 * SKL power well list; wells are enabled in array order and disabled
 * in reverse, so PW1/MISC IO precede PW2 and the DDI wells.
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};
1483
/* BXT power well list; reuses the SKL well ops and ids. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	}
};
1504
/* Install a platform's power well array and record its length. */
#define set_power_wells(power_domains, __power_wells) ({                \
	(power_domains)->power_wells = (__power_wells);                 \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
})
1509
1510 /**
1511  * intel_power_domains_init - initializes the power domain structures
1512  * @dev_priv: i915 device instance
1513  *
1514  * Initializes the power domain structures for @dev_priv depending upon the
1515  * supported platform.
1516  */
1517 int intel_power_domains_init(struct drm_i915_private *dev_priv)
1518 {
1519         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1520
1521         mutex_init(&power_domains->lock);
1522
1523         /*
1524          * The enabling order will be from lower to higher indexed wells,
1525          * the disabling order is reversed.
1526          */
1527         if (IS_HASWELL(dev_priv->dev)) {
1528                 set_power_wells(power_domains, hsw_power_wells);
1529         } else if (IS_BROADWELL(dev_priv->dev)) {
1530                 set_power_wells(power_domains, bdw_power_wells);
1531         } else if (IS_SKYLAKE(dev_priv->dev)) {
1532                 set_power_wells(power_domains, skl_power_wells);
1533         } else if (IS_BROXTON(dev_priv->dev)) {
1534                 set_power_wells(power_domains, bxt_power_wells);
1535         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1536                 set_power_wells(power_domains, chv_power_wells);
1537         } else if (IS_VALLEYVIEW(dev_priv->dev)) {
1538                 set_power_wells(power_domains, vlv_power_wells);
1539         } else {
1540                 set_power_wells(power_domains, i9xx_always_on_power_well);
1541         }
1542
1543         return 0;
1544 }
1545
1546 static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1547 {
1548         struct drm_device *dev = dev_priv->dev;
1549         struct device *device = &dev->pdev->dev;
1550
1551         if (!HAS_RUNTIME_PM(dev))
1552                 return;
1553
1554         if (!intel_enable_rc6(dev))
1555                 return;
1556
1557         /* Make sure we're not suspended first. */
1558         pm_runtime_get_sync(device);
1559         pm_runtime_disable(device);
1560 }
1561
1562 /**
1563  * intel_power_domains_fini - finalizes the power domain structures
1564  * @dev_priv: i915 device instance
1565  *
1566  * Finalizes the power domain structures for @dev_priv depending upon the
1567  * supported platform. This function also disables runtime pm and ensures that
1568  * the device stays powered up so that the driver can be reloaded.
1569  */
1570 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1571 {
1572         intel_runtime_pm_disable(dev_priv);
1573
1574         /* The i915.ko module is still not prepared to be loaded when
1575          * the power well is not enabled, so just enable it in case
1576          * we're going to unload/reload. */
1577         intel_display_set_init_power(dev_priv, true);
1578 }
1579
1580 static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1581 {
1582         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1583         struct i915_power_well *power_well;
1584         int i;
1585
1586         mutex_lock(&power_domains->lock);
1587         for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1588                 power_well->ops->sync_hw(dev_priv, power_well);
1589                 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1590                                                                      power_well);
1591         }
1592         mutex_unlock(&power_domains->lock);
1593 }
1594
/*
 * VLV display PHY workaround: toggle the PHY-side common lane reset at
 * init so the PHY starts from a known state — unless the display is
 * already live with the reset de-asserted, in which case toggling it
 * would disturb the running display.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 *
	 * NOTE(review): only the gating half happens here — the well is
	 * presumably un-gated later by the normal power domain code when
	 * something needs it; confirm against the power well framework.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
1622
1623 /**
1624  * intel_power_domains_init_hw - initialize hardware power domain state
1625  * @dev_priv: i915 device instance
1626  *
1627  * This function initializes the hardware power domain state and enables all
1628  * power domains using intel_display_set_init_power().
1629  */
1630 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1631 {
1632         struct drm_device *dev = dev_priv->dev;
1633         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1634
1635         power_domains->initializing = true;
1636
1637         if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1638                 mutex_lock(&power_domains->lock);
1639                 vlv_cmnlane_wa(dev_priv);
1640                 mutex_unlock(&power_domains->lock);
1641         }
1642
1643         /* For now, we need the power well to be always enabled. */
1644         intel_display_set_init_power(dev_priv, true);
1645         intel_power_domains_resume(dev_priv);
1646         power_domains->initializing = false;
1647 }
1648
1649 /**
1650  * intel_aux_display_runtime_get - grab an auxiliary power domain reference
1651  * @dev_priv: i915 device instance
1652  *
1653  * This function grabs a power domain reference for the auxiliary power domain
1654  * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
1655  * parents are powered up. Therefore users should only grab a reference to the
1656  * innermost power domain they need.
1657  *
1658  * Any power domain reference obtained by this function must have a symmetric
1659  * call to intel_aux_display_runtime_put() to release the reference again.
1660  */
1661 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
1662 {
1663         intel_runtime_pm_get(dev_priv);
1664 }
1665
1666 /**
1667  * intel_aux_display_runtime_put - release an auxiliary power domain reference
1668  * @dev_priv: i915 device instance
1669  *
1670  * This function drops the auxiliary power domain reference obtained by
1671  * intel_aux_display_runtime_get() and might power down the corresponding
1672  * hardware block right away if this is the last reference.
1673  */
1674 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
1675 {
1676         intel_runtime_pm_put(dev_priv);
1677 }
1678
1679 /**
1680  * intel_runtime_pm_get - grab a runtime pm reference
1681  * @dev_priv: i915 device instance
1682  *
1683  * This function grabs a device-level runtime pm reference (mostly used for GEM
1684  * code to ensure the GTT or GT is on) and ensures that it is powered up.
1685  *
1686  * Any runtime pm reference obtained by this function must have a symmetric
1687  * call to intel_runtime_pm_put() to release the reference again.
1688  */
1689 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1690 {
1691         struct drm_device *dev = dev_priv->dev;
1692         struct device *device = &dev->pdev->dev;
1693
1694         if (!HAS_RUNTIME_PM(dev))
1695                 return;
1696
1697         pm_runtime_get_sync(device);
1698         WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1699 }
1700
1701 /**
1702  * intel_runtime_pm_get_noresume - grab a runtime pm reference
1703  * @dev_priv: i915 device instance
1704  *
1705  * This function grabs a device-level runtime pm reference (mostly used for GEM
1706  * code to ensure the GTT or GT is on).
1707  *
1708  * It will _not_ power up the device but instead only check that it's powered
1709  * on.  Therefore it is only valid to call this functions from contexts where
1710  * the device is known to be powered up and where trying to power it up would
1711  * result in hilarity and deadlocks. That pretty much means only the system
1712  * suspend/resume code where this is used to grab runtime pm references for
1713  * delayed setup down in work items.
1714  *
1715  * Any runtime pm reference obtained by this function must have a symmetric
1716  * call to intel_runtime_pm_put() to release the reference again.
1717  */
1718 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1719 {
1720         struct drm_device *dev = dev_priv->dev;
1721         struct device *device = &dev->pdev->dev;
1722
1723         if (!HAS_RUNTIME_PM(dev))
1724                 return;
1725
1726         WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1727         pm_runtime_get_noresume(device);
1728 }
1729
1730 /**
1731  * intel_runtime_pm_put - release a runtime pm reference
1732  * @dev_priv: i915 device instance
1733  *
1734  * This function drops the device-level runtime pm reference obtained by
1735  * intel_runtime_pm_get() and might power down the corresponding
1736  * hardware block right away if this is the last reference.
1737  */
1738 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1739 {
1740         struct drm_device *dev = dev_priv->dev;
1741         struct device *device = &dev->pdev->dev;
1742
1743         if (!HAS_RUNTIME_PM(dev))
1744                 return;
1745
1746         pm_runtime_mark_last_busy(device);
1747         pm_runtime_put_autosuspend(device);
1748 }
1749
1750 /**
1751  * intel_runtime_pm_enable - enable runtime pm
1752  * @dev_priv: i915 device instance
1753  *
1754  * This function enables runtime pm at the end of the driver load sequence.
1755  *
1756  * Note that this function does currently not enable runtime pm for the
1757  * subordinate display power domains. That is only done on the first modeset
1758  * using intel_display_set_init_power().
1759  */
1760 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1761 {
1762         struct drm_device *dev = dev_priv->dev;
1763         struct device *device = &dev->pdev->dev;
1764
1765         if (!HAS_RUNTIME_PM(dev))
1766                 return;
1767
1768         pm_runtime_set_active(device);
1769
1770         /*
1771          * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
1772          * requirement.
1773          */
1774         if (!intel_enable_rc6(dev)) {
1775                 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1776                 return;
1777         }
1778
1779         pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1780         pm_runtime_mark_last_busy(device);
1781         pm_runtime_use_autosuspend(device);
1782
1783         pm_runtime_put_autosuspend(device);
1784 }
1785