1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include <linux/device.h>
31 #include <linux/acpi.h>
33 #include <drm/i915_drm.h>
35 #include "i915_trace.h"
36 #include "intel_drv.h"
38 #include <linux/console.h>
39 #include <linux/module.h>
40 #include <linux/pm_runtime.h>
41 #include <drm/drm_crtc_helper.h>
43 static struct drm_driver driver;
45 #define GEN_DEFAULT_PIPEOFFSETS \
46 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
47 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
48 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
49 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
50 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
52 #define GEN_CHV_PIPEOFFSETS \
53 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
54 CHV_PIPE_C_OFFSET }, \
55 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
56 CHV_TRANSCODER_C_OFFSET, }, \
57 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
58 CHV_PALETTE_C_OFFSET }
60 #define CURSOR_OFFSETS \
61 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
63 #define IVB_CURSOR_OFFSETS \
64 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
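/*
 * The *_PIPEOFFSETS and *_CURSOR_OFFSETS macros above expand to plain
 * designated initializers, so each intel_device_info entry below is just a
 * comma-separated field list plus one of these macros. A minimal standalone
 * analogue of that pattern (hypothetical struct and offsets, illustration
 * only) is sketched here:
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdio.h>

struct demo_info {
	int gen;
	unsigned int pipe_offsets[3];
};

/* Plays the role of GEN_DEFAULT_PIPEOFFSETS: just a designated initializer. */
#define DEMO_PIPEOFFSETS \
	.pipe_offsets = { 0x70000, 0x71000, 0x72000 }

static const struct demo_info demo_i830_like = {
	.gen = 2,
	DEMO_PIPEOFFSETS,	/* expands in place; on a conflict, "last one wins" */
};

int main(void)
{
	printf("gen %d, pipe B mmio base 0x%x\n",
	       demo_i830_like.gen, demo_i830_like.pipe_offsets[1]);
	return 0;
}
#endif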
66 static const struct intel_device_info intel_i830_info = {
67 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
68 .has_overlay = 1, .overlay_needs_physical = 1,
69 .ring_mask = RENDER_RING,
70 GEN_DEFAULT_PIPEOFFSETS,
74 static const struct intel_device_info intel_845g_info = {
75 .gen = 2, .num_pipes = 1,
76 .has_overlay = 1, .overlay_needs_physical = 1,
77 .ring_mask = RENDER_RING,
78 GEN_DEFAULT_PIPEOFFSETS,
82 static const struct intel_device_info intel_i85x_info = {
83 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
84 .cursor_needs_physical = 1,
85 .has_overlay = 1, .overlay_needs_physical = 1,
87 .ring_mask = RENDER_RING,
88 GEN_DEFAULT_PIPEOFFSETS,
92 static const struct intel_device_info intel_i865g_info = {
93 .gen = 2, .num_pipes = 1,
94 .has_overlay = 1, .overlay_needs_physical = 1,
95 .ring_mask = RENDER_RING,
96 GEN_DEFAULT_PIPEOFFSETS,
100 static const struct intel_device_info intel_i915g_info = {
101 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
102 .has_overlay = 1, .overlay_needs_physical = 1,
103 .ring_mask = RENDER_RING,
104 GEN_DEFAULT_PIPEOFFSETS,
107 static const struct intel_device_info intel_i915gm_info = {
108 .gen = 3, .is_mobile = 1, .num_pipes = 2,
109 .cursor_needs_physical = 1,
110 .has_overlay = 1, .overlay_needs_physical = 1,
113 .ring_mask = RENDER_RING,
114 GEN_DEFAULT_PIPEOFFSETS,
117 static const struct intel_device_info intel_i945g_info = {
118 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
119 .has_overlay = 1, .overlay_needs_physical = 1,
120 .ring_mask = RENDER_RING,
121 GEN_DEFAULT_PIPEOFFSETS,
124 static const struct intel_device_info intel_i945gm_info = {
125 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
126 .has_hotplug = 1, .cursor_needs_physical = 1,
127 .has_overlay = 1, .overlay_needs_physical = 1,
130 .ring_mask = RENDER_RING,
131 GEN_DEFAULT_PIPEOFFSETS,
135 static const struct intel_device_info intel_i965g_info = {
136 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
139 .ring_mask = RENDER_RING,
140 GEN_DEFAULT_PIPEOFFSETS,
144 static const struct intel_device_info intel_i965gm_info = {
145 .gen = 4, .is_crestline = 1, .num_pipes = 2,
146 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
149 .ring_mask = RENDER_RING,
150 GEN_DEFAULT_PIPEOFFSETS,
154 static const struct intel_device_info intel_g33_info = {
155 .gen = 3, .is_g33 = 1, .num_pipes = 2,
156 .need_gfx_hws = 1, .has_hotplug = 1,
158 .ring_mask = RENDER_RING,
159 GEN_DEFAULT_PIPEOFFSETS,
163 static const struct intel_device_info intel_g45_info = {
164 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
165 .has_pipe_cxsr = 1, .has_hotplug = 1,
166 .ring_mask = RENDER_RING | BSD_RING,
167 GEN_DEFAULT_PIPEOFFSETS,
171 static const struct intel_device_info intel_gm45_info = {
172 .gen = 4, .is_g4x = 1, .num_pipes = 2,
173 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
174 .has_pipe_cxsr = 1, .has_hotplug = 1,
176 .ring_mask = RENDER_RING | BSD_RING,
177 GEN_DEFAULT_PIPEOFFSETS,
181 static const struct intel_device_info intel_pineview_info = {
182 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
183 .need_gfx_hws = 1, .has_hotplug = 1,
185 GEN_DEFAULT_PIPEOFFSETS,
189 static const struct intel_device_info intel_ironlake_d_info = {
190 .gen = 5, .num_pipes = 2,
191 .need_gfx_hws = 1, .has_hotplug = 1,
192 .ring_mask = RENDER_RING | BSD_RING,
193 GEN_DEFAULT_PIPEOFFSETS,
197 static const struct intel_device_info intel_ironlake_m_info = {
198 .gen = 5, .is_mobile = 1, .num_pipes = 2,
199 .need_gfx_hws = 1, .has_hotplug = 1,
201 .ring_mask = RENDER_RING | BSD_RING,
202 GEN_DEFAULT_PIPEOFFSETS,
206 static const struct intel_device_info intel_sandybridge_d_info = {
207 .gen = 6, .num_pipes = 2,
208 .need_gfx_hws = 1, .has_hotplug = 1,
210 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
212 GEN_DEFAULT_PIPEOFFSETS,
216 static const struct intel_device_info intel_sandybridge_m_info = {
217 .gen = 6, .is_mobile = 1, .num_pipes = 2,
218 .need_gfx_hws = 1, .has_hotplug = 1,
220 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
222 GEN_DEFAULT_PIPEOFFSETS,
226 #define GEN7_FEATURES \
227 .gen = 7, .num_pipes = 3, \
228 .need_gfx_hws = 1, .has_hotplug = 1, \
230 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
233 static const struct intel_device_info intel_ivybridge_d_info = {
236 GEN_DEFAULT_PIPEOFFSETS,
240 static const struct intel_device_info intel_ivybridge_m_info = {
244 GEN_DEFAULT_PIPEOFFSETS,
248 static const struct intel_device_info intel_ivybridge_q_info = {
251 .num_pipes = 0, /* legal, last one wins */
252 GEN_DEFAULT_PIPEOFFSETS,
256 static const struct intel_device_info intel_valleyview_m_info = {
261 .display_mmio_offset = VLV_DISPLAY_BASE,
262 .has_fbc = 0, /* legal, last one wins */
263 .has_llc = 0, /* legal, last one wins */
264 GEN_DEFAULT_PIPEOFFSETS,
268 static const struct intel_device_info intel_valleyview_d_info = {
272 .display_mmio_offset = VLV_DISPLAY_BASE,
273 .has_fbc = 0, /* legal, last one wins */
274 .has_llc = 0, /* legal, last one wins */
275 GEN_DEFAULT_PIPEOFFSETS,
279 static const struct intel_device_info intel_haswell_d_info = {
284 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
285 GEN_DEFAULT_PIPEOFFSETS,
289 static const struct intel_device_info intel_haswell_m_info = {
295 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
296 GEN_DEFAULT_PIPEOFFSETS,
300 static const struct intel_device_info intel_broadwell_d_info = {
301 .gen = 8, .num_pipes = 3,
302 .need_gfx_hws = 1, .has_hotplug = 1,
303 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
308 GEN_DEFAULT_PIPEOFFSETS,
312 static const struct intel_device_info intel_broadwell_m_info = {
313 .gen = 8, .is_mobile = 1, .num_pipes = 3,
314 .need_gfx_hws = 1, .has_hotplug = 1,
315 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
320 GEN_DEFAULT_PIPEOFFSETS,
324 static const struct intel_device_info intel_broadwell_gt3d_info = {
325 .gen = 8, .num_pipes = 3,
326 .need_gfx_hws = 1, .has_hotplug = 1,
327 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
332 GEN_DEFAULT_PIPEOFFSETS,
336 static const struct intel_device_info intel_broadwell_gt3m_info = {
337 .gen = 8, .is_mobile = 1, .num_pipes = 3,
338 .need_gfx_hws = 1, .has_hotplug = 1,
339 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
344 GEN_DEFAULT_PIPEOFFSETS,
348 static const struct intel_device_info intel_cherryview_info = {
350 .gen = 8, .num_pipes = 3,
351 .need_gfx_hws = 1, .has_hotplug = 1,
352 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
354 .display_mmio_offset = VLV_DISPLAY_BASE,
359 static const struct intel_device_info intel_skylake_info = {
362 .gen = 9, .num_pipes = 3,
363 .need_gfx_hws = 1, .has_hotplug = 1,
364 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
368 GEN_DEFAULT_PIPEOFFSETS,
373 * Make sure any device matches here are ordered from most specific to most
374 * general. For example, since the Quanta match is based on the subsystem
375 * and subvendor IDs, we need it to come before the more general IVB
376 * PCI ID matches, otherwise we'll use the wrong info struct above. (A
 * sketch of how the matched entry feeds back into the driver follows the
 * ID list below.)
378 #define INTEL_PCI_IDS \
379 INTEL_I830_IDS(&intel_i830_info), \
380 INTEL_I845G_IDS(&intel_845g_info), \
381 INTEL_I85X_IDS(&intel_i85x_info), \
382 INTEL_I865G_IDS(&intel_i865g_info), \
383 INTEL_I915G_IDS(&intel_i915g_info), \
384 INTEL_I915GM_IDS(&intel_i915gm_info), \
385 INTEL_I945G_IDS(&intel_i945g_info), \
386 INTEL_I945GM_IDS(&intel_i945gm_info), \
387 INTEL_I965G_IDS(&intel_i965g_info), \
388 INTEL_G33_IDS(&intel_g33_info), \
389 INTEL_I965GM_IDS(&intel_i965gm_info), \
390 INTEL_GM45_IDS(&intel_gm45_info), \
391 INTEL_G45_IDS(&intel_g45_info), \
392 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
393 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
394 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
395 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
396 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
397 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
398 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
399 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
400 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
401 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
402 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
403 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
404 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
405 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
406 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
407 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
408 INTEL_CHV_IDS(&intel_cherryview_info), \
409 INTEL_SKL_IDS(&intel_skylake_info)
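/*
 * Each INTEL_*_IDS() macro (from drm/i915_pciids.h) stashes a pointer to the
 * matching intel_device_info in the entry's driver_data field; i915_pci_probe()
 * below casts it back. Because the PCI core returns the first matching entry,
 * the ordering rule in the comment above matters. A standalone sketch of the
 * pattern, with hypothetical types and IDs (illustration only):
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdio.h>

struct demo_device_info { int gen; };

struct demo_pci_id {
	unsigned short vendor, device;
	unsigned long driver_data;	/* opaque per-match payload */
};

static const struct demo_device_info demo_gen7 = { .gen = 7 };

static const struct demo_pci_id demo_idlist[] = {
	{ 0x8086, 0x0166, (unsigned long)&demo_gen7 },
	{ 0, 0, 0 },	/* terminator, like the real pciidlist */
};

static void demo_probe(const struct demo_pci_id *ent)
{
	/* Recover the info struct exactly as the probe callback does. */
	const struct demo_device_info *info =
		(const struct demo_device_info *)ent->driver_data;

	printf("probed 0x%04x:0x%04x, gen %d\n",
	       ent->vendor, ent->device, info->gen);
}

int main(void)
{
	demo_probe(&demo_idlist[0]);
	return 0;
}
#endif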
411 static const struct pci_device_id pciidlist[] = { /* aka */
416 #if defined(CONFIG_DRM_I915_KMS)
417 MODULE_DEVICE_TABLE(pci, pciidlist);
420 void intel_detect_pch(struct drm_device *dev)
422 struct drm_i915_private *dev_priv = dev->dev_private;
423 struct pci_dev *pch = NULL;
425 /* In all current cases, num_pipes == 0 is equivalent to the PCH_NOP setting
426 * (which really amounts to having a PCH but no South Display).
428 if (INTEL_INFO(dev)->num_pipes == 0) {
429 dev_priv->pch_type = PCH_NOP;
434 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
435 * make graphics device passthrough easier for the VMM, which then only
436 * needs to expose the ISA bridge to let the driver know the real hardware
437 * underneath. This is a requirement from the virtualization team.
439 * In some virtualized environments (e.g. XEN), there is an irrelevant
440 * ISA bridge in the system. To work reliably, we should scan through
441 * all the ISA bridge devices and check for the first match, instead
442 * of only checking the first one.
444 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
445 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
446 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
447 dev_priv->pch_id = id;
449 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
450 dev_priv->pch_type = PCH_IBX;
451 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
452 WARN_ON(!IS_GEN5(dev));
453 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
454 dev_priv->pch_type = PCH_CPT;
455 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
456 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
457 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
458 /* PantherPoint is CPT compatible */
459 dev_priv->pch_type = PCH_CPT;
460 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
461 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
462 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
463 dev_priv->pch_type = PCH_LPT;
464 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
465 WARN_ON(!IS_HASWELL(dev));
466 WARN_ON(IS_HSW_ULT(dev));
467 } else if (IS_BROADWELL(dev)) {
468 dev_priv->pch_type = PCH_LPT;
470 INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
471 DRM_DEBUG_KMS("This is Broadwell, assuming "
472 "LynxPoint LP PCH\n");
473 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
474 dev_priv->pch_type = PCH_LPT;
475 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
476 WARN_ON(!IS_HASWELL(dev));
477 WARN_ON(!IS_HSW_ULT(dev));
478 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
479 dev_priv->pch_type = PCH_SPT;
480 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
481 WARN_ON(!IS_SKYLAKE(dev));
482 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
483 dev_priv->pch_type = PCH_SPT;
484 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
485 WARN_ON(!IS_SKYLAKE(dev));
493 DRM_DEBUG_KMS("No PCH found.\n");
498 bool i915_semaphore_is_enabled(struct drm_device *dev)
500 if (INTEL_INFO(dev)->gen < 6)
503 if (i915.semaphores >= 0)
504 return i915.semaphores;
506 /* TODO: make semaphores and Execlists play nicely together */
507 if (i915.enable_execlists)
510 /* Until we get further testing... */
514 #ifdef CONFIG_INTEL_IOMMU
515 /* Enable semaphores on SNB when IO remapping is off */
516 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
523 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
525 spin_lock_irq(&dev_priv->irq_lock);
527 dev_priv->long_hpd_port_mask = 0;
528 dev_priv->short_hpd_port_mask = 0;
529 dev_priv->hpd_event_bits = 0;
531 spin_unlock_irq(&dev_priv->irq_lock);
533 cancel_work_sync(&dev_priv->dig_port_work);
534 cancel_work_sync(&dev_priv->hotplug_work);
535 cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
538 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
540 struct drm_device *dev = dev_priv->dev;
541 struct drm_encoder *encoder;
543 drm_modeset_lock_all(dev);
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
545 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
547 if (intel_encoder->suspend)
548 intel_encoder->suspend(intel_encoder);
550 drm_modeset_unlock_all(dev);
553 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
554 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
557 static int i915_drm_suspend(struct drm_device *dev)
559 struct drm_i915_private *dev_priv = dev->dev_private;
560 struct drm_crtc *crtc;
561 pci_power_t opregion_target_state;
563 /* ignore lid events during suspend */
564 mutex_lock(&dev_priv->modeset_restore_lock);
565 dev_priv->modeset_restore = MODESET_SUSPENDED;
566 mutex_unlock(&dev_priv->modeset_restore_lock);
568 /* We do a lot of poking in a lot of registers, make sure they work
570 intel_display_set_init_power(dev_priv, true);
572 drm_kms_helper_poll_disable(dev);
574 pci_save_state(dev->pdev);
576 /* If KMS is active, we do the leavevt stuff here */
577 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
580 error = i915_gem_suspend(dev);
582 dev_err(&dev->pdev->dev,
583 "GEM idle failed, resume might fail\n");
588 * Disable CRTCs directly since we want to preserve sw state
589 * for _thaw. Also, power gate the CRTC power wells.
591 drm_modeset_lock_all(dev);
592 for_each_crtc(dev, crtc)
593 intel_crtc_control(crtc, false);
594 drm_modeset_unlock_all(dev);
596 intel_dp_mst_suspend(dev);
598 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
600 intel_runtime_pm_disable_interrupts(dev_priv);
601 intel_hpd_cancel_work(dev_priv);
603 intel_suspend_encoders(dev_priv);
605 intel_suspend_gt_powersave(dev);
607 intel_suspend_hw(dev);
610 i915_gem_suspend_gtt_mappings(dev);
612 i915_save_state(dev);
614 opregion_target_state = PCI_D3cold;
615 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
616 if (acpi_target_system_state() < ACPI_STATE_S3)
617 opregion_target_state = PCI_D1;
619 intel_opregion_notify_adapter(dev, opregion_target_state);
621 intel_uncore_forcewake_reset(dev, false);
622 intel_opregion_fini(dev);
624 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
626 dev_priv->suspend_count++;
628 intel_display_set_init_power(dev_priv, false);
633 static int i915_drm_suspend_late(struct drm_device *drm_dev)
635 struct drm_i915_private *dev_priv = drm_dev->dev_private;
638 ret = intel_suspend_complete(dev_priv);
641 DRM_ERROR("Suspend complete failed: %d\n", ret);
646 pci_disable_device(drm_dev->pdev);
647 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
652 int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
656 if (!dev || !dev->dev_private) {
657 DRM_ERROR("dev: %p\n", dev);
658 DRM_ERROR("DRM not initialized, aborting suspend.\n");
662 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
663 state.event != PM_EVENT_FREEZE))
666 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
669 error = i915_drm_suspend(dev);
673 return i915_drm_suspend_late(dev);
676 static int i915_drm_resume(struct drm_device *dev)
678 struct drm_i915_private *dev_priv = dev->dev_private;
680 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
681 mutex_lock(&dev->struct_mutex);
682 i915_gem_restore_gtt_mappings(dev);
683 mutex_unlock(&dev->struct_mutex);
686 i915_restore_state(dev);
687 intel_opregion_setup(dev);
689 /* KMS EnterVT equivalent */
690 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
691 intel_init_pch_refclk(dev);
692 drm_mode_config_reset(dev);
694 mutex_lock(&dev->struct_mutex);
695 if (i915_gem_init_hw(dev)) {
696 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
697 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
699 mutex_unlock(&dev->struct_mutex);
701 /* We need working interrupts for modeset enabling ... */
702 intel_runtime_pm_enable_interrupts(dev_priv);
704 intel_modeset_init_hw(dev);
707 spin_lock_irq(&dev_priv->irq_lock);
708 if (dev_priv->display.hpd_irq_setup)
709 dev_priv->display.hpd_irq_setup(dev);
710 spin_unlock_irq(&dev_priv->irq_lock);
713 intel_dp_mst_resume(dev);
714 drm_modeset_lock_all(dev);
715 intel_modeset_setup_hw_state(dev, true);
716 drm_modeset_unlock_all(dev);
719 * ... but also need to make sure that hotplug processing
720 * doesn't cause havoc. Like in the driver load code we don't
721 * bother with the tiny race here where we might lose hotplug
724 intel_hpd_init(dev_priv);
725 /* Config may have changed between suspend and resume */
726 drm_helper_hpd_irq_event(dev);
729 intel_opregion_init(dev);
731 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
733 mutex_lock(&dev_priv->modeset_restore_lock);
734 dev_priv->modeset_restore = MODESET_DONE;
735 mutex_unlock(&dev_priv->modeset_restore_lock);
737 intel_opregion_notify_adapter(dev, PCI_D0);
739 drm_kms_helper_poll_enable(dev);
744 static int i915_drm_resume_early(struct drm_device *dev)
746 struct drm_i915_private *dev_priv = dev->dev_private;
750 * We have a resume ordering issue with the snd-hda driver also
751 * requiring our device to be powered up. Due to the lack of a
752 * parent/child relationship we currently solve this with an early
755 * FIXME: This should be solved with a special hdmi sink device or
756 * similar so that power domains can be employed.
758 if (pci_enable_device(dev->pdev))
761 pci_set_master(dev->pdev);
763 if (IS_VALLEYVIEW(dev_priv))
764 ret = vlv_resume_prepare(dev_priv, false);
766 DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
768 intel_uncore_early_sanitize(dev, true);
770 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
771 hsw_disable_pc8(dev_priv);
773 intel_uncore_sanitize(dev);
774 intel_power_domains_init_hw(dev_priv);
779 int i915_resume_legacy(struct drm_device *dev)
783 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
786 ret = i915_drm_resume_early(dev);
790 return i915_drm_resume(dev);
794 * i915_reset - reset chip after a hang
795 * @dev: drm device to reset
797 * Reset the chip. Useful if a hang is detected. Returns zero on successful
798 * reset or otherwise an error code.
800 * Procedure is fairly simple:
801 * - reset the chip using the reset reg
802 * - re-init context state
803 * - re-init hardware status page
804 * - re-init ring buffer
805 * - re-init interrupt state
808 int i915_reset(struct drm_device *dev)
810 struct drm_i915_private *dev_priv = dev->dev_private;
817 mutex_lock(&dev->struct_mutex);
821 simulated = dev_priv->gpu_error.stop_rings != 0;
823 ret = intel_gpu_reset(dev);
825 /* Also reset the gpu hangman. */
827 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
828 dev_priv->gpu_error.stop_rings = 0;
829 if (ret == -ENODEV) {
830 DRM_INFO("Reset not implemented, but ignoring "
831 "error for simulated gpu hangs\n");
836 if (i915_stop_ring_allow_warn(dev_priv))
837 pr_notice("drm/i915: Resetting chip after gpu hang\n");
840 DRM_ERROR("Failed to reset chip: %i\n", ret);
841 mutex_unlock(&dev->struct_mutex);
845 /* Ok, now get things going again... */
848 * Everything depends on having the GTT running, so we need to start
849 * there. Fortunately we don't need to do this unless we reset the
850 * chip at a PCI level.
852 * Next we need to restore the context, but we don't use those
855 * Ring buffer needs to be re-initialized in the KMS case, or if X
856 * was running at the time of the reset (i.e. we weren't VT
859 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
860 !dev_priv->ums.mm_suspended) {
861 dev_priv->ums.mm_suspended = 0;
863 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
864 dev_priv->gpu_error.reload_in_reset = true;
866 ret = i915_gem_init_hw(dev);
868 dev_priv->gpu_error.reload_in_reset = false;
870 mutex_unlock(&dev->struct_mutex);
872 DRM_ERROR("Failed hw init on reset %d\n", ret);
877 * FIXME: This races pretty badly against concurrent holders of
878 * ring interrupts. This is possible since we've started to drop
879 * dev->struct_mutex in select places when waiting for the gpu.
883 * rps/rc6 re-init is necessary to restore state lost after the
884 * reset and the re-install of gt irqs. Skip for ironlake per
885 * previous concerns that it doesn't respond well to some forms
886 * of re-init after reset.
888 if (INTEL_INFO(dev)->gen > 5)
889 intel_reset_gt_powersave(dev);
891 mutex_unlock(&dev->struct_mutex);
897 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
899 struct intel_device_info *intel_info =
900 (struct intel_device_info *) ent->driver_data;
902 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
903 DRM_INFO("This hardware requires preliminary hardware support.\n"
904 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
908 /* Only bind to function 0 of the device. Early generations
909 * used function 1 as a placeholder for multi-head. This causes
910 * us confusion instead, especially on the systems where both
911 * functions have the same PCI-ID!
913 if (PCI_FUNC(pdev->devfn))
916 driver.driver_features &= ~(DRIVER_USE_AGP);
918 return drm_get_pci_dev(pdev, ent, &driver);
922 i915_pci_remove(struct pci_dev *pdev)
924 struct drm_device *dev = pci_get_drvdata(pdev);
929 static int i915_pm_suspend(struct device *dev)
931 struct pci_dev *pdev = to_pci_dev(dev);
932 struct drm_device *drm_dev = pci_get_drvdata(pdev);
934 if (!drm_dev || !drm_dev->dev_private) {
935 dev_err(dev, "DRM not initialized, aborting suspend.\n");
939 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
942 return i915_drm_suspend(drm_dev);
945 static int i915_pm_suspend_late(struct device *dev)
947 struct pci_dev *pdev = to_pci_dev(dev);
948 struct drm_device *drm_dev = pci_get_drvdata(pdev);
951 * We have a suspend ordering issue with the snd-hda driver also
952 * requiring our device to be powered up. Due to the lack of a
953 * parent/child relationship we currently solve this with a late
956 * FIXME: This should be solved with a special hdmi sink device or
957 * similar so that power domains can be employed.
959 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
962 return i915_drm_suspend_late(drm_dev);
965 static int i915_pm_resume_early(struct device *dev)
967 struct pci_dev *pdev = to_pci_dev(dev);
968 struct drm_device *drm_dev = pci_get_drvdata(pdev);
970 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
973 return i915_drm_resume_early(drm_dev);
976 static int i915_pm_resume(struct device *dev)
978 struct pci_dev *pdev = to_pci_dev(dev);
979 struct drm_device *drm_dev = pci_get_drvdata(pdev);
981 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
984 return i915_drm_resume(drm_dev);
987 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
989 hsw_enable_pc8(dev_priv);
995 * Save all Gunit registers that may be lost after a D3 and a subsequent
996 * S0i[R123] transition. The list of registers needing a save/restore is
997 * defined in the VLV2_S0IXRegs document. This documents marks all Gunit
998 * registers in the following way:
999 * - Driver: saved/restored by the driver
1000 * - Punit : saved/restored by the Punit firmware
1001 * - No, w/o marking: no need to save/restore, since the register is R/O or
1002 * used internally by the HW in a way that doesn't depend on
1003 * keeping the content across a suspend/resume.
1004 * - Debug : used for debugging
1006 * We save/restore all registers marked with 'Driver', with the following
1008 * - Registers out of use, including registers marked with 'Debug'.
1009 * These have no effect on the driver's operation, so we don't save/restore
1010 * them to reduce the overhead.
1011 * - Registers that are fully setup by an initialization function called from
1012 * the resume path. For example many clock gating and RPS/RC6 registers.
1013 * - Registers that provide the right functionality with their reset defaults.
1015 * TODO: Except for registers that, based on the above 3 criteria, can be safely
1016 * ignored, we save/restore all others, practically treating the HW context as
1017 * a black-box for the driver. Further investigation is needed to reduce the
1018 * saved/restored registers even further, by following the same 3 criteria.
1020 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1022 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1025 /* GAM 0x4000-0x4770 */
1026 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1027 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1028 s->arb_mode = I915_READ(ARB_MODE);
1029 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1030 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1032 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1033 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
1035 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1036 s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1038 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1039 s->ecochk = I915_READ(GAM_ECOCHK);
1040 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1041 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1043 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1045 /* MBC 0x9024-0x91D0, 0x8500 */
1046 s->g3dctl = I915_READ(VLV_G3DCTL);
1047 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1048 s->mbctl = I915_READ(GEN6_MBCTL);
1050 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1051 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1052 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1053 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1054 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1055 s->rstctl = I915_READ(GEN6_RSTCTL);
1056 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1058 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1059 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1060 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1061 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1062 s->ecobus = I915_READ(ECOBUS);
1063 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1064 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1065 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1066 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1067 s->rcedata = I915_READ(VLV_RCEDATA);
1068 s->spare2gh = I915_READ(VLV_SPAREG2H);
1070 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1071 s->gt_imr = I915_READ(GTIMR);
1072 s->gt_ier = I915_READ(GTIER);
1073 s->pm_imr = I915_READ(GEN6_PMIMR);
1074 s->pm_ier = I915_READ(GEN6_PMIER);
1076 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1077 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1079 /* GT SA CZ domain, 0x100000-0x138124 */
1080 s->tilectl = I915_READ(TILECTL);
1081 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1082 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1083 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1084 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1086 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1087 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1088 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1089 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1092 * Not saving any of:
1093 * DFT, 0x9800-0x9EC0
1094 * SARB, 0xB000-0xB1FC
1095 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1100 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1102 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1106 /* GAM 0x4000-0x4770 */
1107 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1108 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1109 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1110 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1111 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1113 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1114 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1116 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1117 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1119 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1120 I915_WRITE(GAM_ECOCHK, s->ecochk);
1121 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1122 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1124 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1126 /* MBC 0x9024-0x91D0, 0x8500 */
1127 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1128 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1129 I915_WRITE(GEN6_MBCTL, s->mbctl);
1131 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1132 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1133 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1134 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1135 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1136 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1137 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1139 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1140 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1141 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1142 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1143 I915_WRITE(ECOBUS, s->ecobus);
1144 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1145 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
1146 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1147 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1148 I915_WRITE(VLV_RCEDATA, s->rcedata);
1149 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1151 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1152 I915_WRITE(GTIMR, s->gt_imr);
1153 I915_WRITE(GTIER, s->gt_ier);
1154 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1155 I915_WRITE(GEN6_PMIER, s->pm_ier);
1157 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1158 I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1160 /* GT SA CZ domain, 0x100000-0x138124 */
1161 I915_WRITE(TILECTL, s->tilectl);
1162 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1164 * Preserve the GT allow wake and GFX force clock bit, they are not to
1165 * be restored, as they are used to control the s0ix suspend/resume
1166 * sequence by the caller.
1168 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1169 val &= VLV_GTLC_ALLOWWAKEREQ;
1170 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1171 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1173 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1174 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1175 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1176 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1178 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1180 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1181 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1182 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1183 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
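/*
 * The tail of vlv_restore_gunit_s0ix_state() above merges a saved register
 * value into the live one while leaving selected bits (ALLOWWAKEREQ,
 * GFX_CLK_FORCE_ON) under the caller's control: read the current value, keep
 * only the preserved bits, OR in the saved value with those bits cleared,
 * then write it back. A standalone sketch of that idiom (hypothetical
 * register and bit, illustration only):
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdio.h>
#include <stdint.h>

#define DEMO_KEEP_BIT	(1u << 3)	/* bit the caller keeps controlling */

static uint32_t demo_reg = 0x0000000f;	/* "live" register contents */

static void demo_restore(uint32_t saved)
{
	uint32_t val = demo_reg;		/* read */

	val &= DEMO_KEEP_BIT;			/* keep only the preserved bit */
	val |= saved & ~DEMO_KEEP_BIT;		/* merge the rest from the save */
	demo_reg = val;				/* write back */
}

int main(void)
{
	demo_restore(0xa0);
	/* prints 0xa8: bit 3 kept from the live value, the rest restored */
	printf("restored value: 0x%02x\n", (unsigned)demo_reg);
	return 0;
}
#endif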
1186 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1191 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1192 WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
1194 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1195 /* Wait for a previous force-off to settle */
1197 err = wait_for(!COND, 20);
1199 DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
1200 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1205 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1206 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1208 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1209 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1214 err = wait_for(COND, 20);
1216 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1217 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
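/*
 * vlv_force_gfx_clock() above relies on the driver's wait_for(COND, ms)
 * helper: poll a condition until it becomes true or a millisecond deadline
 * expires, returning -ETIMEDOUT on expiry. A standalone sketch of that
 * polling shape (hypothetical helper and "register", illustration only --
 * the real macro in intel_drv.h typically also sleeps between reads):
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdio.h>
#include <time.h>
#include <errno.h>

static int demo_status;			/* stands in for a status register */

static int demo_read_status(void)
{
	demo_status++;			/* pretend the HW eventually settles */
	return demo_status > 1000;
}

static int demo_wait_for(int (*cond)(void), int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -ETIMEDOUT;
	}
}

int main(void)
{
	printf("wait_for returned %d\n", demo_wait_for(demo_read_status, 20));
	return 0;
}
#endif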
1223 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1228 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1229 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1231 val |= VLV_GTLC_ALLOWWAKEREQ;
1232 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1233 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1235 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1237 err = wait_for(COND, 1);
1239 DRM_ERROR("timeout disabling GT waking\n");
1244 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1251 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1252 val = wait_for_on ? mask : 0;
1253 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1257 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1258 wait_for_on ? "on" : "off",
1259 I915_READ(VLV_GTLC_PW_STATUS));
1262 * RC6 transitioning can be delayed up to 2 msec (see
1263 * valleyview_enable_rps), use 3 msec for safety.
1265 err = wait_for(COND, 3);
1267 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1268 wait_for_on ? "on" : "off");
1274 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1276 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1279 DRM_ERROR("GT register access while GT waking disabled\n");
1280 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1283 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
1289 * Bspec defines the following GT power well "on" flags as debug only, so
1290 * don't treat them as hard failures.
1292 (void)vlv_wait_for_gt_wells(dev_priv, false);
1294 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1295 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1297 vlv_check_no_gt_access(dev_priv);
1299 err = vlv_force_gfx_clock(dev_priv, true);
1303 err = vlv_allow_gt_wake(dev_priv, false);
1306 vlv_save_gunit_s0ix_state(dev_priv);
1308 err = vlv_force_gfx_clock(dev_priv, false);
1315 /* For safety always re-enable waking and disable gfx clock forcing */
1316 vlv_allow_gt_wake(dev_priv, true);
1318 vlv_force_gfx_clock(dev_priv, false);
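/*
 * vlv_suspend_complete() above is an ordered sequence -- force the gfx clock
 * on, disallow GT wake, save the Gunit state, drop the clock force -- where
 * any failure unwinds the steps already taken (re-allow wake, stop forcing
 * the clock) before returning the error. A standalone sketch of that
 * acquire-in-order / unwind-on-error shape (hypothetical steps, illustration
 * only):
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdio.h>

static int demo_force_clock(int on)	{ printf("force_clock(%d)\n", on); return 0; }
static int demo_allow_wake(int allow)	{ printf("allow_wake(%d)\n", allow); return 0; }
static void demo_save_state(void)	{ printf("save_state()\n"); }

static int demo_suspend_complete(void)
{
	int err;

	err = demo_force_clock(1);
	if (err)
		goto err1;

	err = demo_allow_wake(0);
	if (err)
		goto err2;

	demo_save_state();

	err = demo_force_clock(0);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable clock forcing. */
	demo_allow_wake(1);
err1:
	demo_force_clock(0);
	return err;
}

int main(void)
{
	return demo_suspend_complete();
}
#endif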
1323 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1326 struct drm_device *dev = dev_priv->dev;
1331 * If any of the steps fail just try to continue, that's the best we
1332 * can do at this point. Return the first error code (which will also
1333 * leave RPM permanently disabled).
1335 ret = vlv_force_gfx_clock(dev_priv, true);
1337 vlv_restore_gunit_s0ix_state(dev_priv);
1339 err = vlv_allow_gt_wake(dev_priv, true);
1343 err = vlv_force_gfx_clock(dev_priv, false);
1347 vlv_check_no_gt_access(dev_priv);
1350 intel_init_clock_gating(dev);
1351 i915_gem_restore_fences(dev);
1357 static int intel_runtime_suspend(struct device *device)
1359 struct pci_dev *pdev = to_pci_dev(device);
1360 struct drm_device *dev = pci_get_drvdata(pdev);
1361 struct drm_i915_private *dev_priv = dev->dev_private;
1364 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1367 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1370 assert_force_wake_inactive(dev_priv);
1372 DRM_DEBUG_KMS("Suspending device\n");
1375 * We could deadlock here in case another thread holding struct_mutex
1376 * calls RPM suspend concurrently, since the RPM suspend will wait
1377 * first for this RPM suspend to finish. In this case the concurrent
1378 * RPM resume will be followed by its RPM suspend counterpart. Still
1379 * for consistency return -EAGAIN, which will reschedule this suspend.
1381 if (!mutex_trylock(&dev->struct_mutex)) {
1382 DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1384 * Bump the expiration timestamp, otherwise the suspend won't
1387 pm_runtime_mark_last_busy(device);
1392 * We are safe here against re-faults, since the fault handler takes
1395 i915_gem_release_all_mmaps(dev_priv);
1396 mutex_unlock(&dev->struct_mutex);
1398 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1399 intel_runtime_pm_disable_interrupts(dev_priv);
1400 intel_suspend_gt_powersave(dev);
1402 ret = intel_suspend_complete(dev_priv);
1404 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1405 intel_runtime_pm_enable_interrupts(dev_priv);
1410 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1411 dev_priv->pm.suspended = true;
1414 * FIXME: We really should find a document that references the arguments
1417 if (IS_HASWELL(dev)) {
1419 * current versions of firmware which depend on this opregion
1420 * notification have repurposed the D1 definition to mean
1421 * "runtime suspended" vs. what you would normally expect (D3)
1422 * to distinguish it from notifications that might be sent via
1425 intel_opregion_notify_adapter(dev, PCI_D1);
1428 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1429 * being detected, and the call we do at intel_runtime_resume()
1430 * won't be able to restore them. Since PCI_D3hot matches the
1431 * actual specification and appears to be working, use it. Let's
1432 * assume the other non-Haswell platforms will stay the same as
1435 intel_opregion_notify_adapter(dev, PCI_D3hot);
1438 DRM_DEBUG_KMS("Device suspended\n");
1442 static int intel_runtime_resume(struct device *device)
1444 struct pci_dev *pdev = to_pci_dev(device);
1445 struct drm_device *dev = pci_get_drvdata(pdev);
1446 struct drm_i915_private *dev_priv = dev->dev_private;
1449 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1452 DRM_DEBUG_KMS("Resuming device\n");
1454 intel_opregion_notify_adapter(dev, PCI_D0);
1455 dev_priv->pm.suspended = false;
1457 if (IS_GEN6(dev_priv))
1458 intel_init_pch_refclk(dev);
1459 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1460 hsw_disable_pc8(dev_priv);
1461 else if (IS_VALLEYVIEW(dev_priv))
1462 ret = vlv_resume_prepare(dev_priv, true);
1465 * No point in rolling back things in case of an error, as the best
1466 * we can do is to hope that things will still work (and disable RPM).
1468 i915_gem_init_swizzling(dev);
1469 gen6_update_ring_freq(dev);
1471 intel_runtime_pm_enable_interrupts(dev_priv);
1472 intel_enable_gt_powersave(dev);
1475 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1477 DRM_DEBUG_KMS("Device resumed\n");
1483 * This function implements common functionality of runtime and system
1486 static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1488 struct drm_device *dev = dev_priv->dev;
1491 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1492 ret = hsw_suspend_complete(dev_priv);
1493 else if (IS_VALLEYVIEW(dev))
1494 ret = vlv_suspend_complete(dev_priv);
1501 static const struct dev_pm_ops i915_pm_ops = {
1503 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1506 .suspend = i915_pm_suspend,
1507 .suspend_late = i915_pm_suspend_late,
1508 .resume_early = i915_pm_resume_early,
1509 .resume = i915_pm_resume,
1513 * @freeze, @freeze_late : called (1) before creating the
1514 * hibernation image [PMSG_FREEZE] and
1515 * (2) after rebooting, before restoring
1516 * the image [PMSG_QUIESCE]
1517 * @thaw, @thaw_early : called (1) after creating the hibernation
1518 * image, before writing it [PMSG_THAW]
1519 * and (2) after failing to create or
1520 * restore the image [PMSG_RECOVER]
1521 * @poweroff, @poweroff_late: called after writing the hibernation
1522 * image, before rebooting [PMSG_HIBERNATE]
1523 * @restore, @restore_early : called after rebooting and restoring the
1524 * hibernation image [PMSG_RESTORE]
1526 .freeze = i915_pm_suspend,
1527 .freeze_late = i915_pm_suspend_late,
1528 .thaw_early = i915_pm_resume_early,
1529 .thaw = i915_pm_resume,
1530 .poweroff = i915_pm_suspend,
1531 .poweroff_late = i915_pm_suspend_late,
1532 .restore_early = i915_pm_resume_early,
1533 .restore = i915_pm_resume,
1535 /* S0ix (via runtime suspend) event handlers */
1536 .runtime_suspend = intel_runtime_suspend,
1537 .runtime_resume = intel_runtime_resume,
1540 static const struct vm_operations_struct i915_gem_vm_ops = {
1541 .fault = i915_gem_fault,
1542 .open = drm_gem_vm_open,
1543 .close = drm_gem_vm_close,
1546 static const struct file_operations i915_driver_fops = {
1547 .owner = THIS_MODULE,
1549 .release = drm_release,
1550 .unlocked_ioctl = drm_ioctl,
1551 .mmap = drm_gem_mmap,
1554 #ifdef CONFIG_COMPAT
1555 .compat_ioctl = i915_compat_ioctl,
1557 .llseek = noop_llseek,
1560 static struct drm_driver driver = {
1561 /* Don't use MTRRs here; the Xserver or userspace app should
1562 * deal with them for Intel hardware.
1566 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1568 .load = i915_driver_load,
1569 .unload = i915_driver_unload,
1570 .open = i915_driver_open,
1571 .lastclose = i915_driver_lastclose,
1572 .preclose = i915_driver_preclose,
1573 .postclose = i915_driver_postclose,
1574 .set_busid = drm_pci_set_busid,
1576 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1577 .suspend = i915_suspend_legacy,
1578 .resume = i915_resume_legacy,
1580 .device_is_agp = i915_driver_device_is_agp,
1581 .master_create = i915_master_create,
1582 .master_destroy = i915_master_destroy,
1583 #if defined(CONFIG_DEBUG_FS)
1584 .debugfs_init = i915_debugfs_init,
1585 .debugfs_cleanup = i915_debugfs_cleanup,
1587 .gem_free_object = i915_gem_free_object,
1588 .gem_vm_ops = &i915_gem_vm_ops,
1590 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1591 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1592 .gem_prime_export = i915_gem_prime_export,
1593 .gem_prime_import = i915_gem_prime_import,
1595 .dumb_create = i915_gem_dumb_create,
1596 .dumb_map_offset = i915_gem_dumb_map_offset,
1597 .dumb_destroy = drm_gem_dumb_destroy,
1598 .ioctls = i915_ioctls,
1599 .fops = &i915_driver_fops,
1600 .name = DRIVER_NAME,
1601 .desc = DRIVER_DESC,
1602 .date = DRIVER_DATE,
1603 .major = DRIVER_MAJOR,
1604 .minor = DRIVER_MINOR,
1605 .patchlevel = DRIVER_PATCHLEVEL,
1608 static struct pci_driver i915_pci_driver = {
1609 .name = DRIVER_NAME,
1610 .id_table = pciidlist,
1611 .probe = i915_pci_probe,
1612 .remove = i915_pci_remove,
1613 .driver.pm = &i915_pm_ops,
1616 static int __init i915_init(void)
1618 driver.num_ioctls = i915_max_ioctl;
1621 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
1622 * explicitly disabled with the module parameter.
1624 * Otherwise, just follow the parameter (defaulting to off).
1626 * Allow optional vga_text_mode_force boot option to override
1627 * the default behavior.
1629 #if defined(CONFIG_DRM_I915_KMS)
1630 if (i915.modeset != 0)
1631 driver.driver_features |= DRIVER_MODESET;
1633 if (i915.modeset == 1)
1634 driver.driver_features |= DRIVER_MODESET;
1636 #ifdef CONFIG_VGA_CONSOLE
1637 if (vgacon_text_force() && i915.modeset == -1)
1638 driver.driver_features &= ~DRIVER_MODESET;
1641 if (!(driver.driver_features & DRIVER_MODESET)) {
1642 driver.get_vblank_timestamp = NULL;
1643 #ifndef CONFIG_DRM_I915_UMS
1644 /* Silently fail loading to not upset userspace. */
1645 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1650 return drm_pci_init(&driver, &i915_pci_driver);
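/*
 * i915_init() above treats i915.modeset as a tri-state: -1 means "auto"
 * (follow the CONFIG_DRM_I915_KMS default, but let vga_text_mode_force turn
 * KMS off), 0 forces it off and 1 forces it on. A standalone sketch of
 * resolving such a tri-state parameter (hypothetical names, illustration
 * only):
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdio.h>
#include <stdbool.h>

static bool demo_resolve_modeset(int param, bool kms_default, bool text_force)
{
	if (param == 0)			/* explicitly disabled */
		return false;
	if (param == 1)			/* explicitly enabled */
		return true;
	/* param == -1: auto -- config default, unless text mode was forced */
	return kms_default && !text_force;
}

int main(void)
{
	printf("auto + KMS config + vga_text_mode_force => %d\n",
	       demo_resolve_modeset(-1, true, true));	/* 0: KMS stays off */
	printf("modeset=1 overrides vga_text_mode_force => %d\n",
	       demo_resolve_modeset(1, true, true));	/* 1: KMS forced on */
	return 0;
}
#endif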
1653 static void __exit i915_exit(void)
1655 #ifndef CONFIG_DRM_I915_UMS
1656 if (!(driver.driver_features & DRIVER_MODESET))
1657 return; /* Never loaded a driver. */
1660 drm_pci_exit(&driver, &i915_pci_driver);
1663 module_init(i915_init);
1664 module_exit(i915_exit);
1666 MODULE_AUTHOR("Tungsten Graphics, Inc.");
1667 MODULE_AUTHOR("Intel Corporation");
1669 MODULE_DESCRIPTION(DRIVER_DESC);
1670 MODULE_LICENSE("GPL and additional rights");