/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <acpi/video.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
#include "intel_workarounds.h"

static struct drm_driver driver;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		i915_modparams.inject_load_failure = 0;
		return true;
	}

	return false;
}

bool i915_error_injected(void)
{
	return i915_load_fail_count && !i915_modparams.inject_load_failure;
}

#endif

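/*
 * Usage sketch (assuming the standard i915 modparam plumbing): booting with
 * i915.inject_load_failure=3 makes the third i915_inject_load_failure()
 * checkpoint report a failure, exercising the corresponding error-unwind
 * path; i915_error_injected() then reports that the failure was synthetic
 * rather than a real hardware error.
 */
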
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;
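
	/*
	 * KERN_ERR expands to the string "\0013" (KERN_SOH followed by the
	 * loglevel digit '3') and KERN_DEBUG to "\0017", so level[1] is the
	 * ASCII loglevel digit; the comparisons above classify the message
	 * by severity without any parsing.
	 */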

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_error)
		dev_printk(level, kdev, "%pV", &vaf);
	else
		dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
			   __builtin_return_address(0), &vaf);

	va_end(args);

	if (is_error && !shown_bug_once) {
		/*
		 * Ask the user to file a bug report for the error, except
		 * if they may have caused the bug by fiddling with unsafe
		 * module parameters.
		 */
		if (!test_taint(TAINT_USER))
			dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 5));
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		return PCH_CPT;
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		/* PantherPoint is CPT compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
			!IS_COFFEELAKE(dev_priv));
		return PCH_KBP;
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
		WARN_ON(!IS_ICELAKE(dev_priv));
		return PCH_ICP;
	default:
		return PCH_NONE;
	}
}

static bool intel_is_virt_pch(unsigned short id,
			      unsigned short svendor, unsigned short sdevice)
{
	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
		 sdevice == PCI_SUBDEVICE_ID_QEMU));
}

static unsigned short
intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
{
	unsigned short id = 0;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN(dev_priv, 5))
		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
	else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
	else if (IS_ICELAKE(dev_priv))
		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;

	if (id)
		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
	else
		DRM_DEBUG_KMS("Assuming no PCH\n");

	return id;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easily for the VMM, which
	 * then only needs to expose an ISA bridge to let the driver know
	 * the real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;
		enum intel_pch pch_type;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		pch_type = intel_pch_type(dev_priv, id);
		if (pch_type != PCH_NONE) {
			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
					     pch->subsystem_device)) {
			id = intel_virt_detect_pch(dev_priv);
			pch_type = intel_pch_type(dev_priv, id);

			/* Sanity check virtual PCH id */
			if (WARN_ON(id && pch_type == PCH_NONE))
				id = 0;

			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		}
	}

	/*
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
	if (pch && !HAS_DISPLAY(dev_priv)) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
	}

	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

static int i915_getparam_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = 0;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = 0;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		value = intel_huc_check_status(&dev_priv->huc);
		if (value < 0)
			return value;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->caps.scheduler;
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	case I915_PARAM_MMAP_GTT_COHERENT:
		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

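/*
 * Userspace side, for reference: a minimal sketch using only the uapi in
 * <drm/i915_drm.h> (error handling omitted):
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &value };
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */
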
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
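
	/*
	 * On gen4+ MCHBAR is a 64-bit base split across two 32-bit config
	 * dwords; e.g. temp_hi = 0x00000001 and temp_lo = 0xfed10000
	 * reassemble to mchbar_addr = 0x1fed10000.
	 */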

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/*
 * Setup MCHBAR if possible, noting in mchbar_need_disable whether we
 * should disable it again on teardown.
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (HAS_DISPLAY(dev_priv)) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out;
	}

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);
	intel_power_domains_init_hw(dev_priv, false);
	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);
	intel_init_ipc(dev_priv);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_modeset:
	intel_modeset_cleanup(dev);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini_hw(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_wopcm_init_early(&dev_priv->wopcm);
	intel_uc_init_early(dev_priv);
	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_uc;
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_uc:
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");
		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune(dev_priv);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
{
	if (size == 0)
		return I915_DRAM_RANK_INVALID;
	if (rank == SKL_DRAM_RANK_SINGLE)
		return I915_DRAM_RANK_SINGLE;
	else if (rank == SKL_DRAM_RANK_DUAL)
		return I915_DRAM_RANK_DUAL;

	return I915_DRAM_RANK_INVALID;
}

static bool
skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
{
	if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
		return true;
	else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
		return true;
	else if (rank == I915_DRAM_RANK_SINGLE && width == 16 && size == 8)
		return true;
	else if (rank == I915_DRAM_RANK_DUAL && width == 16 && size == 16)
		return true;

	return false;
}

static int
skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
{
	u32 tmp_l, tmp_s;
	u32 s_val = val >> SKL_DRAM_S_SHIFT;

	if (!val)
		return -EINVAL;

	tmp_l = val & SKL_DRAM_SIZE_MASK;
	tmp_s = s_val & SKL_DRAM_SIZE_MASK;

	if (tmp_l == 0 && tmp_s == 0)
		return -EINVAL;

	ch->l_info.size = tmp_l;
	ch->s_info.size = tmp_s;

	tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
	tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
	ch->l_info.width = (1 << tmp_l) * 8;
	ch->s_info.width = (1 << tmp_s) * 8;
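
	/*
	 * The width field is a log2 encoding: 0 -> x8, 1 -> x16, 2 -> x32,
	 * hence the (1 << tmp) * 8 above.
	 */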

	tmp_l = val & SKL_DRAM_RANK_MASK;
	tmp_s = s_val & SKL_DRAM_RANK_MASK;
	ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
	ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);

	if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
	    ch->s_info.rank == I915_DRAM_RANK_DUAL)
		ch->rank = I915_DRAM_RANK_DUAL;
	else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
		 ch->s_info.rank == I915_DRAM_RANK_SINGLE)
		ch->rank = I915_DRAM_RANK_DUAL;
	else
		ch->rank = I915_DRAM_RANK_SINGLE;

	ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
					    ch->l_info.width) ||
			   skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
					    ch->s_info.width);

	DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
		      ch->l_info.size, ch->l_info.width,
		      ch->l_info.rank == I915_DRAM_RANK_DUAL ? "dual" : "single",
		      ch->s_info.size, ch->s_info.width,
		      ch->s_info.rank == I915_DRAM_RANK_DUAL ? "dual" : "single");

	return 0;
}

static bool
intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
			struct dram_channel_info *ch0)
{
	return (val_ch0 == val_ch1 &&
		(ch0->s_info.size == 0 ||
		 (ch0->l_info.size == ch0->s_info.size &&
		  ch0->l_info.width == ch0->s_info.width &&
		  ch0->l_info.rank == ch0->s_info.rank)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0, ch1;
	u32 val_ch0, val_ch1;
	int ret;

	val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(&ch0, val_ch0);
	if (ret == 0)
		dram_info->num_channels++;

	val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(&ch1, val_ch1);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If any of the channels is a single-rank channel, the worst case
	 * output will be the same as for single-rank memory, so treat the
	 * whole configuration as single rank.
	 */
	if (ch0.rank == I915_DRAM_RANK_SINGLE ||
	    ch1.rank == I915_DRAM_RANK_SINGLE)
		dram_info->rank = I915_DRAM_RANK_SINGLE;
	else
		dram_info->rank = max(ch0.rank, ch1.rank);

	if (dram_info->rank == I915_DRAM_RANK_INVALID) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
									val_ch1,
									&ch0);

	DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
		      dev_priv->dram_info.symmetric_memory ? "" : "not ");
	return 0;
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
				    mem_freq_khz * 8;
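
	/*
	 * Each channel contributes 8 bytes per memory clock. Worked example
	 * (hypothetical values): 2 channels at 2133000 kHz give
	 * 2 * 2133000 * 8 = 34128000 kBps, i.e. ~34.1 GB/s.
	 */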

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents a 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
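
	/*
	 * Worked example (hypothetical values): two active channels at
	 * 1600000 kHz give 1600000 * 2 * 4 = 12800000 kBps, i.e. 12.8 GB/s.
	 */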

	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each of DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		u8 size, width;
		enum dram_rank rank;
		u32 tmp;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;
		tmp = val & BXT_DRAM_RANK_MASK;

		if (tmp == BXT_DRAM_RANK_SINGLE)
			rank = I915_DRAM_RANK_SINGLE;
		else if (tmp == BXT_DRAM_RANK_DUAL)
			rank = I915_DRAM_RANK_DUAL;
		else
			rank = I915_DRAM_RANK_INVALID;

		tmp = val & BXT_DRAM_SIZE_MASK;
		if (tmp == BXT_DRAM_SIZE_4GB)
			size = 4;
		else if (tmp == BXT_DRAM_SIZE_6GB)
			size = 6;
		else if (tmp == BXT_DRAM_SIZE_8GB)
			size = 8;
		else if (tmp == BXT_DRAM_SIZE_12GB)
			size = 12;
		else if (tmp == BXT_DRAM_SIZE_16GB)
			size = 16;
		else
			size = 0;

		tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
		width = (1 << tmp) * 8;
		DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
			      width, rank == I915_DRAM_RANK_SINGLE ? "single" :
			      rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");

		/*
		 * If any of the channels is a single-rank channel, the
		 * worst case output will be the same as for single-rank
		 * memory, so treat it as single rank.
		 */
		if (dram_info->rank == I915_DRAM_RANK_INVALID)
			dram_info->rank = rank;
		else if (rank == I915_DRAM_RANK_SINGLE)
			dram_info->rank = I915_DRAM_RANK_SINGLE;
	}

	if (dram_info->rank == I915_DRAM_RANK_INVALID) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->valid = true;
	return 0;
}

static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	char bandwidth_str[32];
	int ret;

	dram_info->valid = false;
	dram_info->rank = I915_DRAM_RANK_INVALID;
	dram_info->bandwidth_kbps = 0;
	dram_info->num_channels = 0;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
		return;

	/* Need to calculate bandwidth only for Gen9 */
	if (IS_BROXTON(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		ret = skl_get_dram_info(dev_priv);
	else
		ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return;

	if (dram_info->bandwidth_kbps)
		sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
	else
		sprintf(bandwidth_str, "unknown");
	DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
		      bandwidth_str, dram_info->num_channels);
	DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
		      (dram_info->rank == I915_DRAM_RANK_DUAL) ?
		      "dual" : "single", yesno(dram_info->is_16gb_dimm));
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto err_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto err_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");
			goto err_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");
			goto err_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_gt_init_workarounds(dev_priv);
	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and so the capability was defeatured. MSI
	 * interrupts seem to get lost on g4x as well, and interrupt delivery
	 * seems to stay properly dead afterwards. So we'll just disable them
	 * for all pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN(dev_priv, 5))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(dev_priv);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), then flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;
	pci_set_drvdata(pdev, &i915->drm);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     BITS_PER_TYPE(device_info->platform_mask));
	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}

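/*
 * The BUILD_BUG_ON()/BUG_ON() pair above guards the bitmask widths used by
 * the IS_PLATFORM()/IS_GEN() macros: every platform enum value must fit in
 * device_info->platform_mask, and the device's gen must fit in gen_mask.
 */
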
static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_init_early(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	i915_driver_destroy(dev_priv);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	disable_rpm_wakeref_asserts(dev_priv);

	i915_driver_unregister(dev_priv);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	intel_modeset_cleanup(dev);

	intel_bios_cleanup(dev_priv);

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	i915_gem_fini(dev_priv);

	intel_power_domains_fini_hw(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	i915_driver_destroy(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any error introduced by the client.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

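/*
 * In other words, suspend_to_idle() reports true when the ACPI target state
 * is shallower than S3 (i.e. s2idle/S0ix); anything at S3 or deeper is
 * treated as a full suspend-to-RAM.
 */
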
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int err;

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after that point,
	 * the GPU is not woken again.
	 */
	err = i915_gem_suspend(i915);
	if (err)
		dev_err(&i915->drm.pdev->dev,
			"GEM idle failed, suspend/resume might fail\n");

	return err;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(dev_priv);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(dev_priv);
	if (!dev_priv->uncore.user_forcewake.count)
		intel_runtime_pm_cleanup(dev_priv);

	return ret;
}

static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);
	intel_sanitize_gt_powersave(dev_priv);

	i915_gem_sanitize(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * irqs.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
		gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	intel_power_domains_resume(dev_priv);

	intel_engines_sanitize(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(dev);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(dev);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(dev);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(dev);
}

/* freeze: before creating the hibernation image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
	int ret;

	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(dev);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
	int ret;

	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(dev, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same
 * 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

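/*
 * The restore path below writes back exactly the registers captured by
 * vlv_save_gunit_s0ix_state() above, block by block (GAM, MBC, GCP, GPM,
 * display CZ, GT SA CZ, Gunit-display CZ).
 */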
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are
	 * deliberately not restored here, as they are used to control the
	 * s0ix suspend/resume sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
				  u32 mask, u32 val)
{
	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
			3);
}

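/*
 * A minimal usage sketch for the helper above: waiting for both GT wells
 * to power down before s0ix entry amounts to
 *
 *	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
 *	err = vlv_wait_for_pw_status(dev_priv, mask, 0);
 *
 * which is what vlv_wait_for_gt_wells() below does for either direction.
 */
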
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}

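/*
 * vlv_force_gfx_clock(dev_priv, true) is expected to be paired with a
 * matching vlv_force_gfx_clock(dev_priv, false) once the Gunit state has
 * been saved or restored; see vlv_suspend_complete() and
 * vlv_resume_prepare() below.
 */
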
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}

static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

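/*
 * s0ix entry sequence, as implemented below: force the GFX clock on,
 * disallow GT wake requests, save the Gunit state (skipped on CHV), then
 * release the clock force. Failures unwind in reverse order.
 */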
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

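/*
 * s0ix exit below is the mirror image of vlv_suspend_complete() above;
 * errors are reported but not unwound, since there is no better recovery
 * available at that point.
 */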
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_uc_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(dev_priv);

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11) {
		icl_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(dev_priv);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_uc_resume(dev_priv);

		i915_gem_init_swizzling(dev_priv);
		i915_gem_restore_fences(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->runtime_pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

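/*
 * Runtime resume mirrors intel_runtime_suspend() above: notify the
 * firmware via the opregion first, then bring the display core and DC
 * states back up per platform, and finally restore swizzling and fences.
 */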
static int intel_runtime_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->runtime_pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	if (INTEL_GEN(dev_priv) >= 11) {
		bxt_disable_dc9(dev_priv);
		icl_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload) {
			if (dev_priv->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(dev_priv);
			else if (dev_priv->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(dev_priv);
		}
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	intel_uncore_runtime_resume(dev_priv);

	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_uc_resume(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev_priv);
	i915_gem_restore_fences(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

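/*
 * i915_pm_ops is consumed by the PCI glue (in i915_pci.c), roughly:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		...
 *		.driver.pm = &i915_pm_ops,
 *	};
 */
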
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
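	/*
	 * The legacy DRI1 entries below are retained so the ioctl numbers
	 * stay stable, but they are all wired to drm_noop(); the obsolete
	 * pin/unpin ioctls are explicitly rejected via
	 * i915_gem_reject_pin_ioctl().
	 */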
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif