1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <linux/acpi.h>
31 #include <linux/device.h>
32 #include <linux/oom.h>
33 #include <linux/module.h>
34 #include <linux/pci.h>
35 #include <linux/pm.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/pnp.h>
38 #include <linux/slab.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
41 #include <linux/vt.h>
42 #include <acpi/video.h>
43
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_ioctl.h>
46 #include <drm/drm_irq.h>
47 #include <drm/drm_probe_helper.h>
48 #include <drm/i915_drm.h>
49
50 #include "gem/i915_gem_context.h"
51 #include "gem/i915_gem_ioctls.h"
52 #include "gt/intel_gt_pm.h"
53 #include "gt/intel_reset.h"
54 #include "gt/intel_workarounds.h"
55
56 #include "i915_debugfs.h"
57 #include "i915_drv.h"
58 #include "i915_irq.h"
59 #include "i915_pmu.h"
60 #include "i915_query.h"
61 #include "i915_trace.h"
62 #include "i915_vgpu.h"
63 #include "intel_acpi.h"
64 #include "intel_audio.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_csr.h"
68 #include "intel_dp.h"
69 #include "intel_drv.h"
70 #include "intel_fbdev.h"
71 #include "intel_gmbus.h"
72 #include "intel_hotplug.h"
73 #include "intel_overlay.h"
74 #include "intel_pipe_crc.h"
75 #include "intel_pm.h"
76 #include "intel_sprite.h"
77 #include "intel_uc.h"
78
79 static struct drm_driver driver;
80
81 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
82 static unsigned int i915_load_fail_count;
83
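/*
 * Load-failure injection: with the i915.inject_load_failure modparam set
 * to N, the Nth call to i915_inject_load_failure() reports a failure
 * exactly once, so the error-unwind paths of driver probe can be
 * exercised from CI.
 */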
84 bool __i915_inject_load_failure(const char *func, int line)
85 {
86         if (i915_load_fail_count >= i915_modparams.inject_load_failure)
87                 return false;
88
89         if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
90                 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
91                          i915_modparams.inject_load_failure, func, line);
92                 i915_modparams.inject_load_failure = 0;
93                 return true;
94         }
95
96         return false;
97 }
98
99 bool i915_error_injected(void)
100 {
101         return i915_load_fail_count && !i915_modparams.inject_load_failure;
102 }
103
104 #endif
105
106 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
107 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
108                     "providing the dmesg log by booting with drm.debug=0xf"
109
110 void
111 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
112               const char *fmt, ...)
113 {
114         static bool shown_bug_once;
115         struct device *kdev = dev_priv->drm.dev;
116         bool is_error = level[1] <= KERN_ERR[1];
117         bool is_debug = level[1] == KERN_DEBUG[1];
118         struct va_format vaf;
119         va_list args;
120
121         if (is_debug && !(drm_debug & DRM_UT_DRIVER))
122                 return;
123
124         va_start(args, fmt);
125
126         vaf.fmt = fmt;
127         vaf.va = &args;
128
129         if (is_error)
130                 dev_printk(level, kdev, "%pV", &vaf);
131         else
132                 dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
133                            __builtin_return_address(0), &vaf);
134
135         va_end(args);
136
137         if (is_error && !shown_bug_once) {
138                 /*
139                  * Ask the user to file a bug report for the error, except
140                  * if they may have caused the bug by fiddling with unsafe
141                  * module parameters.
142                  */
143                 if (!test_taint(TAINT_USER))
144                         dev_notice(kdev, "%s", FDO_BUG_MSG);
145                 shown_bug_once = true;
146         }
147 }
148
149 /* Map PCH device id to PCH type, or PCH_NONE if unknown. */
150 static enum intel_pch
151 intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
152 {
153         switch (id) {
154         case INTEL_PCH_IBX_DEVICE_ID_TYPE:
155                 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
156                 WARN_ON(!IS_GEN(dev_priv, 5));
157                 return PCH_IBX;
158         case INTEL_PCH_CPT_DEVICE_ID_TYPE:
159                 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
160                 WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
161                 return PCH_CPT;
162         case INTEL_PCH_PPT_DEVICE_ID_TYPE:
163                 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
164                 WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
165                 /* PantherPoint is CPT compatible */
166                 return PCH_CPT;
167         case INTEL_PCH_LPT_DEVICE_ID_TYPE:
168                 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
169                 WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
170                 WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
171                 return PCH_LPT;
172         case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
173                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
174                 WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
175                 WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
176                 return PCH_LPT;
177         case INTEL_PCH_WPT_DEVICE_ID_TYPE:
178                 DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
179                 WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
180                 WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
181                 /* WildcatPoint is LPT compatible */
182                 return PCH_LPT;
183         case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
184                 DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
185                 WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
186                 WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
187                 /* WildcatPoint is LPT compatible */
188                 return PCH_LPT;
189         case INTEL_PCH_SPT_DEVICE_ID_TYPE:
190                 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
191                 WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
192                 return PCH_SPT;
193         case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
194                 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
195                 WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
196                 return PCH_SPT;
197         case INTEL_PCH_KBP_DEVICE_ID_TYPE:
198                 DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
199                 WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
200                         !IS_COFFEELAKE(dev_priv));
201                 /* KBP is SPT compatible */
202                 return PCH_SPT;
203         case INTEL_PCH_CNP_DEVICE_ID_TYPE:
204                 DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
205                 WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
206                 return PCH_CNP;
207         case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
208                 DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
209                 WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
210                 return PCH_CNP;
211         case INTEL_PCH_CMP_DEVICE_ID_TYPE:
212                 DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
213                 WARN_ON(!IS_COFFEELAKE(dev_priv));
214                 /* CometPoint is CNP compatible */
215                 return PCH_CNP;
216         case INTEL_PCH_ICP_DEVICE_ID_TYPE:
217                 DRM_DEBUG_KMS("Found Ice Lake PCH\n");
218                 WARN_ON(!IS_ICELAKE(dev_priv));
219                 return PCH_ICP;
220         default:
221                 return PCH_NONE;
222         }
223 }
224
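/*
 * Known virtual/emulated ISA bridges (P2X, P3X and the QEMU south bridge)
 * that may stand in for the real PCH when it cannot be passed through.
 */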
225 static bool intel_is_virt_pch(unsigned short id,
226                               unsigned short svendor, unsigned short sdevice)
227 {
228         return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
229                 id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
230                 (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
231                  svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
232                  sdevice == PCI_SUBDEVICE_ID_QEMU));
233 }
234
235 static unsigned short
236 intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
237 {
238         unsigned short id = 0;
239
240         /*
241          * In a virtualized passthrough environment we can be in a
242          * setup where the ISA bridge cannot be passed through.
243          * In this case, a south bridge can be emulated and we have to
244          * make an educated guess as to which PCH is really there.
245          */
246
247         if (IS_ICELAKE(dev_priv))
248                 id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
249         else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
250                 id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
251         else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
252                 id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
253         else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
254                 id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
255         else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
256                 id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
257         else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
258                 id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
259         else if (IS_GEN(dev_priv, 5))
260                 id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
261
262         if (id)
263                 DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
264         else
265                 DRM_DEBUG_KMS("Assuming no PCH\n");
266
267         return id;
268 }
269
270 static void intel_detect_pch(struct drm_i915_private *dev_priv)
271 {
272         struct pci_dev *pch = NULL;
273
274         /*
275          * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
276          * make graphics device passthrough easy for the VMM, which only
277          * needs to expose the ISA bridge to let the driver know the real
278          * hardware underneath. This is a requirement from the virtualization team.
279          *
280          * In some virtualized environments (e.g. XEN), there is an irrelevant
281          * ISA bridge in the system. To work reliably, we should scan through
282          * all the ISA bridge devices and check for the first match, instead
283          * of only checking the first one.
284          */
285         while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
286                 unsigned short id;
287                 enum intel_pch pch_type;
288
289                 if (pch->vendor != PCI_VENDOR_ID_INTEL)
290                         continue;
291
292                 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
293
294                 pch_type = intel_pch_type(dev_priv, id);
295                 if (pch_type != PCH_NONE) {
296                         dev_priv->pch_type = pch_type;
297                         dev_priv->pch_id = id;
298                         break;
299                 } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
300                                          pch->subsystem_device)) {
301                         id = intel_virt_detect_pch(dev_priv);
302                         pch_type = intel_pch_type(dev_priv, id);
303
304                         /* Sanity check virtual PCH id */
305                         if (WARN_ON(id && pch_type == PCH_NONE))
306                                 id = 0;
307
308                         dev_priv->pch_type = pch_type;
309                         dev_priv->pch_id = id;
310                         break;
311                 }
312         }
313
314         /*
315          * Use PCH_NOP (PCH but no South Display) for PCH platforms without
316          * display.
317          */
318         if (pch && !HAS_DISPLAY(dev_priv)) {
319                 DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
320                 dev_priv->pch_type = PCH_NOP;
321                 dev_priv->pch_id = 0;
322         }
323
324         if (!pch)
325                 DRM_DEBUG_KMS("No PCH found.\n");
326
327         pci_dev_put(pch);
328 }
329
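/*
 * GETPARAM ioctl: report a single driver or hardware capability to
 * userspace, selected by drm_i915_getparam_t::param.
 */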
330 static int i915_getparam_ioctl(struct drm_device *dev, void *data,
331                                struct drm_file *file_priv)
332 {
333         struct drm_i915_private *dev_priv = to_i915(dev);
334         struct pci_dev *pdev = dev_priv->drm.pdev;
335         drm_i915_getparam_t *param = data;
336         int value;
337
338         switch (param->param) {
339         case I915_PARAM_IRQ_ACTIVE:
340         case I915_PARAM_ALLOW_BATCHBUFFER:
341         case I915_PARAM_LAST_DISPATCH:
342         case I915_PARAM_HAS_EXEC_CONSTANTS:
343                 /* Reject all old ums/dri params. */
344                 return -ENODEV;
345         case I915_PARAM_CHIPSET_ID:
346                 value = pdev->device;
347                 break;
348         case I915_PARAM_REVISION:
349                 value = pdev->revision;
350                 break;
351         case I915_PARAM_NUM_FENCES_AVAIL:
352                 value = dev_priv->num_fence_regs;
353                 break;
354         case I915_PARAM_HAS_OVERLAY:
355                 value = dev_priv->overlay ? 1 : 0;
356                 break;
357         case I915_PARAM_HAS_BSD:
358                 value = !!dev_priv->engine[VCS0];
359                 break;
360         case I915_PARAM_HAS_BLT:
361                 value = !!dev_priv->engine[BCS0];
362                 break;
363         case I915_PARAM_HAS_VEBOX:
364                 value = !!dev_priv->engine[VECS0];
365                 break;
366         case I915_PARAM_HAS_BSD2:
367                 value = !!dev_priv->engine[VCS1];
368                 break;
369         case I915_PARAM_HAS_LLC:
370                 value = HAS_LLC(dev_priv);
371                 break;
372         case I915_PARAM_HAS_WT:
373                 value = HAS_WT(dev_priv);
374                 break;
375         case I915_PARAM_HAS_ALIASING_PPGTT:
376                 value = INTEL_PPGTT(dev_priv);
377                 break;
378         case I915_PARAM_HAS_SEMAPHORES:
379                 value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
380                 break;
381         case I915_PARAM_HAS_SECURE_BATCHES:
382                 value = capable(CAP_SYS_ADMIN);
383                 break;
384         case I915_PARAM_CMD_PARSER_VERSION:
385                 value = i915_cmd_parser_get_version(dev_priv);
386                 break;
387         case I915_PARAM_SUBSLICE_TOTAL:
388                 value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
389                 if (!value)
390                         return -ENODEV;
391                 break;
392         case I915_PARAM_EU_TOTAL:
393                 value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
394                 if (!value)
395                         return -ENODEV;
396                 break;
397         case I915_PARAM_HAS_GPU_RESET:
398                 value = i915_modparams.enable_hangcheck &&
399                         intel_has_gpu_reset(dev_priv);
400                 if (value && intel_has_reset_engine(dev_priv))
401                         value = 2;
402                 break;
403         case I915_PARAM_HAS_RESOURCE_STREAMER:
404                 value = 0;
405                 break;
406         case I915_PARAM_HAS_POOLED_EU:
407                 value = HAS_POOLED_EU(dev_priv);
408                 break;
409         case I915_PARAM_MIN_EU_IN_POOL:
410                 value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
411                 break;
412         case I915_PARAM_HUC_STATUS:
413                 value = intel_huc_check_status(&dev_priv->huc);
414                 if (value < 0)
415                         return value;
416                 break;
417         case I915_PARAM_MMAP_GTT_VERSION:
418                 /* Though we've started our numbering from 1, and so class all
419                  * earlier versions as 0, in effect their value is undefined as
420                  * the ioctl will report EINVAL for the unknown param!
421                  */
422                 value = i915_gem_mmap_gtt_version();
423                 break;
424         case I915_PARAM_HAS_SCHEDULER:
425                 value = dev_priv->caps.scheduler;
426                 break;
427
428         case I915_PARAM_MMAP_VERSION:
429                 /* Remember to bump this if the version changes! */
430         case I915_PARAM_HAS_GEM:
431         case I915_PARAM_HAS_PAGEFLIPPING:
432         case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
433         case I915_PARAM_HAS_RELAXED_FENCING:
434         case I915_PARAM_HAS_COHERENT_RINGS:
435         case I915_PARAM_HAS_RELAXED_DELTA:
436         case I915_PARAM_HAS_GEN7_SOL_RESET:
437         case I915_PARAM_HAS_WAIT_TIMEOUT:
438         case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
439         case I915_PARAM_HAS_PINNED_BATCHES:
440         case I915_PARAM_HAS_EXEC_NO_RELOC:
441         case I915_PARAM_HAS_EXEC_HANDLE_LUT:
442         case I915_PARAM_HAS_COHERENT_PHYS_GTT:
443         case I915_PARAM_HAS_EXEC_SOFTPIN:
444         case I915_PARAM_HAS_EXEC_ASYNC:
445         case I915_PARAM_HAS_EXEC_FENCE:
446         case I915_PARAM_HAS_EXEC_CAPTURE:
447         case I915_PARAM_HAS_EXEC_BATCH_FIRST:
448         case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
449         case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
450                 /* For the time being all of these are always true;
451                  * if some supported hardware does not have one of these
452                  * features this value needs to be provided from
453                  * INTEL_INFO(), a feature macro, or similar.
454                  */
455                 value = 1;
456                 break;
457         case I915_PARAM_HAS_CONTEXT_ISOLATION:
458                 value = intel_engines_has_context_isolation(dev_priv);
459                 break;
460         case I915_PARAM_SLICE_MASK:
461                 value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
462                 if (!value)
463                         return -ENODEV;
464                 break;
465         case I915_PARAM_SUBSLICE_MASK:
466                 value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
467                 if (!value)
468                         return -ENODEV;
469                 break;
470         case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
471                 value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
472                 break;
473         case I915_PARAM_MMAP_GTT_COHERENT:
474                 value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
475                 break;
476         default:
477                 DRM_DEBUG("Unknown parameter %d\n", param->param);
478                 return -EINVAL;
479         }
480
481         if (put_user(value, param->value))
482                 return -EFAULT;
483
484         return 0;
485 }
486
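/*
 * Grab a reference to the host bridge (device 0, function 0 on our PCI
 * segment) so later code can poke at its config space, e.g. for MCHBAR.
 */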
487 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
488 {
489         int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
490
491         dev_priv->bridge_dev =
492                 pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
493         if (!dev_priv->bridge_dev) {
494                 DRM_ERROR("bridge device not found\n");
495                 return -1;
496         }
497         return 0;
498 }
499
500 /* Allocate space for the MCH regs if needed, return nonzero on error */
501 static int
502 intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
503 {
504         int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
505         u32 temp_lo, temp_hi = 0;
506         u64 mchbar_addr;
507         int ret;
508
509         if (INTEL_GEN(dev_priv) >= 4)
510                 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
511         pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
512         mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
513
514         /* If ACPI doesn't have it, assume we need to allocate it ourselves */
515 #ifdef CONFIG_PNP
516         if (mchbar_addr &&
517             pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
518                 return 0;
519 #endif
520
521         /* Get some space for it */
522         dev_priv->mch_res.name = "i915 MCHBAR";
523         dev_priv->mch_res.flags = IORESOURCE_MEM;
524         ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
525                                      &dev_priv->mch_res,
526                                      MCHBAR_SIZE, MCHBAR_SIZE,
527                                      PCIBIOS_MIN_MEM,
528                                      0, pcibios_align_resource,
529                                      dev_priv->bridge_dev);
530         if (ret) {
531                 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
532                 dev_priv->mch_res.start = 0;
533                 return ret;
534         }
535
536         if (INTEL_GEN(dev_priv) >= 4)
537                 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
538                                        upper_32_bits(dev_priv->mch_res.start));
539
540         pci_write_config_dword(dev_priv->bridge_dev, reg,
541                                lower_32_bits(dev_priv->mch_res.start));
542         return 0;
543 }
544
545 /* Set up MCHBAR if possible; note whether we need to disable it again on teardown */
546 static void
547 intel_setup_mchbar(struct drm_i915_private *dev_priv)
548 {
549         int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
550         u32 temp;
551         bool enabled;
552
553         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
554                 return;
555
556         dev_priv->mchbar_need_disable = false;
557
558         if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
559                 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
560                 enabled = !!(temp & DEVEN_MCHBAR_EN);
561         } else {
562                 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
563                 enabled = temp & 1;
564         }
565
566         /* If it's already enabled, we don't have to do anything */
567         if (enabled)
568                 return;
569
570         if (intel_alloc_mchbar_resource(dev_priv))
571                 return;
572
573         dev_priv->mchbar_need_disable = true;
574
575         /* Space is allocated or reserved, so enable it. */
576         if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
577                 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
578                                        temp | DEVEN_MCHBAR_EN);
579         } else {
580                 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
581                 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
582         }
583 }
584
585 static void
586 intel_teardown_mchbar(struct drm_i915_private *dev_priv)
587 {
588         int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
589
590         if (dev_priv->mchbar_need_disable) {
591                 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
592                         u32 deven_val;
593
594                         pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
595                                               &deven_val);
596                         deven_val &= ~DEVEN_MCHBAR_EN;
597                         pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
598                                                deven_val);
599                 } else {
600                         u32 mchbar_val;
601
602                         pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
603                                               &mchbar_val);
604                         mchbar_val &= ~1;
605                         pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
606                                                mchbar_val);
607                 }
608         }
609
610         if (dev_priv->mch_res.start)
611                 release_resource(&dev_priv->mch_res);
612 }
613
614 /* true = enable decode, false = disable decode */
615 static unsigned int i915_vga_set_decode(void *cookie, bool state)
616 {
617         struct drm_i915_private *dev_priv = cookie;
618
619         intel_modeset_vga_set_state(dev_priv, state);
620         if (state)
621                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
622                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
623         else
624                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
625 }
626
627 static int i915_resume_switcheroo(struct drm_device *dev);
628 static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
629
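/*
 * vga_switcheroo callbacks: suspend the device when the mux hands the
 * outputs to the other GPU, and resume it when we get them back.
 */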
630 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
631 {
632         struct drm_device *dev = pci_get_drvdata(pdev);
633         pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
634
635         if (state == VGA_SWITCHEROO_ON) {
636                 pr_info("switched on\n");
637                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
638                 /* the i915 resume handler doesn't set the device to D0 */
639                 pci_set_power_state(pdev, PCI_D0);
640                 i915_resume_switcheroo(dev);
641                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
642         } else {
643                 pr_info("switched off\n");
644                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
645                 i915_suspend_switcheroo(dev, pmm);
646                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
647         }
648 }
649
650 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
651 {
652         struct drm_device *dev = pci_get_drvdata(pdev);
653
654         /*
655          * FIXME: open_count is protected by drm_global_mutex but that would lead to
656          * locking inversion with the driver load path. And the access here is
657          * completely racy anyway. So don't bother with locking for now.
658          */
659         return dev->open_count == 0;
660 }
661
662 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
663         .set_gpu_state = i915_switcheroo_set_state,
664         .reprobe = NULL,
665         .can_switch = i915_switcheroo_can_switch,
666 };
667
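/*
 * Modeset bring-up: VGA arbitration and switcheroo registration, power
 * domains, CSR/DMC firmware, interrupts, output setup, GEM init and
 * finally fbdev. The cleanup labels unwind in reverse order on error.
 */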
668 static int i915_load_modeset_init(struct drm_device *dev)
669 {
670         struct drm_i915_private *dev_priv = to_i915(dev);
671         struct pci_dev *pdev = dev_priv->drm.pdev;
672         int ret;
673
674         if (i915_inject_load_failure())
675                 return -ENODEV;
676
677         if (HAS_DISPLAY(dev_priv)) {
678                 ret = drm_vblank_init(&dev_priv->drm,
679                                       INTEL_INFO(dev_priv)->num_pipes);
680                 if (ret)
681                         goto out;
682         }
683
684         intel_bios_init(dev_priv);
685
686         /* If we have more than one VGA card, we need to arbitrate access
687          * to the common VGA resources.
688          *
689          * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
690          * then we do not take part in VGA arbitration and
691          * vga_client_register() fails with -ENODEV.
692          */
693         ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
694         if (ret && ret != -ENODEV)
695                 goto out;
696
697         intel_register_dsm_handler();
698
699         ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
700         if (ret)
701                 goto cleanup_vga_client;
702
703         /* must happen before intel_power_domains_init_hw() on VLV/CHV */
704         intel_update_rawclk(dev_priv);
705
706         intel_power_domains_init_hw(dev_priv, false);
707
708         intel_csr_ucode_init(dev_priv);
709
710         ret = intel_irq_install(dev_priv);
711         if (ret)
712                 goto cleanup_csr;
713
714         intel_gmbus_setup(dev_priv);
715
716         /* Important: The output setup functions called by modeset_init need
717          * working irqs for e.g. gmbus and dp aux transfers. */
718         ret = intel_modeset_init(dev);
719         if (ret)
720                 goto cleanup_irq;
721
722         ret = i915_gem_init(dev_priv);
723         if (ret)
724                 goto cleanup_modeset;
725
726         intel_overlay_setup(dev_priv);
727
728         if (!HAS_DISPLAY(dev_priv))
729                 return 0;
730
731         ret = intel_fbdev_init(dev);
732         if (ret)
733                 goto cleanup_gem;
734
735         /* Only enable hotplug handling once the fbdev is fully set up. */
736         intel_hpd_init(dev_priv);
737
738         intel_init_ipc(dev_priv);
739
740         return 0;
741
742 cleanup_gem:
743         i915_gem_suspend(dev_priv);
744         i915_gem_fini(dev_priv);
745 cleanup_modeset:
746         intel_modeset_cleanup(dev);
747 cleanup_irq:
748         drm_irq_uninstall(dev);
749         intel_gmbus_teardown(dev_priv);
750 cleanup_csr:
751         intel_csr_ucode_fini(dev_priv);
752         intel_power_domains_fini_hw(dev_priv);
753         vga_switcheroo_unregister_client(pdev);
754 cleanup_vga_client:
755         vga_client_register(pdev, NULL, NULL, NULL);
756 out:
757         return ret;
758 }
759
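/*
 * Kick out firmware framebuffers (e.g. efifb, vesafb) that overlap our
 * GMADR aperture before we take over the display hardware.
 */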
760 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
761 {
762         struct apertures_struct *ap;
763         struct pci_dev *pdev = dev_priv->drm.pdev;
764         struct i915_ggtt *ggtt = &dev_priv->ggtt;
765         bool primary;
766         int ret;
767
768         ap = alloc_apertures(1);
769         if (!ap)
770                 return -ENOMEM;
771
772         ap->ranges[0].base = ggtt->gmadr.start;
773         ap->ranges[0].size = ggtt->mappable_end;
774
775         primary =
776                 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
777
778         ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
779
780         kfree(ap);
781
782         return ret;
783 }
784
785 static void intel_init_dpio(struct drm_i915_private *dev_priv)
786 {
787         /*
788          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
789          * CHV x1 PHY (DP/HDMI D)
790          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
791          */
792         if (IS_CHERRYVIEW(dev_priv)) {
793                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
794                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
795         } else if (IS_VALLEYVIEW(dev_priv)) {
796                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
797         }
798 }
799
800 static int i915_workqueues_init(struct drm_i915_private *dev_priv)
801 {
802         /*
803          * The i915 workqueue is primarily used for batched retirement of
804          * requests (and thus managing bo) once the task has been completed
805          * by the GPU. i915_retire_requests() is called directly when we
806          * need high-priority retirement, such as waiting for an explicit
807          * bo.
808          *
809          * It is also used for periodic low-priority events, such as
810          * idle-timers and recording error state.
811          *
812          * All tasks on the workqueue are expected to acquire the dev mutex
813          * so there is no point in running more than one instance of the
814          * workqueue at any time.  Use an ordered one.
815          */
816         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
817         if (dev_priv->wq == NULL)
818                 goto out_err;
819
820         dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
821         if (dev_priv->hotplug.dp_wq == NULL)
822                 goto out_free_wq;
823
824         return 0;
825
826 out_free_wq:
827         destroy_workqueue(dev_priv->wq);
828 out_err:
829         DRM_ERROR("Failed to allocate workqueues.\n");
830
831         return -ENOMEM;
832 }
833
834 static void i915_engines_cleanup(struct drm_i915_private *i915)
835 {
836         struct intel_engine_cs *engine;
837         enum intel_engine_id id;
838
839         for_each_engine(engine, i915, id)
840                 kfree(engine);
841 }
842
843 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
844 {
845         destroy_workqueue(dev_priv->hotplug.dp_wq);
846         destroy_workqueue(dev_priv->wq);
847 }
848
849 /*
850  * We don't keep the workarounds for pre-production hardware, so we expect our
851  * driver to fail on these machines in one way or another. A little warning on
852  * dmesg may help both the user and the bug triagers.
853  *
854  * Our policy for removing pre-production workarounds is to keep the
855  * current gen workarounds as a guide to the bring-up of the next gen
856  * (workarounds have a habit of persisting!). Anything older than that
857  * should be removed along with the complications they introduce.
858  */
859 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
860 {
861         bool pre = false;
862
863         pre |= IS_HSW_EARLY_SDV(dev_priv);
864         pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
865         pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
866         pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
867
868         if (pre) {
869                 DRM_ERROR("This is a pre-production stepping. "
870                           "It may not be fully functional.\n");
871                 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
872         }
873 }
874
875 /**
876  * i915_driver_init_early - setup state not requiring device access
877  * @dev_priv: device private
878  *
879  * Initialize everything that is a "SW-only" state, that is state not
880  * requiring accessing the device or exposing the driver via kernel internal
881  * or userspace interfaces. Example steps belonging here: lock initialization,
882  * system memory allocation, setting up device specific attributes and
883  * function hooks not requiring accessing the device.
884  */
885 static int i915_driver_init_early(struct drm_i915_private *dev_priv)
886 {
887         int ret = 0;
888
889         if (i915_inject_load_failure())
890                 return -ENODEV;
891
892         intel_device_info_subplatform_init(dev_priv);
893
894         intel_uncore_init_early(&dev_priv->uncore);
895
896         spin_lock_init(&dev_priv->irq_lock);
897         spin_lock_init(&dev_priv->gpu_error.lock);
898         mutex_init(&dev_priv->backlight_lock);
899
900         mutex_init(&dev_priv->sb_lock);
901         pm_qos_add_request(&dev_priv->sb_qos,
902                            PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
903
904         mutex_init(&dev_priv->av_mutex);
905         mutex_init(&dev_priv->wm.wm_mutex);
906         mutex_init(&dev_priv->pps_mutex);
907         mutex_init(&dev_priv->hdcp_comp_mutex);
908
909         i915_memcpy_init_early(dev_priv);
910         intel_runtime_pm_init_early(dev_priv);
911
912         ret = i915_workqueues_init(dev_priv);
913         if (ret < 0)
914                 goto err_engines;
915
916         ret = i915_gem_init_early(dev_priv);
917         if (ret < 0)
918                 goto err_workqueues;
919
920         /* This must be called before any calls to HAS_PCH_* */
921         intel_detect_pch(dev_priv);
922
923         intel_wopcm_init_early(&dev_priv->wopcm);
924         intel_uc_init_early(dev_priv);
925         intel_pm_setup(dev_priv);
926         intel_init_dpio(dev_priv);
927         ret = intel_power_domains_init(dev_priv);
928         if (ret < 0)
929                 goto err_uc;
930         intel_irq_init(dev_priv);
931         intel_hangcheck_init(dev_priv);
932         intel_init_display_hooks(dev_priv);
933         intel_init_clock_gating_hooks(dev_priv);
934         intel_init_audio_hooks(dev_priv);
935         intel_display_crc_init(dev_priv);
936
937         intel_detect_preproduction_hw(dev_priv);
938
939         return 0;
940
941 err_uc:
942         intel_uc_cleanup_early(dev_priv);
943         i915_gem_cleanup_early(dev_priv);
944 err_workqueues:
945         i915_workqueues_cleanup(dev_priv);
946 err_engines:
947         i915_engines_cleanup(dev_priv);
948         return ret;
949 }
950
951 /**
952  * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
953  * @dev_priv: device private
954  */
955 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
956 {
957         intel_irq_fini(dev_priv);
958         intel_power_domains_cleanup(dev_priv);
959         intel_uc_cleanup_early(dev_priv);
960         i915_gem_cleanup_early(dev_priv);
961         i915_workqueues_cleanup(dev_priv);
962         i915_engines_cleanup(dev_priv);
963
964         pm_qos_remove_request(&dev_priv->sb_qos);
965         mutex_destroy(&dev_priv->sb_lock);
966 }
967
968 /**
969  * i915_driver_init_mmio - setup device MMIO
970  * @dev_priv: device private
971  *
972  * Setup minimal device state necessary for MMIO accesses later in the
973  * initialization sequence. The setup here should avoid any other device-wide
974  * side effects or exposing the driver via kernel internal or user space
975  * interfaces.
976  */
977 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
978 {
979         int ret;
980
981         if (i915_inject_load_failure())
982                 return -ENODEV;
983
984         if (i915_get_bridge_dev(dev_priv))
985                 return -EIO;
986
987         ret = intel_uncore_init_mmio(&dev_priv->uncore);
988         if (ret < 0)
989                 goto err_bridge;
990
991         /* Try to make sure MCHBAR is enabled before poking at it */
992         intel_setup_mchbar(dev_priv);
993
994         intel_device_info_init_mmio(dev_priv);
995
996         intel_uncore_prune_mmio_domains(&dev_priv->uncore);
997
998         intel_uc_init_mmio(dev_priv);
999
1000         ret = intel_engines_init_mmio(dev_priv);
1001         if (ret)
1002                 goto err_uncore;
1003
1004         i915_gem_init_mmio(dev_priv);
1005
1006         return 0;
1007
1008 err_uncore:
1009         intel_teardown_mchbar(dev_priv);
1010         intel_uncore_fini_mmio(&dev_priv->uncore);
1011 err_bridge:
1012         pci_dev_put(dev_priv->bridge_dev);
1013
1014         return ret;
1015 }
1016
1017 /**
1018  * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
1019  * @dev_priv: device private
1020  */
1021 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1022 {
1023         intel_teardown_mchbar(dev_priv);
1024         intel_uncore_fini_mmio(&dev_priv->uncore);
1025         pci_dev_put(dev_priv->bridge_dev);
1026 }
1027
1028 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
1029 {
1030         intel_gvt_sanitize_options(dev_priv);
1031 }
1032
1033 #define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
1034
1035 static const char *intel_dram_type_str(enum intel_dram_type type)
1036 {
1037         static const char * const str[] = {
1038                 DRAM_TYPE_STR(UNKNOWN),
1039                 DRAM_TYPE_STR(DDR3),
1040                 DRAM_TYPE_STR(DDR4),
1041                 DRAM_TYPE_STR(LPDDR3),
1042                 DRAM_TYPE_STR(LPDDR4),
1043         };
1044
1045         if (type >= ARRAY_SIZE(str))
1046                 type = INTEL_DRAM_UNKNOWN;
1047
1048         return str[type];
1049 }
1050
1051 #undef DRAM_TYPE_STR
1052
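/*
 * Number of DRAM devices on a DIMM, assuming 64-bit wide ranks: e.g. a
 * dual-rank DIMM built from x8 parts has 2 * 64 / 8 = 16 devices.
 */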
1053 static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
1054 {
1055         return dimm->ranks * 64 / (dimm->width ?: 1);
1056 }
1057
1058 /* Returns total GB for the whole DIMM */
1059 static int skl_get_dimm_size(u16 val)
1060 {
1061         return val & SKL_DRAM_SIZE_MASK;
1062 }
1063
1064 static int skl_get_dimm_width(u16 val)
1065 {
1066         if (skl_get_dimm_size(val) == 0)
1067                 return 0;
1068
1069         switch (val & SKL_DRAM_WIDTH_MASK) {
1070         case SKL_DRAM_WIDTH_X8:
1071         case SKL_DRAM_WIDTH_X16:
1072         case SKL_DRAM_WIDTH_X32:
1073                 val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
1074                 return 8 << val;
1075         default:
1076                 MISSING_CASE(val);
1077                 return 0;
1078         }
1079 }
1080
1081 static int skl_get_dimm_ranks(u16 val)
1082 {
1083         if (skl_get_dimm_size(val) == 0)
1084                 return 0;
1085
1086         val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
1087
1088         return val + 1;
1089 }
1090
1091 /* Returns total GB for the whole DIMM */
1092 static int cnl_get_dimm_size(u16 val)
1093 {
1094         return (val & CNL_DRAM_SIZE_MASK) / 2;
1095 }
1096
1097 static int cnl_get_dimm_width(u16 val)
1098 {
1099         if (cnl_get_dimm_size(val) == 0)
1100                 return 0;
1101
1102         switch (val & CNL_DRAM_WIDTH_MASK) {
1103         case CNL_DRAM_WIDTH_X8:
1104         case CNL_DRAM_WIDTH_X16:
1105         case CNL_DRAM_WIDTH_X32:
1106                 val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
1107                 return 8 << val;
1108         default:
1109                 MISSING_CASE(val);
1110                 return 0;
1111         }
1112 }
1113
1114 static int cnl_get_dimm_ranks(u16 val)
1115 {
1116         if (cnl_get_dimm_size(val) == 0)
1117                 return 0;
1118
1119         val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
1120
1121         return val + 1;
1122 }
1123
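/*
 * Does this DIMM use 16Gb DRAM devices? E.g. a 16GB single-rank x8 DIMM
 * has 8 devices, giving 8 * 16 / 8 = 16Gb per device.
 */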
1124 static bool
1125 skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
1126 {
1127         /* Convert total GB to Gb per DRAM device */
1128         return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
1129 }
1130
1131 static void
1132 skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
1133                        struct dram_dimm_info *dimm,
1134                        int channel, char dimm_name, u16 val)
1135 {
1136         if (INTEL_GEN(dev_priv) >= 10) {
1137                 dimm->size = cnl_get_dimm_size(val);
1138                 dimm->width = cnl_get_dimm_width(val);
1139                 dimm->ranks = cnl_get_dimm_ranks(val);
1140         } else {
1141                 dimm->size = skl_get_dimm_size(val);
1142                 dimm->width = skl_get_dimm_width(val);
1143                 dimm->ranks = skl_get_dimm_ranks(val);
1144         }
1145
1146         DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
1147                       channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
1148                       yesno(skl_is_16gb_dimm(dimm)));
1149 }
1150
1151 static int
1152 skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
1153                           struct dram_channel_info *ch,
1154                           int channel, u32 val)
1155 {
1156         skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
1157                                channel, 'L', val & 0xffff);
1158         skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
1159                                channel, 'S', val >> 16);
1160
1161         if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
1162                 DRM_DEBUG_KMS("CH%u not populated\n", channel);
1163                 return -EINVAL;
1164         }
1165
1166         if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
1167                 ch->ranks = 2;
1168         else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
1169                 ch->ranks = 2;
1170         else
1171                 ch->ranks = 1;
1172
1173         ch->is_16gb_dimm =
1174                 skl_is_16gb_dimm(&ch->dimm_l) ||
1175                 skl_is_16gb_dimm(&ch->dimm_s);
1176
1177         DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
1178                       channel, ch->ranks, yesno(ch->is_16gb_dimm));
1179
1180         return 0;
1181 }
1182
1183 static bool
1184 intel_is_dram_symmetric(const struct dram_channel_info *ch0,
1185                         const struct dram_channel_info *ch1)
1186 {
1187         return !memcmp(ch0, ch1, sizeof(*ch0)) &&
1188                 (ch0->dimm_s.size == 0 ||
1189                  !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
1190 }
1191
1192 static int
1193 skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
1194 {
1195         struct dram_info *dram_info = &dev_priv->dram_info;
1196         struct dram_channel_info ch0 = {}, ch1 = {};
1197         u32 val;
1198         int ret;
1199
1200         val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
1201         ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
1202         if (ret == 0)
1203                 dram_info->num_channels++;
1204
1205         val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
1206         ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
1207         if (ret == 0)
1208                 dram_info->num_channels++;
1209
1210         if (dram_info->num_channels == 0) {
1211                 DRM_INFO("Number of memory channels is zero\n");
1212                 return -EINVAL;
1213         }
1214
1215         /*
1216          * If either channel is populated with single-rank memory, the worst
1217          * case is the same as with single-rank memory everywhere, so report
1218          * single rank.
1219          */
1220         if (ch0.ranks == 1 || ch1.ranks == 1)
1221                 dram_info->ranks = 1;
1222         else
1223                 dram_info->ranks = max(ch0.ranks, ch1.ranks);
1224
1225         if (dram_info->ranks == 0) {
1226                 DRM_INFO("couldn't get memory rank information\n");
1227                 return -EINVAL;
1228         }
1229
1230         dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
1231
1232         dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
1233
1234         DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
1235                       yesno(dram_info->symmetric_memory));
1236         return 0;
1237 }
1238
1239 static enum intel_dram_type
1240 skl_get_dram_type(struct drm_i915_private *dev_priv)
1241 {
1242         u32 val;
1243
1244         val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
1245
1246         switch (val & SKL_DRAM_DDR_TYPE_MASK) {
1247         case SKL_DRAM_DDR_TYPE_DDR3:
1248                 return INTEL_DRAM_DDR3;
1249         case SKL_DRAM_DDR_TYPE_DDR4:
1250                 return INTEL_DRAM_DDR4;
1251         case SKL_DRAM_DDR_TYPE_LPDDR3:
1252                 return INTEL_DRAM_LPDDR3;
1253         case SKL_DRAM_DDR_TYPE_LPDDR4:
1254                 return INTEL_DRAM_LPDDR4;
1255         default:
1256                 MISSING_CASE(val);
1257                 return INTEL_DRAM_UNKNOWN;
1258         }
1259 }
1260
1261 static int
1262 skl_get_dram_info(struct drm_i915_private *dev_priv)
1263 {
1264         struct dram_info *dram_info = &dev_priv->dram_info;
1265         u32 mem_freq_khz, val;
1266         int ret;
1267
1268         dram_info->type = skl_get_dram_type(dev_priv);
1269         DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
1270
1271         ret = skl_dram_get_channels_info(dev_priv);
1272         if (ret)
1273                 return ret;
1274
1275         val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
1276         mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
1277                                     SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
1278
1279         dram_info->bandwidth_kbps = dram_info->num_channels *
1280                                                         mem_freq_khz * 8;
1281
1282         if (dram_info->bandwidth_kbps == 0) {
1283                 DRM_INFO("Couldn't get system memory bandwidth\n");
1284                 return -EINVAL;
1285         }
1286
1287         dram_info->valid = true;
1288         return 0;
1289 }
1290
1291 /* Returns Gb per DRAM device */
1292 static int bxt_get_dimm_size(u32 val)
1293 {
1294         switch (val & BXT_DRAM_SIZE_MASK) {
1295         case BXT_DRAM_SIZE_4GBIT:
1296                 return 4;
1297         case BXT_DRAM_SIZE_6GBIT:
1298                 return 6;
1299         case BXT_DRAM_SIZE_8GBIT:
1300                 return 8;
1301         case BXT_DRAM_SIZE_12GBIT:
1302                 return 12;
1303         case BXT_DRAM_SIZE_16GBIT:
1304                 return 16;
1305         default:
1306                 MISSING_CASE(val);
1307                 return 0;
1308         }
1309 }
1310
1311 static int bxt_get_dimm_width(u32 val)
1312 {
1313         if (!bxt_get_dimm_size(val))
1314                 return 0;
1315
1316         val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
1317
1318         return 8 << val;
1319 }
1320
1321 static int bxt_get_dimm_ranks(u32 val)
1322 {
1323         if (!bxt_get_dimm_size(val))
1324                 return 0;
1325
1326         switch (val & BXT_DRAM_RANK_MASK) {
1327         case BXT_DRAM_RANK_SINGLE:
1328                 return 1;
1329         case BXT_DRAM_RANK_DUAL:
1330                 return 2;
1331         default:
1332                 MISSING_CASE(val);
1333                 return 0;
1334         }
1335 }
1336
1337 static enum intel_dram_type bxt_get_dimm_type(u32 val)
1338 {
1339         if (!bxt_get_dimm_size(val))
1340                 return INTEL_DRAM_UNKNOWN;
1341
1342         switch (val & BXT_DRAM_TYPE_MASK) {
1343         case BXT_DRAM_TYPE_DDR3:
1344                 return INTEL_DRAM_DDR3;
1345         case BXT_DRAM_TYPE_LPDDR3:
1346                 return INTEL_DRAM_LPDDR3;
1347         case BXT_DRAM_TYPE_DDR4:
1348                 return INTEL_DRAM_DDR4;
1349         case BXT_DRAM_TYPE_LPDDR4:
1350                 return INTEL_DRAM_LPDDR4;
1351         default:
1352                 MISSING_CASE(val);
1353                 return INTEL_DRAM_UNKNOWN;
1354         }
1355 }
1356
1357 static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
1358                               u32 val)
1359 {
1360         dimm->width = bxt_get_dimm_width(val);
1361         dimm->ranks = bxt_get_dimm_ranks(val);
1362
1363         /*
1364          * Size in register is Gb per DRAM device. Convert to total
1365          * GB to match the way we report this for non-LP platforms.
1366          */
1367         dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
1368 }
1369
1370 static int
1371 bxt_get_dram_info(struct drm_i915_private *dev_priv)
1372 {
1373         struct dram_info *dram_info = &dev_priv->dram_info;
1374         u32 dram_channels;
1375         u32 mem_freq_khz, val;
1376         u8 num_active_channels;
1377         int i;
1378
1379         val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
1380         mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
1381                                     BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
1382
1383         dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
1384         num_active_channels = hweight32(dram_channels);
1385
1386         /* Each active bit represents a 4-byte wide channel */
1387         dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
1388
1389         if (dram_info->bandwidth_kbps == 0) {
1390                 DRM_INFO("Couldn't get system memory bandwidth\n");
1391                 return -EINVAL;
1392         }
1393
1394         /*
1395          * Now read each DUNIT8/9/10/11 to check the ranks of each DIMM.
1396          */
1397         for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
1398                 struct dram_dimm_info dimm;
1399                 enum intel_dram_type type;
1400
1401                 val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
1402                 if (val == 0xFFFFFFFF)
1403                         continue;
1404
1405                 dram_info->num_channels++;
1406
1407                 bxt_get_dimm_info(&dimm, val);
1408                 type = bxt_get_dimm_type(val);
1409
1410                 WARN_ON(type != INTEL_DRAM_UNKNOWN &&
1411                         dram_info->type != INTEL_DRAM_UNKNOWN &&
1412                         dram_info->type != type);
1413
1414                 DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
1415                               i - BXT_D_CR_DRP0_DUNIT_START,
1416                               dimm.size, dimm.width, dimm.ranks,
1417                               intel_dram_type_str(type));
1418
1419                 /*
1420                  * If any channel is populated with single-rank
1421                  * memory, the worst case is the same as with
1422                  * single-rank memory everywhere, so report single rank.
1423                  */
1424                 if (dram_info->ranks == 0)
1425                         dram_info->ranks = dimm.ranks;
1426                 else if (dimm.ranks == 1)
1427                         dram_info->ranks = 1;
1428
1429                 if (type != INTEL_DRAM_UNKNOWN)
1430                         dram_info->type = type;
1431         }
1432
1433         if (dram_info->type == INTEL_DRAM_UNKNOWN ||
1434             dram_info->ranks == 0) {
1435                 DRM_INFO("couldn't get memory information\n");
1436                 return -EINVAL;
1437         }
1438
1439         dram_info->valid = true;
1440         return 0;
1441 }
1442
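/*
 * Fill in dev_priv->dram_info (type, channels, ranks, bandwidth) from the
 * memory controller; used e.g. by the watermark and bandwidth code.
 */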
1443 static void
1444 intel_get_dram_info(struct drm_i915_private *dev_priv)
1445 {
1446         struct dram_info *dram_info = &dev_priv->dram_info;
1447         int ret;
1448
1449         /*
1450          * Assume 16Gb DIMMs are present until proven otherwise.
1451          * This is only used for the level 0 watermark latency
1452          * w/a which does not apply to bxt/glk.
1453          */
1454         dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
1455
1456         if (INTEL_GEN(dev_priv) < 9)
1457                 return;
1458
1459         if (IS_GEN9_LP(dev_priv))
1460                 ret = bxt_get_dram_info(dev_priv);
1461         else
1462                 ret = skl_get_dram_info(dev_priv);
1463         if (ret)
1464                 return;
1465
1466         DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
1467                       dram_info->bandwidth_kbps,
1468                       dram_info->num_channels);
1469
1470         DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
1471                       dram_info->ranks, yesno(dram_info->is_16gb_dimm));
1472 }
1473
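/*
 * Decode the gen9+ EDRAM capability register into a size in MB:
 * banks * ways * sets, with ways and sets looked up from the encoded
 * fields.
 */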
1474 static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
1475 {
1476         const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
1477         const unsigned int sets[4] = { 1, 1, 2, 2 };
1478
1479         return EDRAM_NUM_BANKS(cap) *
1480                 ways[EDRAM_WAYS_IDX(cap)] *
1481                 sets[EDRAM_SETS_IDX(cap)];
1482 }
1483
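/*
 * Detect embedded DRAM (eDRAM) on HSW/BDW and gen9+, and record its size
 * in dev_priv->edram_size_mb.
 */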
1484 static void edram_detect(struct drm_i915_private *dev_priv)
1485 {
1486         u32 edram_cap = 0;
1487
1488         if (!(IS_HASWELL(dev_priv) ||
1489               IS_BROADWELL(dev_priv) ||
1490               INTEL_GEN(dev_priv) >= 9))
1491                 return;
1492
1493         edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
1494
1495         /* NB: We can't write IDICR yet because we don't have gt funcs set up */
1496
1497         if (!(edram_cap & EDRAM_ENABLED))
1498                 return;
1499
1500         /*
1501          * The capability bits needed for the size calculation are not
1502          * present before gen9, so always return 128MB.
1503          */
1504         if (INTEL_GEN(dev_priv) < 9)
1505                 dev_priv->edram_size_mb = 128;
1506         else
1507                 dev_priv->edram_size_mb =
1508                         gen9_edram_size_mb(dev_priv, edram_cap);
1509
1510         DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
1511 }
1512
1513 /**
1514  * i915_driver_init_hw - setup state requiring device access
1515  * @dev_priv: device private
1516  *
1517  * Setup state that requires accessing the device, but doesn't require
1518  * exposing the driver via kernel internal or userspace interfaces.
1519  */
1520 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1521 {
1522         struct pci_dev *pdev = dev_priv->drm.pdev;
1523         int ret;
1524
1525         if (i915_inject_load_failure())
1526                 return -ENODEV;
1527
1528         intel_device_info_runtime_init(dev_priv);
1529
1530         if (HAS_PPGTT(dev_priv)) {
1531                 if (intel_vgpu_active(dev_priv) &&
1532                     !intel_vgpu_has_full_ppgtt(dev_priv)) {
1533                         i915_report_error(dev_priv,
1534                                           "incompatible vGPU found, support for isolated ppGTT required\n");
1535                         return -ENXIO;
1536                 }
1537         }
1538
1539         if (HAS_EXECLISTS(dev_priv)) {
1540                 /*
1541                  * Older GVT emulation depends upon intercepting CSB mmio,
1542                  * which we no longer use, preferring to use the HWSP cache
1543                  * instead.
1544                  */
1545                 if (intel_vgpu_active(dev_priv) &&
1546                     !intel_vgpu_has_hwsp_emulation(dev_priv)) {
1547                         i915_report_error(dev_priv,
1548                                           "old vGPU host found, support for HWSP emulation required\n");
1549                         return -ENXIO;
1550                 }
1551         }
1552
1553         intel_sanitize_options(dev_priv);
1554
1555         /* needs to be done before ggtt probe */
1556         edram_detect(dev_priv);
1557
1558         i915_perf_init(dev_priv);
1559
1560         ret = i915_ggtt_probe_hw(dev_priv);
1561         if (ret)
1562                 goto err_perf;
1563
1564         /*
1565          * WARNING: Apparently we must kick fbdev drivers before vgacon,
1566          * otherwise the vga fbdev driver falls over.
1567          */
1568         ret = i915_kick_out_firmware_fb(dev_priv);
1569         if (ret) {
1570                 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1571                 goto err_ggtt;
1572         }
1573
1574         ret = vga_remove_vgacon(pdev);
1575         if (ret) {
1576                 DRM_ERROR("failed to remove conflicting VGA console\n");
1577                 goto err_ggtt;
1578         }
1579
1580         ret = i915_ggtt_init_hw(dev_priv);
1581         if (ret)
1582                 goto err_ggtt;
1583
1584         ret = i915_ggtt_enable_hw(dev_priv);
1585         if (ret) {
1586                 DRM_ERROR("failed to enable GGTT\n");
1587                 goto err_ggtt;
1588         }
1589
1590         pci_set_master(pdev);
1591
1592         /* overlay on gen2 is broken and can't address above 1G */
1593         if (IS_GEN(dev_priv, 2)) {
1594                 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
1595                 if (ret) {
1596                         DRM_ERROR("failed to set DMA mask\n");
1597
1598                         goto err_ggtt;
1599                 }
1600         }
1601
1602         /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1603          * using 32bit addressing, overwriting memory if HWS is located
1604          * above 4GB.
1605          *
1606          * The documentation also mentions an issue with undefined
1607          * behaviour if any general state is accessed within a page above 4GB,
1608          * which also needs to be handled carefully.
1609          */
1610         if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
1611                 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1612
1613                 if (ret) {
1614                         DRM_ERROR("failed to set DMA mask\n");
1615
1616                         goto err_ggtt;
1617                 }
1618         }
1619
1620         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1621                            PM_QOS_DEFAULT_VALUE);
1622
1623         intel_uncore_sanitize(dev_priv);
1624
1625         intel_gt_init_workarounds(dev_priv);
1626         i915_gem_load_init_fences(dev_priv);
1627
1628         /* On the 945G/GM, the chipset reports the MSI capability on the
1629          * integrated graphics even though the support isn't actually there
1630          * according to the published specs.  It doesn't appear to function
1631          * correctly in testing on 945G.
1632          * This may be a side effect of MSI having been made available for PEG
1633          * and the registers being closely associated.
1634          *
1635          * According to chipset errata, on the 965GM, MSI interrupts may
1636          * be lost or delayed, and the capability was defeatured. MSI interrupts seem to
1637          * get lost on g4x as well, and interrupt delivery seems to stay
1638          * properly dead afterwards. So we'll just disable them for all
1639          * pre-gen5 chipsets.
1640          *
1641          * dp aux and gmbus irq on gen4 seems to be able to generate legacy
1642          * interrupts even when in MSI mode. This results in spurious
1643          * interrupt warnings if the legacy irq no. is shared with another
1644          * device. The kernel then disables that interrupt source and so
1645          * prevents the other device from working properly.
1646          */
1647         if (INTEL_GEN(dev_priv) >= 5) {
1648                 if (pci_enable_msi(pdev) < 0)
1649                         DRM_DEBUG_DRIVER("can't enable MSI\n");
1650         }
1651
1652         ret = intel_gvt_init(dev_priv);
1653         if (ret)
1654                 goto err_msi;
1655
1656         intel_opregion_setup(dev_priv);
1657         /*
1658          * Fill the DRAM structure with the system's raw bandwidth and
1659          * DRAM info. This will be used for memory latency calculations.
1660          */
1661         intel_get_dram_info(dev_priv);
1662
1663         intel_bw_init_hw(dev_priv);
1664
1665         return 0;
1666
1667 err_msi:
1668         if (pdev->msi_enabled)
1669                 pci_disable_msi(pdev);
1670         pm_qos_remove_request(&dev_priv->pm_qos);
1671 err_ggtt:
1672         i915_ggtt_cleanup_hw(dev_priv);
1673 err_perf:
1674         i915_perf_fini(dev_priv);
1675         return ret;
1676 }
1677
1678 /**
1679  * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1680  * @dev_priv: device private
1681  */
1682 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1683 {
1684         struct pci_dev *pdev = dev_priv->drm.pdev;
1685
1686         i915_perf_fini(dev_priv);
1687
1688         if (pdev->msi_enabled)
1689                 pci_disable_msi(pdev);
1690
1691         pm_qos_remove_request(&dev_priv->pm_qos);
1692         i915_ggtt_cleanup_hw(dev_priv);
1693 }
1694
1695 /**
1696  * i915_driver_register - register the driver with the rest of the system
1697  * @dev_priv: device private
1698  *
1699  * Perform any steps necessary to make the driver available via kernel
1700  * internal or userspace interfaces.
1701  */
1702 static void i915_driver_register(struct drm_i915_private *dev_priv)
1703 {
1704         struct drm_device *dev = &dev_priv->drm;
1705
1706         i915_gem_shrinker_register(dev_priv);
1707         i915_pmu_register(dev_priv);
1708
1709         /*
1710          * Notify a valid surface after modesetting,
1711          * when running inside a VM.
1712          */
1713         if (intel_vgpu_active(dev_priv))
1714                 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1715
1716         /* Reveal our presence to userspace */
1717         if (drm_dev_register(dev, 0) == 0) {
1718                 i915_debugfs_register(dev_priv);
1719                 i915_setup_sysfs(dev_priv);
1720
1721                 /* Depends on sysfs having been initialized */
1722                 i915_perf_register(dev_priv);
1723         } else
1724                 DRM_ERROR("Failed to register driver for userspace access!\n");
1725
1726         if (HAS_DISPLAY(dev_priv)) {
1727                 /* Must be done after probing outputs */
1728                 intel_opregion_register(dev_priv);
1729                 acpi_video_register();
1730         }
1731
1732         if (IS_GEN(dev_priv, 5))
1733                 intel_gpu_ips_init(dev_priv);
1734
1735         intel_audio_init(dev_priv);
1736
1737         /*
1738          * Some ports require correctly set-up hpd registers for detection to
1739          * work properly (otherwise they report a ghost connected status), e.g. VGA
1740          * on gm45.  Hence we can only set up the initial fbdev config after hpd
1741          * irqs are fully enabled. We do it last so that the async config
1742          * cannot run before the connectors are registered.
1743          */
1744         intel_fbdev_initial_config_async(dev);
1745
1746         /*
1747          * We need to coordinate the hotplugs with the asynchronous fbdev
1748          * configuration, for which we use the fbdev->async_cookie.
1749          */
1750         if (HAS_DISPLAY(dev_priv))
1751                 drm_kms_helper_poll_init(dev);
1752
1753         intel_power_domains_enable(dev_priv);
1754         intel_runtime_pm_enable(dev_priv);
1755 }
1756
1757 /**
1758  * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1759  * @dev_priv: device private
1760  */
1761 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1762 {
1763         intel_runtime_pm_disable(dev_priv);
1764         intel_power_domains_disable(dev_priv);
1765
1766         intel_fbdev_unregister(dev_priv);
1767         intel_audio_deinit(dev_priv);
1768
1769         /*
1770          * After flushing the fbdev (incl. a late async config, which will
1771          * have delayed the queuing of a hotplug event), flush the hotplug
1772          * events.
1773          */
1774         drm_kms_helper_poll_fini(&dev_priv->drm);
1775
1776         intel_gpu_ips_teardown();
1777         acpi_video_unregister();
1778         intel_opregion_unregister(dev_priv);
1779
1780         i915_perf_unregister(dev_priv);
1781         i915_pmu_unregister(dev_priv);
1782
1783         i915_teardown_sysfs(dev_priv);
1784         drm_dev_unplug(&dev_priv->drm);
1785
1786         i915_gem_shrinker_unregister(dev_priv);
1787 }
1788
1789 static void i915_welcome_messages(struct drm_i915_private *dev_priv)
1790 {
1791         if (drm_debug & DRM_UT_DRIVER) {
1792                 struct drm_printer p = drm_debug_printer("i915 device info:");
1793
1794                 drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
1795                            INTEL_DEVID(dev_priv),
1796                            INTEL_REVID(dev_priv),
1797                            intel_platform_name(INTEL_INFO(dev_priv)->platform),
1798                            intel_subplatform(RUNTIME_INFO(dev_priv),
1799                                              INTEL_INFO(dev_priv)->platform),
1800                            INTEL_GEN(dev_priv));
1801
1802                 intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
1803                 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
1804         }
1805
1806         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1807                 DRM_INFO("DRM_I915_DEBUG enabled\n");
1808         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1809                 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
1810         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
1811                 DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
1812 }
1813
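/**
 * i915_driver_create - allocate and minimally initialise a new i915 device
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * Allocates the drm_i915_private structure, initialises the embedded
 * drm_device, copies the matched device info into the device's write-once
 * info and records the PCI device id. Returns the new device or an
 * ERR_PTR() on failure.
 */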
1814 static struct drm_i915_private *
1815 i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
1816 {
1817         const struct intel_device_info *match_info =
1818                 (struct intel_device_info *)ent->driver_data;
1819         struct intel_device_info *device_info;
1820         struct drm_i915_private *i915;
1821         int err;
1822
1823         i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
1824         if (!i915)
1825                 return ERR_PTR(-ENOMEM);
1826
1827         err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
1828         if (err) {
1829                 kfree(i915);
1830                 return ERR_PTR(err);
1831         }
1832
1833         i915->drm.pdev = pdev;
1834         i915->drm.dev_private = i915;
1835         pci_set_drvdata(pdev, &i915->drm);
1836
1837         /* Setup the write-once "constant" device info */
1838         device_info = mkwrite_device_info(i915);
1839         memcpy(device_info, match_info, sizeof(*device_info));
1840         RUNTIME_INFO(i915)->device_id = pdev->device;
1841
1842         BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
1843
1844         return i915;
1845 }
1846
1847 static void i915_driver_destroy(struct drm_i915_private *i915)
1848 {
1849         struct pci_dev *pdev = i915->drm.pdev;
1850
1851         drm_dev_fini(&i915->drm);
1852         kfree(i915);
1853
1854         /* And make sure we never chase our dangling pointer from pci_dev */
1855         pci_set_drvdata(pdev, NULL);
1856 }
1857
1858 /**
1859  * i915_driver_load - setup chip and create an initial config
1860  * @pdev: PCI device
1861  * @ent: matching PCI ID entry
1862  *
1863  * The driver load routine has to do several things:
1864  *   - drive output discovery via intel_modeset_init()
1865  *   - initialize the memory manager
1866  *   - allocate initial config memory
1867  *   - setup the DRM framebuffer with the allocated memory
1868  */
1869 int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1870 {
1871         const struct intel_device_info *match_info =
1872                 (struct intel_device_info *)ent->driver_data;
1873         struct drm_i915_private *dev_priv;
1874         int ret;
1875
1876         dev_priv = i915_driver_create(pdev, ent);
1877         if (IS_ERR(dev_priv))
1878                 return PTR_ERR(dev_priv);
1879
1880         /* Disable nuclear pageflip by default on pre-ILK */
1881         if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
1882                 dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
1883
1884         ret = pci_enable_device(pdev);
1885         if (ret)
1886                 goto out_fini;
1887
1888         ret = i915_driver_init_early(dev_priv);
1889         if (ret < 0)
1890                 goto out_pci_disable;
1891
1892         disable_rpm_wakeref_asserts(dev_priv);
1893
1894         ret = i915_driver_init_mmio(dev_priv);
1895         if (ret < 0)
1896                 goto out_runtime_pm_put;
1897
1898         ret = i915_driver_init_hw(dev_priv);
1899         if (ret < 0)
1900                 goto out_cleanup_mmio;
1901
1902         ret = i915_load_modeset_init(&dev_priv->drm);
1903         if (ret < 0)
1904                 goto out_cleanup_hw;
1905
1906         i915_driver_register(dev_priv);
1907
1908         enable_rpm_wakeref_asserts(dev_priv);
1909
1910         i915_welcome_messages(dev_priv);
1911
1912         return 0;
1913
1914 out_cleanup_hw:
1915         i915_driver_cleanup_hw(dev_priv);
1916 out_cleanup_mmio:
1917         i915_driver_cleanup_mmio(dev_priv);
1918 out_runtime_pm_put:
1919         enable_rpm_wakeref_asserts(dev_priv);
1920         i915_driver_cleanup_early(dev_priv);
1921 out_pci_disable:
1922         pci_disable_device(pdev);
1923 out_fini:
1924         i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
1925         i915_driver_destroy(dev_priv);
1926         return ret;
1927 }
1928
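/**
 * i915_driver_unload - unwind i915_driver_load() and release the hardware
 * @dev: DRM device
 *
 * Unregisters the driver, wedges and suspends the GPU, tears down modeset
 * and GVT state, and releases the hardware resources acquired during load.
 */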
1929 void i915_driver_unload(struct drm_device *dev)
1930 {
1931         struct drm_i915_private *dev_priv = to_i915(dev);
1932         struct pci_dev *pdev = dev_priv->drm.pdev;
1933
1934         disable_rpm_wakeref_asserts(dev_priv);
1935
1936         i915_driver_unregister(dev_priv);
1937
1938         /*
1939          * After unregistering the device to prevent any new users, cancel
1940          * all in-flight requests so that we can quickly unbind the active
1941          * resources.
1942          */
1943         i915_gem_set_wedged(dev_priv);
1944
1945         /* Flush any external code that still may be under the RCU lock */
1946         synchronize_rcu();
1947
1948         i915_gem_suspend(dev_priv);
1949
1950         drm_atomic_helper_shutdown(dev);
1951
1952         intel_gvt_cleanup(dev_priv);
1953
1954         intel_modeset_cleanup(dev);
1955
1956         intel_bios_cleanup(dev_priv);
1957
1958         vga_switcheroo_unregister_client(pdev);
1959         vga_client_register(pdev, NULL, NULL, NULL);
1960
1961         intel_csr_ucode_fini(dev_priv);
1962
1963         /* Free error state after interrupts are fully disabled. */
1964         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1965         i915_reset_error_state(dev_priv);
1966
1967         i915_gem_fini(dev_priv);
1968
1969         intel_power_domains_fini_hw(dev_priv);
1970
1971         i915_driver_cleanup_hw(dev_priv);
1972         i915_driver_cleanup_mmio(dev_priv);
1973
1974         enable_rpm_wakeref_asserts(dev_priv);
1975         intel_runtime_pm_cleanup(dev_priv);
1976 }
1977
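/*
 * Final release callback for the drm_device: frees the remaining early-init
 * state and the device structure itself (assumed to run once the last
 * reference on the drm_device is dropped).
 */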
1978 static void i915_driver_release(struct drm_device *dev)
1979 {
1980         struct drm_i915_private *dev_priv = to_i915(dev);
1981
1982         i915_driver_cleanup_early(dev_priv);
1983         i915_driver_destroy(dev_priv);
1984 }
1985
1986 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1987 {
1988         struct drm_i915_private *i915 = to_i915(dev);
1989         int ret;
1990
1991         ret = i915_gem_open(i915, file);
1992         if (ret)
1993                 return ret;
1994
1995         return 0;
1996 }
1997
1998 /**
1999  * i915_driver_lastclose - clean up after all DRM clients have exited
2000  * @dev: DRM device
2001  *
2002  * Take care of cleaning up after all DRM clients have exited.  In the
2003  * mode setting case, we want to restore the kernel's initial mode (just
2004  * in case the last client left us in a bad state).
2005  *
2006  * Additionally, in the non-mode setting case, we'll tear down the GTT
2007  * and DMA structures, since the kernel won't be using them, and clean
2008  * up any GEM state.
2009  */
2010 static void i915_driver_lastclose(struct drm_device *dev)
2011 {
2012         intel_fbdev_restore_mode(dev);
2013         vga_switcheroo_process_delayed_switch();
2014 }
2015
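/*
 * Per-file cleanup: close the client's GEM contexts and release its GEM
 * state under struct_mutex, then free the file private data.
 */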
2016 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
2017 {
2018         struct drm_i915_file_private *file_priv = file->driver_priv;
2019
2020         mutex_lock(&dev->struct_mutex);
2021         i915_gem_context_close(file);
2022         i915_gem_release(dev, file);
2023         mutex_unlock(&dev->struct_mutex);
2024
2025         kfree(file_priv);
2026 }
2027
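/* Call each encoder's ->suspend() hook, if any, under the modeset locks. */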
2028 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
2029 {
2030         struct drm_device *dev = &dev_priv->drm;
2031         struct intel_encoder *encoder;
2032
2033         drm_modeset_lock_all(dev);
2034         for_each_intel_encoder(dev, encoder)
2035                 if (encoder->suspend)
2036                         encoder->suspend(encoder);
2037         drm_modeset_unlock_all(dev);
2038 }
2039
2040 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2041                               bool rpm_resume);
2042 static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
2043
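/*
 * Returns true when the target system sleep state is shallower than S3
 * (i.e. suspend-to-idle), false otherwise or when ACPI sleep support is
 * not built in.
 */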
2044 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
2045 {
2046 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
2047         if (acpi_target_system_state() < ACPI_STATE_S3)
2048                 return true;
2049 #endif
2050         return false;
2051 }
2052
2053 static int i915_drm_prepare(struct drm_device *dev)
2054 {
2055         struct drm_i915_private *i915 = to_i915(dev);
2056
2057         /*
2058          * NB intel_display_suspend() may issue new requests after we've
2059          * ostensibly marked the GPU as ready-to-sleep here. We need to
2060          * split out that work and pull it forward so that after that point,
2061          * the GPU is not woken again.
2062          */
2063         i915_gem_suspend(i915);
2064
2065         return 0;
2066 }
2067
2068 static int i915_drm_suspend(struct drm_device *dev)
2069 {
2070         struct drm_i915_private *dev_priv = to_i915(dev);
2071         struct pci_dev *pdev = dev_priv->drm.pdev;
2072         pci_power_t opregion_target_state;
2073
2074         disable_rpm_wakeref_asserts(dev_priv);
2075
2076         /* We do a lot of poking in a lot of registers, so make sure they
2077          * work properly. */
2078         intel_power_domains_disable(dev_priv);
2079
2080         drm_kms_helper_poll_disable(dev);
2081
2082         pci_save_state(pdev);
2083
2084         intel_display_suspend(dev);
2085
2086         intel_dp_mst_suspend(dev_priv);
2087
2088         intel_runtime_pm_disable_interrupts(dev_priv);
2089         intel_hpd_cancel_work(dev_priv);
2090
2091         intel_suspend_encoders(dev_priv);
2092
2093         intel_suspend_hw(dev_priv);
2094
2095         i915_gem_suspend_gtt_mappings(dev_priv);
2096
2097         i915_save_state(dev_priv);
2098
2099         opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
2100         intel_opregion_suspend(dev_priv, opregion_target_state);
2101
2102         intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
2103
2104         dev_priv->suspend_count++;
2105
2106         intel_csr_ucode_suspend(dev_priv);
2107
2108         enable_rpm_wakeref_asserts(dev_priv);
2109
2110         return 0;
2111 }
2112
2113 static enum i915_drm_suspend_mode
2114 get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
2115 {
2116         if (hibernate)
2117                 return I915_DRM_SUSPEND_HIBERNATE;
2118
2119         if (suspend_to_idle(dev_priv))
2120                 return I915_DRM_SUSPEND_IDLE;
2121
2122         return I915_DRM_SUSPEND_MEM;
2123 }
2124
2125 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
2126 {
2127         struct drm_i915_private *dev_priv = to_i915(dev);
2128         struct pci_dev *pdev = dev_priv->drm.pdev;
2129         int ret;
2130
2131         disable_rpm_wakeref_asserts(dev_priv);
2132
2133         i915_gem_suspend_late(dev_priv);
2134
2135         intel_uncore_suspend(&dev_priv->uncore);
2136
2137         intel_power_domains_suspend(dev_priv,
2138                                     get_suspend_mode(dev_priv, hibernation));
2139
2140         ret = 0;
2141         if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
2142                 bxt_enable_dc9(dev_priv);
2143         else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2144                 hsw_enable_pc8(dev_priv);
2145         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2146                 ret = vlv_suspend_complete(dev_priv);
2147
2148         if (ret) {
2149                 DRM_ERROR("Suspend complete failed: %d\n", ret);
2150                 intel_power_domains_resume(dev_priv);
2151
2152                 goto out;
2153         }
2154
2155         pci_disable_device(pdev);
2156         /*
2157          * During hibernation on some platforms the BIOS may try to access
2158          * the device even though it's already in D3 and hang the machine. So
2159          * leave the device in D0 on those platforms and hope the BIOS will
2160          * power down the device properly. The issue was seen on multiple old
2161          * GENs with different BIOS vendors, so having an explicit blacklist
2162          * is impractical; apply the workaround on everything pre GEN6. The
2163          * platforms where the issue was seen:
2164          * Lenovo Thinkpad X301, X61s, X60, T60, X41
2165          * Fujitsu FSC S7110
2166          * Acer Aspire 1830T
2167          */
2168         if (!(hibernation && INTEL_GEN(dev_priv) < 6))
2169                 pci_set_power_state(pdev, PCI_D3hot);
2170
2171 out:
2172         enable_rpm_wakeref_asserts(dev_priv);
2173         if (!dev_priv->uncore.user_forcewake.count)
2174                 intel_runtime_pm_cleanup(dev_priv);
2175
2176         return ret;
2177 }
2178
2179 static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
2180 {
2181         int error;
2182
2183         if (!dev) {
2184                 DRM_ERROR("dev: %p\n", dev);
2185                 DRM_ERROR("DRM not initialized, aborting suspend.\n");
2186                 return -ENODEV;
2187         }
2188
2189         if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
2190                          state.event != PM_EVENT_FREEZE))
2191                 return -EINVAL;
2192
2193         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2194                 return 0;
2195
2196         error = i915_drm_suspend(dev);
2197         if (error)
2198                 return error;
2199
2200         return i915_drm_suspend_late(dev, false);
2201 }
2202
2203 static int i915_drm_resume(struct drm_device *dev)
2204 {
2205         struct drm_i915_private *dev_priv = to_i915(dev);
2206         int ret;
2207
2208         disable_rpm_wakeref_asserts(dev_priv);
2209         intel_sanitize_gt_powersave(dev_priv);
2210
2211         i915_gem_sanitize(dev_priv);
2212
2213         ret = i915_ggtt_enable_hw(dev_priv);
2214         if (ret)
2215                 DRM_ERROR("failed to re-enable GGTT\n");
2216
2217         intel_csr_ucode_resume(dev_priv);
2218
2219         i915_restore_state(dev_priv);
2220         intel_pps_unlock_regs_wa(dev_priv);
2221
2222         intel_init_pch_refclk(dev_priv);
2223
2224         /*
2225          * Interrupts have to be enabled before any batches are run. If not,
2226          * the GPU will hang. i915_gem_init_hw() will initiate batches to
2227          * update/restore the context.
2228          *
2229          * drm_mode_config_reset() needs AUX interrupts.
2230          *
2231          * Modeset enabling in intel_modeset_init_hw() also needs working
2232          * interrupts.
2233          */
2234         intel_runtime_pm_enable_interrupts(dev_priv);
2235
2236         drm_mode_config_reset(dev);
2237
2238         i915_gem_resume(dev_priv);
2239
2240         intel_modeset_init_hw(dev);
2241         intel_init_clock_gating(dev_priv);
2242
2243         spin_lock_irq(&dev_priv->irq_lock);
2244         if (dev_priv->display.hpd_irq_setup)
2245                 dev_priv->display.hpd_irq_setup(dev_priv);
2246         spin_unlock_irq(&dev_priv->irq_lock);
2247
2248         intel_dp_mst_resume(dev_priv);
2249
2250         intel_display_resume(dev);
2251
2252         drm_kms_helper_poll_enable(dev);
2253
2254         /*
2255          * ... but also need to make sure that hotplug processing
2256          * doesn't cause havoc. As in the driver load code, we don't
2257          * bother with the tiny race here where we might lose hotplug
2258          * notifications.
2259          */
2260         intel_hpd_init(dev_priv);
2261
2262         intel_opregion_resume(dev_priv);
2263
2264         intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
2265
2266         intel_power_domains_enable(dev_priv);
2267
2268         enable_rpm_wakeref_asserts(dev_priv);
2269
2270         return 0;
2271 }
2272
2273 static int i915_drm_resume_early(struct drm_device *dev)
2274 {
2275         struct drm_i915_private *dev_priv = to_i915(dev);
2276         struct pci_dev *pdev = dev_priv->drm.pdev;
2277         int ret;
2278
2279         /*
2280          * We have a resume ordering issue with the snd-hda driver also
2281          * requiring our device to be powered up. Due to the lack of a
2282          * parent/child relationship we currently solve this with an early
2283          * resume hook.
2284          *
2285          * FIXME: This should be solved with a special hdmi sink device or
2286          * similar so that power domains can be employed.
2287          */
2288
2289         /*
2290          * Note that we need to set the power state explicitly, since we
2291          * powered off the device during freeze and the PCI core won't power
2292          * it back up for us during thaw. Powering off the device during
2293          * freeze is not a hard requirement though, and during the
2294          * suspend/resume phases the PCI core makes sure we get here with the
2295          * device powered on. So in case we change our freeze logic and keep
2296          * the device powered we can also remove the following set power state
2297          * call.
2298          */
2299         ret = pci_set_power_state(pdev, PCI_D0);
2300         if (ret) {
2301                 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
2302                 return ret;
2303         }
2304
2305         /*
2306          * Note that pci_enable_device() first enables any parent bridge
2307          * device and only then sets the power state for this device. The
2308          * bridge enabling is a nop though, since bridge devices are resumed
2309          * first. The order of enabling power and enabling the device is
2310          * imposed by the PCI core as described above, so here we preserve the
2311          * same order for the freeze/thaw phases.
2312          *
2313          * TODO: eventually we should remove pci_disable_device() /
2314          * pci_enable_device() from suspend/resume. Due to how they
2315          * depend on the device enable refcount we can't anyway depend on them
2316          * disabling/enabling the device.
2317          */
2318         if (pci_enable_device(pdev))
2319                 return -EIO;
2320
2321         pci_set_master(pdev);
2322
2323         disable_rpm_wakeref_asserts(dev_priv);
2324
2325         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2326                 ret = vlv_resume_prepare(dev_priv, false);
2327         if (ret)
2328                 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
2329                           ret);
2330
2331         intel_uncore_resume_early(&dev_priv->uncore);
2332
2333         i915_check_and_clear_faults(dev_priv);
2334
2335         if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
2336                 gen9_sanitize_dc_state(dev_priv);
2337                 bxt_disable_dc9(dev_priv);
2338         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2339                 hsw_disable_pc8(dev_priv);
2340         }
2341
2342         intel_uncore_sanitize(dev_priv);
2343
2344         intel_power_domains_resume(dev_priv);
2345
2346         intel_gt_sanitize(dev_priv, true);
2347
2348         enable_rpm_wakeref_asserts(dev_priv);
2349
2350         return ret;
2351 }
2352
2353 static int i915_resume_switcheroo(struct drm_device *dev)
2354 {
2355         int ret;
2356
2357         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2358                 return 0;
2359
2360         ret = i915_drm_resume_early(dev);
2361         if (ret)
2362                 return ret;
2363
2364         return i915_drm_resume(dev);
2365 }
2366
2367 static int i915_pm_prepare(struct device *kdev)
2368 {
2369         struct pci_dev *pdev = to_pci_dev(kdev);
2370         struct drm_device *dev = pci_get_drvdata(pdev);
2371
2372         if (!dev) {
2373                 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2374                 return -ENODEV;
2375         }
2376
2377         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2378                 return 0;
2379
2380         return i915_drm_prepare(dev);
2381 }
2382
2383 static int i915_pm_suspend(struct device *kdev)
2384 {
2385         struct pci_dev *pdev = to_pci_dev(kdev);
2386         struct drm_device *dev = pci_get_drvdata(pdev);
2387
2388         if (!dev) {
2389                 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2390                 return -ENODEV;
2391         }
2392
2393         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2394                 return 0;
2395
2396         return i915_drm_suspend(dev);
2397 }
2398
2399 static int i915_pm_suspend_late(struct device *kdev)
2400 {
2401         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
2402
2403         /*
2404          * We have a suspend ordering issue with the snd-hda driver also
2405          * requiring our device to be powered up. Due to the lack of a
2406          * parent/child relationship we currently solve this with a late
2407          * suspend hook.
2408          *
2409          * FIXME: This should be solved with a special hdmi sink device or
2410          * similar so that power domains can be employed.
2411          */
2412         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2413                 return 0;
2414
2415         return i915_drm_suspend_late(dev, false);
2416 }
2417
2418 static int i915_pm_poweroff_late(struct device *kdev)
2419 {
2420         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
2421
2422         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2423                 return 0;
2424
2425         return i915_drm_suspend_late(dev, true);
2426 }
2427
2428 static int i915_pm_resume_early(struct device *kdev)
2429 {
2430         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
2431
2432         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2433                 return 0;
2434
2435         return i915_drm_resume_early(dev);
2436 }
2437
2438 static int i915_pm_resume(struct device *kdev)
2439 {
2440         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
2441
2442         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2443                 return 0;
2444
2445         return i915_drm_resume(dev);
2446 }
2447
2448 /* freeze: before creating the hibernation_image */
2449 static int i915_pm_freeze(struct device *kdev)
2450 {
2451         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
2452         int ret;
2453
2454         if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
2455                 ret = i915_drm_suspend(dev);
2456                 if (ret)
2457                         return ret;
2458         }
2459
2460         ret = i915_gem_freeze(kdev_to_i915(kdev));
2461         if (ret)
2462                 return ret;
2463
2464         return 0;
2465 }
2466
2467 static int i915_pm_freeze_late(struct device *kdev)
2468 {
2469         struct drm_device *dev = &kdev_to_i915(kdev)->drm;
2470         int ret;
2471
2472         if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
2473                 ret = i915_drm_suspend_late(dev, true);
2474                 if (ret)
2475                         return ret;
2476         }
2477
2478         ret = i915_gem_freeze_late(kdev_to_i915(kdev));
2479         if (ret)
2480                 return ret;
2481
2482         return 0;
2483 }
2484
2485 /* thaw: called after creating the hibernation image, but before turning off. */
2486 static int i915_pm_thaw_early(struct device *kdev)
2487 {
2488         return i915_pm_resume_early(kdev);
2489 }
2490
2491 static int i915_pm_thaw(struct device *kdev)
2492 {
2493         return i915_pm_resume(kdev);
2494 }
2495
2496 /* restore: called after loading the hibernation image. */
2497 static int i915_pm_restore_early(struct device *kdev)
2498 {
2499         return i915_pm_resume_early(kdev);
2500 }
2501
2502 static int i915_pm_restore(struct device *kdev)
2503 {
2504         return i915_pm_resume(kdev);
2505 }
2506
2507 /*
2508  * Save all Gunit registers that may be lost after a D3 and a subsequent
2509  * S0i[R123] transition. The list of registers needing a save/restore is
2510  * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2511  * registers in the following way:
2512  * - Driver: saved/restored by the driver
2513  * - Punit : saved/restored by the Punit firmware
2514  * - No, w/o marking: no need to save/restore, since the register is R/O or
2515  *                    used internally by the HW in a way that doesn't depend on
2516  *                    keeping the content across a suspend/resume.
2517  * - Debug : used for debugging
2518  *
2519  * We save/restore all registers marked with 'Driver', with the following
2520  * exceptions:
2521  * - Registers out of use, including also registers marked with 'Debug'.
2522  *   These have no effect on the driver's operation, so we don't save/restore
2523  *   them to reduce the overhead.
2524  * - Registers that are fully setup by an initialization function called from
2525  *   the resume path. For example many clock gating and RPS/RC6 registers.
2526  * - Registers that provide the right functionality with their reset defaults.
2527  *
2528  * TODO: Except for registers that based on the above 3 criteria can be safely
2529  * ignored, we save/restore all others, practically treating the HW context as
2530  * a black-box for the driver. Further investigation is needed to reduce the
2531  * saved/restored registers even further, by following the same 3 criteria.
2532  */
2533 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2534 {
2535         struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2536         int i;
2537
2538         /* GAM 0x4000-0x4770 */
2539         s->wr_watermark         = I915_READ(GEN7_WR_WATERMARK);
2540         s->gfx_prio_ctrl        = I915_READ(GEN7_GFX_PRIO_CTRL);
2541         s->arb_mode             = I915_READ(ARB_MODE);
2542         s->gfx_pend_tlb0        = I915_READ(GEN7_GFX_PEND_TLB0);
2543         s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);
2544
2545         for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2546                 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
2547
2548         s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
2549         s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
2550
2551         s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
2552         s->ecochk               = I915_READ(GAM_ECOCHK);
2553         s->bsd_hwsp             = I915_READ(BSD_HWS_PGA_GEN7);
2554         s->blt_hwsp             = I915_READ(BLT_HWS_PGA_GEN7);
2555
2556         s->tlb_rd_addr          = I915_READ(GEN7_TLB_RD_ADDR);
2557
2558         /* MBC 0x9024-0x91D0, 0x8500 */
2559         s->g3dctl               = I915_READ(VLV_G3DCTL);
2560         s->gsckgctl             = I915_READ(VLV_GSCKGCTL);
2561         s->mbctl                = I915_READ(GEN6_MBCTL);
2562
2563         /* GCP 0x9400-0x9424, 0x8100-0x810C */
2564         s->ucgctl1              = I915_READ(GEN6_UCGCTL1);
2565         s->ucgctl3              = I915_READ(GEN6_UCGCTL3);
2566         s->rcgctl1              = I915_READ(GEN6_RCGCTL1);
2567         s->rcgctl2              = I915_READ(GEN6_RCGCTL2);
2568         s->rstctl               = I915_READ(GEN6_RSTCTL);
2569         s->misccpctl            = I915_READ(GEN7_MISCCPCTL);
2570
2571         /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2572         s->gfxpause             = I915_READ(GEN6_GFXPAUSE);
2573         s->rpdeuhwtc            = I915_READ(GEN6_RPDEUHWTC);
2574         s->rpdeuc               = I915_READ(GEN6_RPDEUC);
2575         s->ecobus               = I915_READ(ECOBUS);
2576         s->pwrdwnupctl          = I915_READ(VLV_PWRDWNUPCTL);
2577         s->rp_down_timeout      = I915_READ(GEN6_RP_DOWN_TIMEOUT);
2578         s->rp_deucsw            = I915_READ(GEN6_RPDEUCSW);
2579         s->rcubmabdtmr          = I915_READ(GEN6_RCUBMABDTMR);
2580         s->rcedata              = I915_READ(VLV_RCEDATA);
2581         s->spare2gh             = I915_READ(VLV_SPAREG2H);
2582
2583         /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2584         s->gt_imr               = I915_READ(GTIMR);
2585         s->gt_ier               = I915_READ(GTIER);
2586         s->pm_imr               = I915_READ(GEN6_PMIMR);
2587         s->pm_ier               = I915_READ(GEN6_PMIER);
2588
2589         for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2590                 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
2591
2592         /* GT SA CZ domain, 0x100000-0x138124 */
2593         s->tilectl              = I915_READ(TILECTL);
2594         s->gt_fifoctl           = I915_READ(GTFIFOCTL);
2595         s->gtlc_wake_ctrl       = I915_READ(VLV_GTLC_WAKE_CTRL);
2596         s->gtlc_survive         = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2597         s->pmwgicz              = I915_READ(VLV_PMWGICZ);
2598
2599         /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2600         s->gu_ctl0              = I915_READ(VLV_GU_CTL0);
2601         s->gu_ctl1              = I915_READ(VLV_GU_CTL1);
2602         s->pcbr                 = I915_READ(VLV_PCBR);
2603         s->clock_gate_dis2      = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2604
2605         /*
2606          * Not saving any of:
2607          * DFT,         0x9800-0x9EC0
2608          * SARB,        0xB000-0xB1FC
2609          * GAC,         0x5208-0x524C, 0x14000-0x14C000
2610          * PCI CFG
2611          */
2612 }
2613
2614 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2615 {
2616         struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2617         u32 val;
2618         int i;
2619
2620         /* GAM 0x4000-0x4770 */
2621         I915_WRITE(GEN7_WR_WATERMARK,   s->wr_watermark);
2622         I915_WRITE(GEN7_GFX_PRIO_CTRL,  s->gfx_prio_ctrl);
2623         I915_WRITE(ARB_MODE,            s->arb_mode | (0xffff << 16));
2624         I915_WRITE(GEN7_GFX_PEND_TLB0,  s->gfx_pend_tlb0);
2625         I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);
2626
2627         for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2628                 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
2629
2630         I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
2631         I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
2632
2633         I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2634         I915_WRITE(GAM_ECOCHK,          s->ecochk);
2635         I915_WRITE(BSD_HWS_PGA_GEN7,    s->bsd_hwsp);
2636         I915_WRITE(BLT_HWS_PGA_GEN7,    s->blt_hwsp);
2637
2638         I915_WRITE(GEN7_TLB_RD_ADDR,    s->tlb_rd_addr);
2639
2640         /* MBC 0x9024-0x91D0, 0x8500 */
2641         I915_WRITE(VLV_G3DCTL,          s->g3dctl);
2642         I915_WRITE(VLV_GSCKGCTL,        s->gsckgctl);
2643         I915_WRITE(GEN6_MBCTL,          s->mbctl);
2644
2645         /* GCP 0x9400-0x9424, 0x8100-0x810C */
2646         I915_WRITE(GEN6_UCGCTL1,        s->ucgctl1);
2647         I915_WRITE(GEN6_UCGCTL3,        s->ucgctl3);
2648         I915_WRITE(GEN6_RCGCTL1,        s->rcgctl1);
2649         I915_WRITE(GEN6_RCGCTL2,        s->rcgctl2);
2650         I915_WRITE(GEN6_RSTCTL,         s->rstctl);
2651         I915_WRITE(GEN7_MISCCPCTL,      s->misccpctl);
2652
2653         /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2654         I915_WRITE(GEN6_GFXPAUSE,       s->gfxpause);
2655         I915_WRITE(GEN6_RPDEUHWTC,      s->rpdeuhwtc);
2656         I915_WRITE(GEN6_RPDEUC,         s->rpdeuc);
2657         I915_WRITE(ECOBUS,              s->ecobus);
2658         I915_WRITE(VLV_PWRDWNUPCTL,     s->pwrdwnupctl);
2659         I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
2660         I915_WRITE(GEN6_RPDEUCSW,       s->rp_deucsw);
2661         I915_WRITE(GEN6_RCUBMABDTMR,    s->rcubmabdtmr);
2662         I915_WRITE(VLV_RCEDATA,         s->rcedata);
2663         I915_WRITE(VLV_SPAREG2H,        s->spare2gh);
2664
2665         /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2666         I915_WRITE(GTIMR,               s->gt_imr);
2667         I915_WRITE(GTIER,               s->gt_ier);
2668         I915_WRITE(GEN6_PMIMR,          s->pm_imr);
2669         I915_WRITE(GEN6_PMIER,          s->pm_ier);
2670
2671         for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2672                 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
2673
2674         /* GT SA CZ domain, 0x100000-0x138124 */
2675         I915_WRITE(TILECTL,                     s->tilectl);
2676         I915_WRITE(GTFIFOCTL,                   s->gt_fifoctl);
2677         /*
2678          * Preserve the GT allow wake and GFX force clock bits; they are not
2679          * to be restored, as they are used to control the s0ix suspend/resume
2680          * sequence by the caller.
2681          */
2682         val = I915_READ(VLV_GTLC_WAKE_CTRL);
2683         val &= VLV_GTLC_ALLOWWAKEREQ;
2684         val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2685         I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2686
2687         val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2688         val &= VLV_GFX_CLK_FORCE_ON_BIT;
2689         val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2690         I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2691
2692         I915_WRITE(VLV_PMWGICZ,                 s->pmwgicz);
2693
2694         /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2695         I915_WRITE(VLV_GU_CTL0,                 s->gu_ctl0);
2696         I915_WRITE(VLV_GU_CTL1,                 s->gu_ctl1);
2697         I915_WRITE(VLV_PCBR,                    s->pcbr);
2698         I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
2699 }
2700
2701 static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
2702                                   u32 mask, u32 val)
2703 {
2704         i915_reg_t reg = VLV_GTLC_PW_STATUS;
2705         u32 reg_value;
2706         int ret;
2707
2708         /* The HW does not like us polling for PW_STATUS frequently, so
2709          * use the sleeping loop rather than risk the busy spin within
2710          * intel_wait_for_register().
2711          *
2712          * Transitioning between RC6 states should be at most 2ms (see
2713          * valleyview_enable_rps) so use a 3ms timeout.
2714          */
2715         ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);
2716
2717         /* just trace the final value */
2718         trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2719
2720         return ret;
2721 }
2722
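/*
 * Force the VLV GFX clock on or off via the survivability register. When
 * forcing it on, wait (up to 20ms) for the clock status bit to assert.
 */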
2723 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2724 {
2725         u32 val;
2726         int err;
2727
2728         val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2729         val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2730         if (force_on)
2731                 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2732         I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2733
2734         if (!force_on)
2735                 return 0;
2736
2737         err = intel_wait_for_register(&dev_priv->uncore,
2738                                       VLV_GTLC_SURVIVABILITY_REG,
2739                                       VLV_GFX_CLK_STATUS_BIT,
2740                                       VLV_GFX_CLK_STATUS_BIT,
2741                                       20);
2742         if (err)
2743                 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2744                           I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2745
2746         return err;
2747 }
2748
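/*
 * Allow or disallow GT wake requests and wait for the hardware to
 * acknowledge the new setting via VLV_GTLC_ALLOWWAKEACK.
 */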
2749 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2750 {
2751         u32 mask;
2752         u32 val;
2753         int err;
2754
2755         val = I915_READ(VLV_GTLC_WAKE_CTRL);
2756         val &= ~VLV_GTLC_ALLOWWAKEREQ;
2757         if (allow)
2758                 val |= VLV_GTLC_ALLOWWAKEREQ;
2759         I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2760         POSTING_READ(VLV_GTLC_WAKE_CTRL);
2761
2762         mask = VLV_GTLC_ALLOWWAKEACK;
2763         val = allow ? mask : 0;
2764
2765         err = vlv_wait_for_pw_status(dev_priv, mask, val);
2766         if (err)
2767                 DRM_ERROR("timeout disabling GT waking\n");
2768
2769         return err;
2770 }
2771
2772 static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2773                                   bool wait_for_on)
2774 {
2775         u32 mask;
2776         u32 val;
2777
2778         mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2779         val = wait_for_on ? mask : 0;
2780
2781         /*
2782          * RC6 transitioning can be delayed up to 2 msec (see
2783          * valleyview_enable_rps), so use 3 msec for safety.
2784          *
2785          * This can fail to turn off RC6 if the GPU is stuck after a failed
2786          * reset and we are trying to force the machine to sleep.
2787          */
2788         if (vlv_wait_for_pw_status(dev_priv, mask, val))
2789                 DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
2790                                  onoff(wait_for_on));
2791 }
2792
2793 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2794 {
2795         if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2796                 return;
2797
2798         DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
2799         I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2800 }
2801
2802 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
2803 {
2804         u32 mask;
2805         int err;
2806
2807         /*
2808          * Bspec defines the following GT power well "on" flags as debug only, so
2809          * don't treat them as hard failures.
2810          */
2811         vlv_wait_for_gt_wells(dev_priv, false);
2812
2813         mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2814         WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2815
2816         vlv_check_no_gt_access(dev_priv);
2817
2818         err = vlv_force_gfx_clock(dev_priv, true);
2819         if (err)
2820                 goto err1;
2821
2822         err = vlv_allow_gt_wake(dev_priv, false);
2823         if (err)
2824                 goto err2;
2825
2826         if (!IS_CHERRYVIEW(dev_priv))
2827                 vlv_save_gunit_s0ix_state(dev_priv);
2828
2829         err = vlv_force_gfx_clock(dev_priv, false);
2830         if (err)
2831                 goto err2;
2832
2833         return 0;
2834
2835 err2:
2836         /* For safety always re-enable waking and disable gfx clock forcing */
2837         vlv_allow_gt_wake(dev_priv, true);
2838 err1:
2839         vlv_force_gfx_clock(dev_priv, false);
2840
2841         return err;
2842 }
2843
2844 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2845                                 bool rpm_resume)
2846 {
2847         int err;
2848         int ret;
2849
2850         /*
2851          * If any of the steps fail, just try to continue; that's the best we
2852          * can do at this point. Return the first error code (which will also
2853          * leave RPM permanently disabled).
2854          */
2855         ret = vlv_force_gfx_clock(dev_priv, true);
2856
2857         if (!IS_CHERRYVIEW(dev_priv))
2858                 vlv_restore_gunit_s0ix_state(dev_priv);
2859
2860         err = vlv_allow_gt_wake(dev_priv, true);
2861         if (!ret)
2862                 ret = err;
2863
2864         err = vlv_force_gfx_clock(dev_priv, false);
2865         if (!ret)
2866                 ret = err;
2867
2868         vlv_check_no_gt_access(dev_priv);
2869
2870         if (rpm_resume)
2871                 intel_init_clock_gating(dev_priv);
2872
2873         return ret;
2874 }
2875
2876 static int intel_runtime_suspend(struct device *kdev)
2877 {
2878         struct pci_dev *pdev = to_pci_dev(kdev);
2879         struct drm_device *dev = pci_get_drvdata(pdev);
2880         struct drm_i915_private *dev_priv = to_i915(dev);
2881         int ret;
2882
2883         if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
2884                 return -ENODEV;
2885
2886         if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
2887                 return -ENODEV;
2888
2889         DRM_DEBUG_KMS("Suspending device\n");
2890
2891         disable_rpm_wakeref_asserts(dev_priv);
2892
2893         /*
2894          * We are safe here against re-faults, since the fault handler takes
2895          * an RPM reference.
2896          */
2897         i915_gem_runtime_suspend(dev_priv);
2898
2899         intel_uc_runtime_suspend(dev_priv);
2900
2901         intel_runtime_pm_disable_interrupts(dev_priv);
2902
2903         intel_uncore_suspend(&dev_priv->uncore);
2904
2905         ret = 0;
2906         if (INTEL_GEN(dev_priv) >= 11) {
2907                 icl_display_core_uninit(dev_priv);
2908                 bxt_enable_dc9(dev_priv);
2909         } else if (IS_GEN9_LP(dev_priv)) {
2910                 bxt_display_core_uninit(dev_priv);
2911                 bxt_enable_dc9(dev_priv);
2912         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2913                 hsw_enable_pc8(dev_priv);
2914         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2915                 ret = vlv_suspend_complete(dev_priv);
2916         }
2917
2918         if (ret) {
2919                 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
2920                 intel_uncore_runtime_resume(&dev_priv->uncore);
2921
2922                 intel_runtime_pm_enable_interrupts(dev_priv);
2923
2924                 intel_uc_resume(dev_priv);
2925
2926                 i915_gem_init_swizzling(dev_priv);
2927                 i915_gem_restore_fences(dev_priv);
2928
2929                 enable_rpm_wakeref_asserts(dev_priv);
2930
2931                 return ret;
2932         }
2933
2934         enable_rpm_wakeref_asserts(dev_priv);
2935         intel_runtime_pm_cleanup(dev_priv);
2936
2937         if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
2938                 DRM_ERROR("Unclaimed access detected prior to suspending\n");
2939
2940         dev_priv->runtime_pm.suspended = true;
2941
2942         /*
2943          * FIXME: We really should find a document that references the arguments
2944          * used below!
2945          */
2946         if (IS_BROADWELL(dev_priv)) {
2947                 /*
2948                  * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2949                  * being detected, and the call we do at intel_runtime_resume()
2950                  * won't be able to restore them. Since PCI_D3hot matches the
2951                  * actual specification and appears to be working, use it.
2952                  */
2953                 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
2954         } else {
2955                 /*
2956                  * Current versions of firmware which depend on this opregion
2957                  * notification have repurposed the D1 definition to mean
2958                  * "runtime suspended" (rather than the D3 you would normally
2959                  * expect), to distinguish it from notifications that might be
2960                  * sent via the suspend path.
2961                  */
2962                 intel_opregion_notify_adapter(dev_priv, PCI_D1);
2963         }
2964
2965         assert_forcewakes_inactive(&dev_priv->uncore);
2966
2967         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2968                 intel_hpd_poll_init(dev_priv);
2969
2970         DRM_DEBUG_KMS("Device suspended\n");
2971         return 0;
2972 }
2973
2974 static int intel_runtime_resume(struct device *kdev)
2975 {
2976         struct pci_dev *pdev = to_pci_dev(kdev);
2977         struct drm_device *dev = pci_get_drvdata(pdev);
2978         struct drm_i915_private *dev_priv = to_i915(dev);
2979         int ret = 0;
2980
2981         if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
2982                 return -ENODEV;
2983
2984         DRM_DEBUG_KMS("Resuming device\n");
2985
2986         WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
2987         disable_rpm_wakeref_asserts(dev_priv);
2988
2989         intel_opregion_notify_adapter(dev_priv, PCI_D0);
2990         dev_priv->runtime_pm.suspended = false;
2991         if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
2992                 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
2993
2994         if (INTEL_GEN(dev_priv) >= 11) {
2995                 bxt_disable_dc9(dev_priv);
2996                 icl_display_core_init(dev_priv, true);
2997                 if (dev_priv->csr.dmc_payload) {
2998                         if (dev_priv->csr.allowed_dc_mask &
2999                             DC_STATE_EN_UPTO_DC6)
3000                                 skl_enable_dc6(dev_priv);
3001                         else if (dev_priv->csr.allowed_dc_mask &
3002                                  DC_STATE_EN_UPTO_DC5)
3003                                 gen9_enable_dc5(dev_priv);
3004                 }
3005         } else if (IS_GEN9_LP(dev_priv)) {
3006                 bxt_disable_dc9(dev_priv);
3007                 bxt_display_core_init(dev_priv, true);
3008                 if (dev_priv->csr.dmc_payload &&
3009                     (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
3010                         gen9_enable_dc5(dev_priv);
3011         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3012                 hsw_disable_pc8(dev_priv);
3013         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3014                 ret = vlv_resume_prepare(dev_priv, true);
3015         }
3016
3017         intel_uncore_runtime_resume(&dev_priv->uncore);
3018
3019         intel_runtime_pm_enable_interrupts(dev_priv);
3020
3021         intel_uc_resume(dev_priv);
3022
3023         /*
3024          * No point in rolling things back on error; the best we can do
3025          * is hope that things will still work (and disable RPM).
3026          */
3027         i915_gem_init_swizzling(dev_priv);
3028         i915_gem_restore_fences(dev_priv);
3029
3030         /*
3031          * On VLV/CHV display interrupts are part of the display
3032          * power well, so hpd is reinitialized from there. For
3033          * everyone else do it here.
3034          */
3035         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
3036                 intel_hpd_init(dev_priv);
3037
3038         intel_enable_ipc(dev_priv);
3039
3040         enable_rpm_wakeref_asserts(dev_priv);
3041
3042         if (ret)
3043                 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
3044         else
3045                 DRM_DEBUG_KMS("Device resumed\n");
3046
3047         return ret;
3048 }
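/*
 * The two callbacks above are not called directly by the driver; the PM
 * core invokes them once the device's runtime-PM usage count drops to zero
 * and (typically) the autosuspend delay has expired. As a rough
 * illustration, not taken from this driver, a caller using the generic API
 * from <linux/pm_runtime.h> around some hardware access could look like
 * the hypothetical foo_access_hw() below (foo_do_work() is an assumed
 * placeholder, not an i915 function):
 */
#if 0	/* illustrative sketch, never compiled */
static int foo_access_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* wake the device if needed */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}

	foo_do_work(dev);			/* device guaranteed awake here */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* may lead to runtime_suspend */
	return 0;
}
#endif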
3049
3050 const struct dev_pm_ops i915_pm_ops = {
3051         /*
3052          * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
3053          * PMSG_RESUME]
3054          */
3055         .prepare = i915_pm_prepare,
3056         .suspend = i915_pm_suspend,
3057         .suspend_late = i915_pm_suspend_late,
3058         .resume_early = i915_pm_resume_early,
3059         .resume = i915_pm_resume,
3060
3061         /*
3062          * S4 event handlers
3063          * @freeze, @freeze_late    : called (1) before creating the
3064          *                            hibernation image [PMSG_FREEZE] and
3065          *                            (2) after rebooting, before restoring
3066          *                            the image [PMSG_QUIESCE]
3067          * @thaw, @thaw_early       : called (1) after creating the hibernation
3068          *                            image, before writing it [PMSG_THAW]
3069          *                            and (2) after failing to create or
3070          *                            restore the image [PMSG_RECOVER]
3071          * @poweroff, @poweroff_late: called after writing the hibernation
3072          *                            image, before rebooting [PMSG_HIBERNATE]
3073          * @restore, @restore_early : called after rebooting and restoring the
3074          *                            hibernation image [PMSG_RESTORE]
3075          */
3076         .freeze = i915_pm_freeze,
3077         .freeze_late = i915_pm_freeze_late,
3078         .thaw_early = i915_pm_thaw_early,
3079         .thaw = i915_pm_thaw,
3080         .poweroff = i915_pm_suspend,
3081         .poweroff_late = i915_pm_poweroff_late,
3082         .restore_early = i915_pm_restore_early,
3083         .restore = i915_pm_restore,
3084
3085         /* S0ix (via runtime suspend) event handlers */
3086         .runtime_suspend = intel_runtime_suspend,
3087         .runtime_resume = intel_runtime_resume,
3088 };
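/*
 * For comparison, a driver whose S3 and S4 flows are identical can usually
 * rely on the helpers from <linux/pm.h> instead of spelling out every
 * callback. A hypothetical minimal table (foo_suspend() and friends are
 * assumed placeholders, not i915 functions) could look like this:
 */
#if 0	/* illustrative sketch, never compiled */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
#endif
/*
 * i915 spells its callbacks out because several of its S4 hooks differ
 * from the S3 pair and it also needs the *_late/*_early variants, which
 * the simple helpers above do not populate.
 */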
3089
3090 static const struct vm_operations_struct i915_gem_vm_ops = {
3091         .fault = i915_gem_fault,
3092         .open = drm_gem_vm_open,
3093         .close = drm_gem_vm_close,
3094 };
3095
3096 static const struct file_operations i915_driver_fops = {
3097         .owner = THIS_MODULE,
3098         .open = drm_open,
3099         .release = drm_release,
3100         .unlocked_ioctl = drm_ioctl,
3101         .mmap = drm_gem_mmap,
3102         .poll = drm_poll,
3103         .read = drm_read,
3104         .compat_ioctl = i915_compat_ioctl,
3105         .llseek = noop_llseek,
3106 };
3107
3108 static int
3109 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
3110                           struct drm_file *file)
3111 {
3112         return -ENODEV;
3113 }
3114
3115 static const struct drm_ioctl_desc i915_ioctls[] = {
3116         DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3117         DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
3118         DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
3119         DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
3120         DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
3121         DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
3122         DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
3123         DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3124         DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
3125         DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
3126         DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3127         DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
3128         DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3129         DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3130         DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
3131         DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
3132         DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3133         DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3134         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
3135         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
3136         DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
3137         DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
3138         DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
3139         DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
3140         DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
3141         DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
3142         DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3143         DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3144         DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
3145         DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
3146         DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
3147         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
3148         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
3149         DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
3150         DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
3151         DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
3152         DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
3153         DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
3154         DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
3155         DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
3156         DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
3157         DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
3158         DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
3159         DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
3160         DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
3161         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
3162         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
3163         DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
3164         DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
3165         DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
3166         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
3167         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
3168         DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
3169         DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
3170         DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
3171         DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
3172         DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
3173         DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
3174 };
3175
3176 static struct drm_driver driver = {
3177         /* Don't use MTRRs here; the Xserver or userspace app should
3178          * deal with them for Intel hardware.
3179          */
3180         .driver_features =
3181             DRIVER_GEM | DRIVER_PRIME |
3182             DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
3183         .release = i915_driver_release,
3184         .open = i915_driver_open,
3185         .lastclose = i915_driver_lastclose,
3186         .postclose = i915_driver_postclose,
3187
3188         .gem_close_object = i915_gem_close_object,
3189         .gem_free_object_unlocked = i915_gem_free_object,
3190         .gem_vm_ops = &i915_gem_vm_ops,
3191
3192         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
3193         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
3194         .gem_prime_export = i915_gem_prime_export,
3195         .gem_prime_import = i915_gem_prime_import,
3196
3197         .dumb_create = i915_gem_dumb_create,
3198         .dumb_map_offset = i915_gem_mmap_gtt,
3199         .ioctls = i915_ioctls,
3200         .num_ioctls = ARRAY_SIZE(i915_ioctls),
3201         .fops = &i915_driver_fops,
3202         .name = DRIVER_NAME,
3203         .desc = DRIVER_DESC,
3204         .date = DRIVER_DATE,
3205         .major = DRIVER_MAJOR,
3206         .minor = DRIVER_MINOR,
3207         .patchlevel = DRIVER_PATCHLEVEL,
3208 };
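/*
 * For illustration only: a drm_driver table like the one above is bound to
 * hardware by allocating a drm_device for it and registering that with the
 * DRM core. A hypothetical, heavily simplified probe path (not i915's
 * actual one, which performs far more setup) might look like:
 */
#if 0	/* illustrative sketch, never compiled */
static int foo_probe(struct pci_dev *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_register(drm, 0);	/* exposes /dev/dri/cardN etc. */
	if (ret)
		drm_dev_put(drm);

	return ret;
}
#endif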
3209
3210 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3211 #include "selftests/mock_drm.c"
3212 #endif