Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- |
2 | */ | |
0d6aa60b | 3 | /* |
bc54fd1a | 4 | * |
1da177e4 LT |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
6 | * All Rights Reserved. | |
bc54fd1a DA |
7 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | |
9 | * copy of this software and associated documentation files (the | |
10 | * "Software"), to deal in the Software without restriction, including | |
11 | * without limitation the rights to use, copy, modify, merge, publish, | |
12 | * distribute, sub license, and/or sell copies of the Software, and to | |
13 | * permit persons to whom the Software is furnished to do so, subject to | |
14 | * the following conditions: | |
15 | * | |
16 | * The above copyright notice and this permission notice (including the | |
17 | * next paragraph) shall be included in all copies or substantial portions | |
18 | * of the Software. | |
19 | * | |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
27 | * | |
0d6aa60b | 28 | */ |
1da177e4 | 29 | |
e5747e3a | 30 | #include <linux/acpi.h> |
0673ad47 CW |
31 | #include <linux/device.h> |
32 | #include <linux/oom.h> | |
e0cd3608 | 33 | #include <linux/module.h> |
0673ad47 CW |
34 | #include <linux/pci.h> |
35 | #include <linux/pm.h> | |
d6102977 | 36 | #include <linux/pm_runtime.h> |
0673ad47 CW |
37 | #include <linux/pnp.h> |
38 | #include <linux/slab.h> | |
39 | #include <linux/vgaarb.h> | |
704ab614 | 40 | #include <linux/vga_switcheroo.h> |
0673ad47 CW |
41 | #include <linux/vt.h> |
42 | #include <acpi/video.h> | |
43 | ||
44 | #include <drm/drmP.h> | |
760285e7 | 45 | #include <drm/drm_crtc_helper.h> |
a667fb40 | 46 | #include <drm/drm_atomic_helper.h> |
0673ad47 CW |
47 | #include <drm/i915_drm.h> |
48 | ||
49 | #include "i915_drv.h" | |
50 | #include "i915_trace.h" | |
b46a33e2 | 51 | #include "i915_pmu.h" |
0673ad47 CW |
52 | #include "i915_vgpu.h" |
53 | #include "intel_drv.h" | |
5464cd65 | 54 | #include "intel_uc.h" |
79e53945 | 55 | |
112b715e KH |
56 | static struct drm_driver driver; |
57 | ||
fae919f0 | 58 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) |
0673ad47 CW |
59 | static unsigned int i915_load_fail_count; |
60 | ||
61 | bool __i915_inject_load_failure(const char *func, int line) | |
62 | { | |
4f044a88 | 63 | if (i915_load_fail_count >= i915_modparams.inject_load_failure) |
0673ad47 CW |
64 | return false; |
65 | ||
4f044a88 | 66 | if (++i915_load_fail_count == i915_modparams.inject_load_failure) { |
0673ad47 | 67 | DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", |
4f044a88 | 68 | i915_modparams.inject_load_failure, func, line); |
0673ad47 CW |
69 | return true; |
70 | } | |
71 | ||
72 | return false; | |
73 | } | |
fae919f0 | 74 | #endif |
0673ad47 CW |
75 | |
76 | #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" | |
77 | #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ | |
78 | "providing the dmesg log by booting with drm.debug=0xf" | |
79 | ||
80 | void | |
81 | __i915_printk(struct drm_i915_private *dev_priv, const char *level, | |
82 | const char *fmt, ...) | |
83 | { | |
84 | static bool shown_bug_once; | |
c49d13ee | 85 | struct device *kdev = dev_priv->drm.dev; |
0673ad47 CW |
86 | bool is_error = level[1] <= KERN_ERR[1]; |
87 | bool is_debug = level[1] == KERN_DEBUG[1]; | |
88 | struct va_format vaf; | |
89 | va_list args; | |
90 | ||
91 | if (is_debug && !(drm_debug & DRM_UT_DRIVER)) | |
92 | return; | |
93 | ||
94 | va_start(args, fmt); | |
95 | ||
96 | vaf.fmt = fmt; | |
97 | vaf.va = &args; | |
98 | ||
c49d13ee | 99 | dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", |
0673ad47 CW |
100 | __builtin_return_address(0), &vaf); |
101 | ||
102 | if (is_error && !shown_bug_once) { | |
c49d13ee | 103 | dev_notice(kdev, "%s", FDO_BUG_MSG); |
0673ad47 CW |
104 | shown_bug_once = true; |
105 | } | |
106 | ||
107 | va_end(args); | |
108 | } | |
109 | ||
110 | static bool i915_error_injected(struct drm_i915_private *dev_priv) | |
111 | { | |
fae919f0 | 112 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) |
4f044a88 MW |
113 | return i915_modparams.inject_load_failure && |
114 | i915_load_fail_count == i915_modparams.inject_load_failure; | |
fae919f0 MW |
115 | #else |
116 | return false; | |
117 | #endif | |
0673ad47 CW |
118 | } |
119 | ||
120 | #define i915_load_error(dev_priv, fmt, ...) \ | |
121 | __i915_printk(dev_priv, \ | |
122 | i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ | |
123 | fmt, ##__VA_ARGS__) | |
124 | ||
125 | ||
fd6b8f43 | 126 | static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) |
0673ad47 CW |
127 | { |
128 | enum intel_pch ret = PCH_NOP; | |
129 | ||
130 | /* | |
131 | * In a virtualized passthrough environment we can be in a | |
132 | * setup where the ISA bridge is not able to be passed through. | |
133 | * In this case, a south bridge can be emulated and we have to | |
134 | * make an educated guess as to which PCH is really there. | |
135 | */ | |
136 | ||
fd6b8f43 | 137 | if (IS_GEN5(dev_priv)) { |
0673ad47 CW |
138 | ret = PCH_IBX; |
139 | DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); | |
fd6b8f43 | 140 | } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { |
0673ad47 | 141 | ret = PCH_CPT; |
aa032130 | 142 | DRM_DEBUG_KMS("Assuming CougarPoint PCH\n"); |
fd6b8f43 | 143 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
0673ad47 | 144 | ret = PCH_LPT; |
817aef5d XZ |
145 | if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) |
146 | dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; | |
147 | else | |
148 | dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE; | |
0673ad47 | 149 | DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); |
fd6b8f43 | 150 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
0673ad47 CW |
151 | ret = PCH_SPT; |
152 | DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); | |
80937819 | 153 | } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { |
acf1dba6 | 154 | ret = PCH_CNP; |
80937819 | 155 | DRM_DEBUG_KMS("Assuming CannonPoint PCH\n"); |
0673ad47 CW |
156 | } |
157 | ||
158 | return ret; | |
159 | } | |
160 | ||
da5f53bf | 161 | static void intel_detect_pch(struct drm_i915_private *dev_priv) |
0673ad47 | 162 | { |
0673ad47 CW |
163 | struct pci_dev *pch = NULL; |
164 | ||
165 | /* In all current cases, num_pipes is equivalent to the PCH_NOP setting | |
166 | * (which really amounts to a PCH but no South Display). | |
167 | */ | |
b7f05d4a | 168 | if (INTEL_INFO(dev_priv)->num_pipes == 0) { |
0673ad47 CW |
169 | dev_priv->pch_type = PCH_NOP; |
170 | return; | |
171 | } | |
172 | ||
173 | /* | |
174 | * The reason to probe ISA bridge instead of Dev31:Fun0 is to | |
175 | * make graphics device passthrough work easy for VMM, that only | |
176 | * need to expose ISA bridge to let driver know the real hardware | |
177 | * underneath. This is a requirement from virtualization team. | |
178 | * | |
179 | * In some virtualized environments (e.g. XEN), there is irrelevant | |
180 | * ISA bridge in the system. To work reliably, we should scan trhough | |
181 | * all the ISA bridge devices and check for the first match, instead | |
182 | * of only checking the first one. | |
183 | */ | |
184 | while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { | |
d67c0ac1 JN |
185 | unsigned short id; |
186 | ||
187 | if (pch->vendor != PCI_VENDOR_ID_INTEL) | |
188 | continue; | |
189 | ||
190 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; | |
191 | ||
192 | dev_priv->pch_id = id; | |
193 | ||
194 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { | |
195 | dev_priv->pch_type = PCH_IBX; | |
196 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); | |
197 | WARN_ON(!IS_GEN5(dev_priv)); | |
198 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | |
199 | dev_priv->pch_type = PCH_CPT; | |
200 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); | |
201 | WARN_ON(!IS_GEN6(dev_priv) && | |
202 | !IS_IVYBRIDGE(dev_priv)); | |
203 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { | |
204 | /* PantherPoint is CPT compatible */ | |
205 | dev_priv->pch_type = PCH_CPT; | |
206 | DRM_DEBUG_KMS("Found PantherPoint PCH\n"); | |
207 | WARN_ON(!IS_GEN6(dev_priv) && | |
208 | !IS_IVYBRIDGE(dev_priv)); | |
209 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | |
210 | dev_priv->pch_type = PCH_LPT; | |
211 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | |
212 | WARN_ON(!IS_HASWELL(dev_priv) && | |
213 | !IS_BROADWELL(dev_priv)); | |
214 | WARN_ON(IS_HSW_ULT(dev_priv) || | |
215 | IS_BDW_ULT(dev_priv)); | |
216 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | |
217 | dev_priv->pch_type = PCH_LPT; | |
218 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | |
219 | WARN_ON(!IS_HASWELL(dev_priv) && | |
220 | !IS_BROADWELL(dev_priv)); | |
221 | WARN_ON(!IS_HSW_ULT(dev_priv) && | |
222 | !IS_BDW_ULT(dev_priv)); | |
223 | } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) { | |
224 | /* WildcatPoint is LPT compatible */ | |
225 | dev_priv->pch_type = PCH_LPT; | |
226 | DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); | |
227 | WARN_ON(!IS_HASWELL(dev_priv) && | |
228 | !IS_BROADWELL(dev_priv)); | |
229 | WARN_ON(IS_HSW_ULT(dev_priv) || | |
230 | IS_BDW_ULT(dev_priv)); | |
231 | } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) { | |
232 | /* WildcatPoint is LPT compatible */ | |
233 | dev_priv->pch_type = PCH_LPT; | |
234 | DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); | |
235 | WARN_ON(!IS_HASWELL(dev_priv) && | |
236 | !IS_BROADWELL(dev_priv)); | |
237 | WARN_ON(!IS_HSW_ULT(dev_priv) && | |
238 | !IS_BDW_ULT(dev_priv)); | |
239 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { | |
240 | dev_priv->pch_type = PCH_SPT; | |
241 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); | |
242 | WARN_ON(!IS_SKYLAKE(dev_priv) && | |
243 | !IS_KABYLAKE(dev_priv)); | |
244 | } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { | |
245 | dev_priv->pch_type = PCH_SPT; | |
246 | DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); | |
247 | WARN_ON(!IS_SKYLAKE(dev_priv) && | |
248 | !IS_KABYLAKE(dev_priv)); | |
249 | } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { | |
250 | dev_priv->pch_type = PCH_KBP; | |
251 | DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); | |
252 | WARN_ON(!IS_SKYLAKE(dev_priv) && | |
253 | !IS_KABYLAKE(dev_priv) && | |
254 | !IS_COFFEELAKE(dev_priv)); | |
255 | } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) { | |
256 | dev_priv->pch_type = PCH_CNP; | |
257 | DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); | |
258 | WARN_ON(!IS_CANNONLAKE(dev_priv) && | |
259 | !IS_COFFEELAKE(dev_priv)); | |
260 | } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) { | |
261 | dev_priv->pch_type = PCH_CNP; | |
262 | DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); | |
263 | WARN_ON(!IS_CANNONLAKE(dev_priv) && | |
264 | !IS_COFFEELAKE(dev_priv)); | |
265 | } else if (id == INTEL_PCH_ICP_DEVICE_ID_TYPE) { | |
266 | dev_priv->pch_type = PCH_ICP; | |
267 | DRM_DEBUG_KMS("Found Ice Lake PCH\n"); | |
268 | WARN_ON(!IS_ICELAKE(dev_priv)); | |
269 | } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || | |
270 | id == INTEL_PCH_P3X_DEVICE_ID_TYPE || | |
271 | (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && | |
272 | pch->subsystem_vendor == | |
273 | PCI_SUBVENDOR_ID_REDHAT_QUMRANET && | |
274 | pch->subsystem_device == | |
275 | PCI_SUBDEVICE_ID_QEMU)) { | |
276 | dev_priv->pch_type = intel_virt_detect_pch(dev_priv); | |
277 | } else { | |
278 | continue; | |
0673ad47 | 279 | } |
d67c0ac1 JN |
280 | |
281 | break; | |
0673ad47 CW |
282 | } |
283 | if (!pch) | |
284 | DRM_DEBUG_KMS("No PCH found.\n"); | |
285 | ||
286 | pci_dev_put(pch); | |
287 | } | |
288 | ||
0673ad47 CW |
289 | static int i915_getparam(struct drm_device *dev, void *data, |
290 | struct drm_file *file_priv) | |
291 | { | |
fac5e23e | 292 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 293 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 CW |
294 | drm_i915_getparam_t *param = data; |
295 | int value; | |
296 | ||
297 | switch (param->param) { | |
298 | case I915_PARAM_IRQ_ACTIVE: | |
299 | case I915_PARAM_ALLOW_BATCHBUFFER: | |
300 | case I915_PARAM_LAST_DISPATCH: | |
ef0f411f | 301 | case I915_PARAM_HAS_EXEC_CONSTANTS: |
0673ad47 CW |
302 | /* Reject all old ums/dri params. */ |
303 | return -ENODEV; | |
304 | case I915_PARAM_CHIPSET_ID: | |
52a05c30 | 305 | value = pdev->device; |
0673ad47 CW |
306 | break; |
307 | case I915_PARAM_REVISION: | |
52a05c30 | 308 | value = pdev->revision; |
0673ad47 | 309 | break; |
0673ad47 CW |
310 | case I915_PARAM_NUM_FENCES_AVAIL: |
311 | value = dev_priv->num_fence_regs; | |
312 | break; | |
313 | case I915_PARAM_HAS_OVERLAY: | |
314 | value = dev_priv->overlay ? 1 : 0; | |
315 | break; | |
0673ad47 | 316 | case I915_PARAM_HAS_BSD: |
3b3f1650 | 317 | value = !!dev_priv->engine[VCS]; |
0673ad47 CW |
318 | break; |
319 | case I915_PARAM_HAS_BLT: | |
3b3f1650 | 320 | value = !!dev_priv->engine[BCS]; |
0673ad47 CW |
321 | break; |
322 | case I915_PARAM_HAS_VEBOX: | |
3b3f1650 | 323 | value = !!dev_priv->engine[VECS]; |
0673ad47 CW |
324 | break; |
325 | case I915_PARAM_HAS_BSD2: | |
3b3f1650 | 326 | value = !!dev_priv->engine[VCS2]; |
0673ad47 | 327 | break; |
0673ad47 | 328 | case I915_PARAM_HAS_LLC: |
16162470 | 329 | value = HAS_LLC(dev_priv); |
0673ad47 CW |
330 | break; |
331 | case I915_PARAM_HAS_WT: | |
16162470 | 332 | value = HAS_WT(dev_priv); |
0673ad47 CW |
333 | break; |
334 | case I915_PARAM_HAS_ALIASING_PPGTT: | |
16162470 | 335 | value = USES_PPGTT(dev_priv); |
0673ad47 CW |
336 | break; |
337 | case I915_PARAM_HAS_SEMAPHORES: | |
93c6e966 | 338 | value = HAS_LEGACY_SEMAPHORES(dev_priv); |
0673ad47 | 339 | break; |
0673ad47 CW |
340 | case I915_PARAM_HAS_SECURE_BATCHES: |
341 | value = capable(CAP_SYS_ADMIN); | |
342 | break; | |
0673ad47 CW |
343 | case I915_PARAM_CMD_PARSER_VERSION: |
344 | value = i915_cmd_parser_get_version(dev_priv); | |
345 | break; | |
0673ad47 | 346 | case I915_PARAM_SUBSLICE_TOTAL: |
57ec171e | 347 | value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu); |
0673ad47 CW |
348 | if (!value) |
349 | return -ENODEV; | |
350 | break; | |
351 | case I915_PARAM_EU_TOTAL: | |
43b67998 | 352 | value = INTEL_INFO(dev_priv)->sseu.eu_total; |
0673ad47 CW |
353 | if (!value) |
354 | return -ENODEV; | |
355 | break; | |
356 | case I915_PARAM_HAS_GPU_RESET: | |
4f044a88 MW |
357 | value = i915_modparams.enable_hangcheck && |
358 | intel_has_gpu_reset(dev_priv); | |
142bc7d9 MT |
359 | if (value && intel_has_reset_engine(dev_priv)) |
360 | value = 2; | |
0673ad47 CW |
361 | break; |
362 | case I915_PARAM_HAS_RESOURCE_STREAMER: | |
16162470 | 363 | value = HAS_RESOURCE_STREAMER(dev_priv); |
0673ad47 | 364 | break; |
37f501af | 365 | case I915_PARAM_HAS_POOLED_EU: |
16162470 | 366 | value = HAS_POOLED_EU(dev_priv); |
37f501af | 367 | break; |
368 | case I915_PARAM_MIN_EU_IN_POOL: | |
43b67998 | 369 | value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool; |
37f501af | 370 | break; |
5464cd65 | 371 | case I915_PARAM_HUC_STATUS: |
3582ad13 | 372 | intel_runtime_pm_get(dev_priv); |
5464cd65 | 373 | value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; |
3582ad13 | 374 | intel_runtime_pm_put(dev_priv); |
5464cd65 | 375 | break; |
4cc69075 CW |
376 | case I915_PARAM_MMAP_GTT_VERSION: |
377 | /* Though we've started our numbering from 1, and so class all | |
378 | * earlier versions as 0, in effect their value is undefined as | |
379 | * the ioctl will report EINVAL for the unknown param! | |
380 | */ | |
381 | value = i915_gem_mmap_gtt_version(); | |
382 | break; | |
0de9136d | 383 | case I915_PARAM_HAS_SCHEDULER: |
3fed1808 | 384 | value = dev_priv->caps.scheduler; |
0de9136d | 385 | break; |
beecec90 | 386 | |
16162470 DW |
387 | case I915_PARAM_MMAP_VERSION: |
388 | /* Remember to bump this if the version changes! */ | |
389 | case I915_PARAM_HAS_GEM: | |
390 | case I915_PARAM_HAS_PAGEFLIPPING: | |
391 | case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */ | |
392 | case I915_PARAM_HAS_RELAXED_FENCING: | |
393 | case I915_PARAM_HAS_COHERENT_RINGS: | |
394 | case I915_PARAM_HAS_RELAXED_DELTA: | |
395 | case I915_PARAM_HAS_GEN7_SOL_RESET: | |
396 | case I915_PARAM_HAS_WAIT_TIMEOUT: | |
397 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: | |
398 | case I915_PARAM_HAS_PINNED_BATCHES: | |
399 | case I915_PARAM_HAS_EXEC_NO_RELOC: | |
400 | case I915_PARAM_HAS_EXEC_HANDLE_LUT: | |
401 | case I915_PARAM_HAS_COHERENT_PHYS_GTT: | |
402 | case I915_PARAM_HAS_EXEC_SOFTPIN: | |
77ae9957 | 403 | case I915_PARAM_HAS_EXEC_ASYNC: |
fec0445c | 404 | case I915_PARAM_HAS_EXEC_FENCE: |
b0fd47ad | 405 | case I915_PARAM_HAS_EXEC_CAPTURE: |
1a71cf2f | 406 | case I915_PARAM_HAS_EXEC_BATCH_FIRST: |
cf6e7bac | 407 | case I915_PARAM_HAS_EXEC_FENCE_ARRAY: |
16162470 DW |
408 | /* For the time being all of these are always true; |
409 | * if some supported hardware does not have one of these | |
410 | * features this value needs to be provided from | |
411 | * INTEL_INFO(), a feature macro, or similar. | |
412 | */ | |
413 | value = 1; | |
414 | break; | |
d2b4b979 CW |
415 | case I915_PARAM_HAS_CONTEXT_ISOLATION: |
416 | value = intel_engines_has_context_isolation(dev_priv); | |
417 | break; | |
7fed555c RB |
418 | case I915_PARAM_SLICE_MASK: |
419 | value = INTEL_INFO(dev_priv)->sseu.slice_mask; | |
420 | if (!value) | |
421 | return -ENODEV; | |
422 | break; | |
f5320233 RB |
423 | case I915_PARAM_SUBSLICE_MASK: |
424 | value = INTEL_INFO(dev_priv)->sseu.subslice_mask; | |
425 | if (!value) | |
426 | return -ENODEV; | |
427 | break; | |
dab91783 | 428 | case I915_PARAM_CS_TIMESTAMP_FREQUENCY: |
f577a03b | 429 | value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz; |
dab91783 | 430 | break; |
0673ad47 CW |
431 | default: |
432 | DRM_DEBUG("Unknown parameter %d\n", param->param); | |
433 | return -EINVAL; | |
434 | } | |
435 | ||
dda33009 | 436 | if (put_user(value, param->value)) |
0673ad47 | 437 | return -EFAULT; |
0673ad47 CW |
438 | |
439 | return 0; | |
440 | } | |
441 | ||
da5f53bf | 442 | static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) |
0673ad47 | 443 | { |
0673ad47 CW |
444 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
445 | if (!dev_priv->bridge_dev) { | |
446 | DRM_ERROR("bridge device not found\n"); | |
447 | return -1; | |
448 | } | |
449 | return 0; | |
450 | } | |
451 | ||
452 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | |
453 | static int | |
da5f53bf | 454 | intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) |
0673ad47 | 455 | { |
514e1d64 | 456 | int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
0673ad47 CW |
457 | u32 temp_lo, temp_hi = 0; |
458 | u64 mchbar_addr; | |
459 | int ret; | |
460 | ||
514e1d64 | 461 | if (INTEL_GEN(dev_priv) >= 4) |
0673ad47 CW |
462 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); |
463 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | |
464 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | |
465 | ||
466 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | |
467 | #ifdef CONFIG_PNP | |
468 | if (mchbar_addr && | |
469 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) | |
470 | return 0; | |
471 | #endif | |
472 | ||
473 | /* Get some space for it */ | |
474 | dev_priv->mch_res.name = "i915 MCHBAR"; | |
475 | dev_priv->mch_res.flags = IORESOURCE_MEM; | |
476 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | |
477 | &dev_priv->mch_res, | |
478 | MCHBAR_SIZE, MCHBAR_SIZE, | |
479 | PCIBIOS_MIN_MEM, | |
480 | 0, pcibios_align_resource, | |
481 | dev_priv->bridge_dev); | |
482 | if (ret) { | |
483 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | |
484 | dev_priv->mch_res.start = 0; | |
485 | return ret; | |
486 | } | |
487 | ||
514e1d64 | 488 | if (INTEL_GEN(dev_priv) >= 4) |
0673ad47 CW |
489 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, |
490 | upper_32_bits(dev_priv->mch_res.start)); | |
491 | ||
492 | pci_write_config_dword(dev_priv->bridge_dev, reg, | |
493 | lower_32_bits(dev_priv->mch_res.start)); | |
494 | return 0; | |
495 | } | |
496 | ||
497 | /* Setup MCHBAR if possible, return true if we should disable it again */ | |
498 | static void | |
da5f53bf | 499 | intel_setup_mchbar(struct drm_i915_private *dev_priv) |
0673ad47 | 500 | { |
514e1d64 | 501 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
0673ad47 CW |
502 | u32 temp; |
503 | bool enabled; | |
504 | ||
920a14b2 | 505 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
0673ad47 CW |
506 | return; |
507 | ||
508 | dev_priv->mchbar_need_disable = false; | |
509 | ||
50a0bc90 | 510 | if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { |
0673ad47 CW |
511 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); |
512 | enabled = !!(temp & DEVEN_MCHBAR_EN); | |
513 | } else { | |
514 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
515 | enabled = temp & 1; | |
516 | } | |
517 | ||
518 | /* If it's already enabled, don't have to do anything */ | |
519 | if (enabled) | |
520 | return; | |
521 | ||
da5f53bf | 522 | if (intel_alloc_mchbar_resource(dev_priv)) |
0673ad47 CW |
523 | return; |
524 | ||
525 | dev_priv->mchbar_need_disable = true; | |
526 | ||
527 | /* Space is allocated or reserved, so enable it. */ | |
50a0bc90 | 528 | if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { |
0673ad47 CW |
529 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN, |
530 | temp | DEVEN_MCHBAR_EN); | |
531 | } else { | |
532 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
533 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | |
534 | } | |
535 | } | |
536 | ||
537 | static void | |
da5f53bf | 538 | intel_teardown_mchbar(struct drm_i915_private *dev_priv) |
0673ad47 | 539 | { |
514e1d64 | 540 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
0673ad47 CW |
541 | |
542 | if (dev_priv->mchbar_need_disable) { | |
50a0bc90 | 543 | if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { |
0673ad47 CW |
544 | u32 deven_val; |
545 | ||
546 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN, | |
547 | &deven_val); | |
548 | deven_val &= ~DEVEN_MCHBAR_EN; | |
549 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN, | |
550 | deven_val); | |
551 | } else { | |
552 | u32 mchbar_val; | |
553 | ||
554 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, | |
555 | &mchbar_val); | |
556 | mchbar_val &= ~1; | |
557 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, | |
558 | mchbar_val); | |
559 | } | |
560 | } | |
561 | ||
562 | if (dev_priv->mch_res.start) | |
563 | release_resource(&dev_priv->mch_res); | |
564 | } | |
565 | ||
566 | /* true = enable decode, false = disable decoder */ | |
567 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | |
568 | { | |
da5f53bf | 569 | struct drm_i915_private *dev_priv = cookie; |
0673ad47 | 570 | |
da5f53bf | 571 | intel_modeset_vga_set_state(dev_priv, state); |
0673ad47 CW |
572 | if (state) |
573 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | |
574 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
575 | else | |
576 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
577 | } | |
578 | ||
7f26cb88 TU |
579 | static int i915_resume_switcheroo(struct drm_device *dev); |
580 | static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); | |
581 | ||
0673ad47 CW |
582 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
583 | { | |
584 | struct drm_device *dev = pci_get_drvdata(pdev); | |
585 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | |
586 | ||
587 | if (state == VGA_SWITCHEROO_ON) { | |
588 | pr_info("switched on\n"); | |
589 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
590 | /* i915 resume handler doesn't set to D0 */ | |
52a05c30 | 591 | pci_set_power_state(pdev, PCI_D0); |
0673ad47 CW |
592 | i915_resume_switcheroo(dev); |
593 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | |
594 | } else { | |
595 | pr_info("switched off\n"); | |
596 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
597 | i915_suspend_switcheroo(dev, pmm); | |
598 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | |
599 | } | |
600 | } | |
601 | ||
602 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |
603 | { | |
604 | struct drm_device *dev = pci_get_drvdata(pdev); | |
605 | ||
606 | /* | |
607 | * FIXME: open_count is protected by drm_global_mutex but that would lead to | |
608 | * locking inversion with the driver load path. And the access here is | |
609 | * completely racy anyway. So don't bother with locking for now. | |
610 | */ | |
611 | return dev->open_count == 0; | |
612 | } | |
613 | ||
614 | static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { | |
615 | .set_gpu_state = i915_switcheroo_set_state, | |
616 | .reprobe = NULL, | |
617 | .can_switch = i915_switcheroo_can_switch, | |
618 | }; | |
619 | ||
fbbd37b3 | 620 | static void i915_gem_fini(struct drm_i915_private *dev_priv) |
0673ad47 | 621 | { |
3b19f16a CW |
622 | /* Flush any outstanding unpin_work. */ |
623 | i915_gem_drain_workqueue(dev_priv); | |
5f09a9c8 | 624 | |
fbbd37b3 | 625 | mutex_lock(&dev_priv->drm.struct_mutex); |
b8991403 | 626 | intel_uc_fini_hw(dev_priv); |
61b5c158 | 627 | intel_uc_fini(dev_priv); |
cb15d9f8 | 628 | i915_gem_cleanup_engines(dev_priv); |
829a0af2 | 629 | i915_gem_contexts_fini(dev_priv); |
fbbd37b3 | 630 | mutex_unlock(&dev_priv->drm.struct_mutex); |
0673ad47 | 631 | |
70deeadd | 632 | intel_uc_fini_misc(dev_priv); |
7c781423 CW |
633 | i915_gem_cleanup_userptr(dev_priv); |
634 | ||
bdeb9785 | 635 | i915_gem_drain_freed_objects(dev_priv); |
fbbd37b3 | 636 | |
829a0af2 | 637 | WARN_ON(!list_empty(&dev_priv->contexts.list)); |
0673ad47 CW |
638 | } |
639 | ||
640 | static int i915_load_modeset_init(struct drm_device *dev) | |
641 | { | |
fac5e23e | 642 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 643 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 CW |
644 | int ret; |
645 | ||
646 | if (i915_inject_load_failure()) | |
647 | return -ENODEV; | |
648 | ||
66578857 | 649 | intel_bios_init(dev_priv); |
0673ad47 CW |
650 | |
651 | /* If we have > 1 VGA cards, then we need to arbitrate access | |
652 | * to the common VGA resources. | |
653 | * | |
654 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | |
655 | * then we do not take part in VGA arbitration and the | |
656 | * vga_client_register() fails with -ENODEV. | |
657 | */ | |
da5f53bf | 658 | ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); |
0673ad47 CW |
659 | if (ret && ret != -ENODEV) |
660 | goto out; | |
661 | ||
662 | intel_register_dsm_handler(); | |
663 | ||
52a05c30 | 664 | ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false); |
0673ad47 CW |
665 | if (ret) |
666 | goto cleanup_vga_client; | |
667 | ||
668 | /* must happen before intel_power_domains_init_hw() on VLV/CHV */ | |
669 | intel_update_rawclk(dev_priv); | |
670 | ||
671 | intel_power_domains_init_hw(dev_priv, false); | |
672 | ||
673 | intel_csr_ucode_init(dev_priv); | |
674 | ||
675 | ret = intel_irq_install(dev_priv); | |
676 | if (ret) | |
677 | goto cleanup_csr; | |
678 | ||
40196446 | 679 | intel_setup_gmbus(dev_priv); |
0673ad47 CW |
680 | |
681 | /* Important: The output setup functions called by modeset_init need | |
682 | * working irqs for e.g. gmbus and dp aux transfers. */ | |
b079bd17 VS |
683 | ret = intel_modeset_init(dev); |
684 | if (ret) | |
685 | goto cleanup_irq; | |
0673ad47 | 686 | |
29ad6a30 | 687 | intel_uc_init_fw(dev_priv); |
0673ad47 | 688 | |
bf9e8429 | 689 | ret = i915_gem_init(dev_priv); |
0673ad47 | 690 | if (ret) |
3950bf3d | 691 | goto cleanup_uc; |
0673ad47 | 692 | |
d378a3ef | 693 | intel_setup_overlay(dev_priv); |
0673ad47 | 694 | |
b7f05d4a | 695 | if (INTEL_INFO(dev_priv)->num_pipes == 0) |
0673ad47 CW |
696 | return 0; |
697 | ||
698 | ret = intel_fbdev_init(dev); | |
699 | if (ret) | |
700 | goto cleanup_gem; | |
701 | ||
702 | /* Only enable hotplug handling once the fbdev is fully set up. */ | |
703 | intel_hpd_init(dev_priv); | |
704 | ||
0673ad47 CW |
705 | return 0; |
706 | ||
707 | cleanup_gem: | |
bf9e8429 | 708 | if (i915_gem_suspend(dev_priv)) |
1c777c5d | 709 | DRM_ERROR("failed to idle hardware; continuing to unload!\n"); |
fbbd37b3 | 710 | i915_gem_fini(dev_priv); |
3950bf3d OM |
711 | cleanup_uc: |
712 | intel_uc_fini_fw(dev_priv); | |
0673ad47 | 713 | cleanup_irq: |
0673ad47 | 714 | drm_irq_uninstall(dev); |
40196446 | 715 | intel_teardown_gmbus(dev_priv); |
0673ad47 CW |
716 | cleanup_csr: |
717 | intel_csr_ucode_fini(dev_priv); | |
718 | intel_power_domains_fini(dev_priv); | |
52a05c30 | 719 | vga_switcheroo_unregister_client(pdev); |
0673ad47 | 720 | cleanup_vga_client: |
52a05c30 | 721 | vga_client_register(pdev, NULL, NULL, NULL); |
0673ad47 CW |
722 | out: |
723 | return ret; | |
724 | } | |
725 | ||
0673ad47 CW |
726 | static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) |
727 | { | |
728 | struct apertures_struct *ap; | |
91c8a326 | 729 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 CW |
730 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
731 | bool primary; | |
732 | int ret; | |
733 | ||
734 | ap = alloc_apertures(1); | |
735 | if (!ap) | |
736 | return -ENOMEM; | |
737 | ||
73ebd503 | 738 | ap->ranges[0].base = ggtt->gmadr.start; |
0673ad47 CW |
739 | ap->ranges[0].size = ggtt->mappable_end; |
740 | ||
741 | primary = | |
742 | pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | |
743 | ||
44adece5 | 744 | ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary); |
0673ad47 CW |
745 | |
746 | kfree(ap); | |
747 | ||
748 | return ret; | |
749 | } | |
0673ad47 CW |
750 | |
751 | #if !defined(CONFIG_VGA_CONSOLE) | |
752 | static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) | |
753 | { | |
754 | return 0; | |
755 | } | |
756 | #elif !defined(CONFIG_DUMMY_CONSOLE) | |
757 | static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) | |
758 | { | |
759 | return -ENODEV; | |
760 | } | |
761 | #else | |
762 | static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) | |
763 | { | |
764 | int ret = 0; | |
765 | ||
766 | DRM_INFO("Replacing VGA console driver\n"); | |
767 | ||
768 | console_lock(); | |
769 | if (con_is_bound(&vga_con)) | |
770 | ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); | |
771 | if (ret == 0) { | |
772 | ret = do_unregister_con_driver(&vga_con); | |
773 | ||
774 | /* Ignore "already unregistered". */ | |
775 | if (ret == -ENODEV) | |
776 | ret = 0; | |
777 | } | |
778 | console_unlock(); | |
779 | ||
780 | return ret; | |
781 | } | |
782 | #endif | |
783 | ||
0673ad47 CW |
784 | static void intel_init_dpio(struct drm_i915_private *dev_priv) |
785 | { | |
786 | /* | |
787 | * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), | |
788 | * CHV x1 PHY (DP/HDMI D) | |
789 | * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) | |
790 | */ | |
791 | if (IS_CHERRYVIEW(dev_priv)) { | |
792 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; | |
793 | DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; | |
794 | } else if (IS_VALLEYVIEW(dev_priv)) { | |
795 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; | |
796 | } | |
797 | } | |
798 | ||
799 | static int i915_workqueues_init(struct drm_i915_private *dev_priv) | |
800 | { | |
801 | /* | |
802 | * The i915 workqueue is primarily used for batched retirement of | |
803 | * requests (and thus managing bo) once the task has been completed | |
804 | * by the GPU. i915_gem_retire_requests() is called directly when we | |
805 | * need high-priority retirement, such as waiting for an explicit | |
806 | * bo. | |
807 | * | |
808 | * It is also used for periodic low-priority events, such as | |
809 | * idle-timers and recording error state. | |
810 | * | |
811 | * All tasks on the workqueue are expected to acquire the dev mutex | |
812 | * so there is no point in running more than one instance of the | |
813 | * workqueue at any time. Use an ordered one. | |
814 | */ | |
815 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); | |
816 | if (dev_priv->wq == NULL) | |
817 | goto out_err; | |
818 | ||
819 | dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); | |
820 | if (dev_priv->hotplug.dp_wq == NULL) | |
821 | goto out_free_wq; | |
822 | ||
0673ad47 CW |
823 | return 0; |
824 | ||
0673ad47 CW |
825 | out_free_wq: |
826 | destroy_workqueue(dev_priv->wq); | |
827 | out_err: | |
828 | DRM_ERROR("Failed to allocate workqueues.\n"); | |
829 | ||
830 | return -ENOMEM; | |
831 | } | |
832 | ||
bb8f0f5a CW |
833 | static void i915_engines_cleanup(struct drm_i915_private *i915) |
834 | { | |
835 | struct intel_engine_cs *engine; | |
836 | enum intel_engine_id id; | |
837 | ||
838 | for_each_engine(engine, i915, id) | |
839 | kfree(engine); | |
840 | } | |
841 | ||
0673ad47 CW |
842 | static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) |
843 | { | |
0673ad47 CW |
844 | destroy_workqueue(dev_priv->hotplug.dp_wq); |
845 | destroy_workqueue(dev_priv->wq); | |
846 | } | |
847 | ||
4fc7e845 PZ |
848 | /* |
849 | * We don't keep the workarounds for pre-production hardware, so we expect our | |
850 | * driver to fail on these machines in one way or another. A little warning on | |
851 | * dmesg may help both the user and the bug triagers. | |
6a7a6a98 CW |
852 | * |
853 | * Our policy for removing pre-production workarounds is to keep the | |
854 | * current gen workarounds as a guide to the bring-up of the next gen | |
855 | * (workarounds have a habit of persisting!). Anything older than that | |
856 | * should be removed along with the complications they introduce. | |
4fc7e845 PZ |
857 | */ |
858 | static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) | |
859 | { | |
248a124d CW |
860 | bool pre = false; |
861 | ||
862 | pre |= IS_HSW_EARLY_SDV(dev_priv); | |
863 | pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); | |
0102ba1f | 864 | pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); |
248a124d | 865 | |
7c5ff4a2 | 866 | if (pre) { |
4fc7e845 PZ |
867 | DRM_ERROR("This is a pre-production stepping. " |
868 | "It may not be fully functional.\n"); | |
7c5ff4a2 CW |
869 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); |
870 | } | |
4fc7e845 PZ |
871 | } |
872 | ||
0673ad47 CW |
873 | /** |
874 | * i915_driver_init_early - setup state not requiring device access | |
875 | * @dev_priv: device private | |
34e07e42 | 876 | * @ent: the matching pci_device_id |
0673ad47 CW |
877 | * |
878 | * Initialize everything that is a "SW-only" state, that is state not | |
879 | * requiring accessing the device or exposing the driver via kernel internal | |
880 | * or userspace interfaces. Example steps belonging here: lock initialization, | |
881 | * system memory allocation, setting up device specific attributes and | |
882 | * function hooks not requiring accessing the device. | |
883 | */ | |
884 | static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |
885 | const struct pci_device_id *ent) | |
886 | { | |
887 | const struct intel_device_info *match_info = | |
888 | (struct intel_device_info *)ent->driver_data; | |
889 | struct intel_device_info *device_info; | |
890 | int ret = 0; | |
891 | ||
892 | if (i915_inject_load_failure()) | |
893 | return -ENODEV; | |
894 | ||
895 | /* Setup the write-once "constant" device info */ | |
94b4f3ba | 896 | device_info = mkwrite_device_info(dev_priv); |
0673ad47 CW |
897 | memcpy(device_info, match_info, sizeof(*device_info)); |
898 | device_info->device_id = dev_priv->drm.pdev->device; | |
899 | ||
ae7617f0 TU |
900 | BUILD_BUG_ON(INTEL_MAX_PLATFORMS > |
901 | sizeof(device_info->platform_mask) * BITS_PER_BYTE); | |
902 | device_info->platform_mask = BIT(device_info->platform); | |
903 | ||
0673ad47 CW |
904 | BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); |
905 | device_info->gen_mask = BIT(device_info->gen - 1); | |
906 | ||
907 | spin_lock_init(&dev_priv->irq_lock); | |
908 | spin_lock_init(&dev_priv->gpu_error.lock); | |
909 | mutex_init(&dev_priv->backlight_lock); | |
910 | spin_lock_init(&dev_priv->uncore.lock); | |
317eaa95 | 911 | |
0673ad47 CW |
912 | mutex_init(&dev_priv->sb_lock); |
913 | mutex_init(&dev_priv->modeset_restore_lock); | |
914 | mutex_init(&dev_priv->av_mutex); | |
915 | mutex_init(&dev_priv->wm.wm_mutex); | |
916 | mutex_init(&dev_priv->pps_mutex); | |
917 | ||
413e8fdb | 918 | intel_uc_init_early(dev_priv); |
0b1de5d5 CW |
919 | i915_memcpy_init_early(dev_priv); |
920 | ||
0673ad47 CW |
921 | ret = i915_workqueues_init(dev_priv); |
922 | if (ret < 0) | |
bb8f0f5a | 923 | goto err_engines; |
0673ad47 | 924 | |
0673ad47 | 925 | /* This must be called before any calls to HAS_PCH_* */ |
da5f53bf | 926 | intel_detect_pch(dev_priv); |
0673ad47 | 927 | |
192aa181 | 928 | intel_pm_setup(dev_priv); |
0673ad47 CW |
929 | intel_init_dpio(dev_priv); |
930 | intel_power_domains_init(dev_priv); | |
931 | intel_irq_init(dev_priv); | |
3ac168a7 | 932 | intel_hangcheck_init(dev_priv); |
0673ad47 CW |
933 | intel_init_display_hooks(dev_priv); |
934 | intel_init_clock_gating_hooks(dev_priv); | |
935 | intel_init_audio_hooks(dev_priv); | |
cb15d9f8 | 936 | ret = i915_gem_load_init(dev_priv); |
73cb9701 | 937 | if (ret < 0) |
cefcff8f | 938 | goto err_irq; |
0673ad47 | 939 | |
36cdd013 | 940 | intel_display_crc_init(dev_priv); |
0673ad47 | 941 | |
4fc7e845 | 942 | intel_detect_preproduction_hw(dev_priv); |
0673ad47 CW |
943 | |
944 | return 0; | |
945 | ||
cefcff8f JL |
946 | err_irq: |
947 | intel_irq_fini(dev_priv); | |
0673ad47 | 948 | i915_workqueues_cleanup(dev_priv); |
bb8f0f5a CW |
949 | err_engines: |
950 | i915_engines_cleanup(dev_priv); | |
0673ad47 CW |
951 | return ret; |
952 | } | |
953 | ||
954 | /** | |
955 | * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() | |
956 | * @dev_priv: device private | |
957 | */ | |
958 | static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) | |
959 | { | |
cb15d9f8 | 960 | i915_gem_load_cleanup(dev_priv); |
cefcff8f | 961 | intel_irq_fini(dev_priv); |
0673ad47 | 962 | i915_workqueues_cleanup(dev_priv); |
bb8f0f5a | 963 | i915_engines_cleanup(dev_priv); |
0673ad47 CW |
964 | } |
965 | ||
da5f53bf | 966 | static int i915_mmio_setup(struct drm_i915_private *dev_priv) |
0673ad47 | 967 | { |
52a05c30 | 968 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 CW |
969 | int mmio_bar; |
970 | int mmio_size; | |
971 | ||
5db94019 | 972 | mmio_bar = IS_GEN2(dev_priv) ? 1 : 0; |
0673ad47 CW |
973 | /* |
974 | * Before gen4, the registers and the GTT are behind different BARs. | |
975 | * However, from gen4 onwards, the registers and the GTT are shared | |
976 | * in the same BAR, so we want to restrict this ioremap from | |
977 | * clobbering the GTT which we want ioremap_wc instead. Fortunately, | |
978 | * the register BAR remains the same size for all the earlier | |
979 | * generations up to Ironlake. | |
980 | */ | |
514e1d64 | 981 | if (INTEL_GEN(dev_priv) < 5) |
0673ad47 CW |
982 | mmio_size = 512 * 1024; |
983 | else | |
984 | mmio_size = 2 * 1024 * 1024; | |
52a05c30 | 985 | dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size); |
0673ad47 CW |
986 | if (dev_priv->regs == NULL) { |
987 | DRM_ERROR("failed to map registers\n"); | |
988 | ||
989 | return -EIO; | |
990 | } | |
991 | ||
992 | /* Try to make sure MCHBAR is enabled before poking at it */ | |
da5f53bf | 993 | intel_setup_mchbar(dev_priv); |
0673ad47 CW |
994 | |
995 | return 0; | |
996 | } | |
997 | ||
da5f53bf | 998 | static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) |
0673ad47 | 999 | { |
52a05c30 | 1000 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 | 1001 | |
da5f53bf | 1002 | intel_teardown_mchbar(dev_priv); |
52a05c30 | 1003 | pci_iounmap(pdev, dev_priv->regs); |
0673ad47 CW |
1004 | } |
1005 | ||
1006 | /** | |
1007 | * i915_driver_init_mmio - setup device MMIO | |
1008 | * @dev_priv: device private | |
1009 | * | |
1010 | * Setup minimal device state necessary for MMIO accesses later in the | |
1011 | * initialization sequence. The setup here should avoid any other device-wide | |
1012 | * side effects or exposing the driver via kernel internal or user space | |
1013 | * interfaces. | |
1014 | */ | |
1015 | static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) | |
1016 | { | |
0673ad47 CW |
1017 | int ret; |
1018 | ||
1019 | if (i915_inject_load_failure()) | |
1020 | return -ENODEV; | |
1021 | ||
da5f53bf | 1022 | if (i915_get_bridge_dev(dev_priv)) |
0673ad47 CW |
1023 | return -EIO; |
1024 | ||
da5f53bf | 1025 | ret = i915_mmio_setup(dev_priv); |
0673ad47 | 1026 | if (ret < 0) |
63ffbcda | 1027 | goto err_bridge; |
0673ad47 CW |
1028 | |
1029 | intel_uncore_init(dev_priv); | |
63ffbcda | 1030 | |
1fc556fa SAK |
1031 | intel_uc_init_mmio(dev_priv); |
1032 | ||
63ffbcda JL |
1033 | ret = intel_engines_init_mmio(dev_priv); |
1034 | if (ret) | |
1035 | goto err_uncore; | |
1036 | ||
24145517 | 1037 | i915_gem_init_mmio(dev_priv); |
0673ad47 CW |
1038 | |
1039 | return 0; | |
1040 | ||
63ffbcda JL |
1041 | err_uncore: |
1042 | intel_uncore_fini(dev_priv); | |
1043 | err_bridge: | |
0673ad47 CW |
1044 | pci_dev_put(dev_priv->bridge_dev); |
1045 | ||
1046 | return ret; | |
1047 | } | |
1048 | ||
1049 | /** | |
1050 | * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() | |
1051 | * @dev_priv: device private | |
1052 | */ | |
1053 | static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) | |
1054 | { | |
0673ad47 | 1055 | intel_uncore_fini(dev_priv); |
da5f53bf | 1056 | i915_mmio_cleanup(dev_priv); |
0673ad47 CW |
1057 | pci_dev_put(dev_priv->bridge_dev); |
1058 | } | |
1059 | ||
94b4f3ba CW |
1060 | static void intel_sanitize_options(struct drm_i915_private *dev_priv) |
1061 | { | |
94b4f3ba CW |
1062 | /* |
1063 | * i915.enable_ppgtt is read-only, so do an early pass to validate the | |
1064 | * user's requested state against the hardware/driver capabilities. We | |
1065 | * do this now so that we can print out any log messages once rather | |
1066 | * than every time we check intel_enable_ppgtt(). | |
1067 | */ | |
4f044a88 MW |
1068 | i915_modparams.enable_ppgtt = |
1069 | intel_sanitize_enable_ppgtt(dev_priv, | |
1070 | i915_modparams.enable_ppgtt); | |
1071 | DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt); | |
39df9190 | 1072 | |
d2be9f2f | 1073 | intel_uc_sanitize_options(dev_priv); |
67b7f33e CD |
1074 | |
1075 | intel_gvt_sanitize_options(dev_priv); | |
94b4f3ba CW |
1076 | } |
1077 | ||
0673ad47 CW |
1078 | /** |
1079 | * i915_driver_init_hw - setup state requiring device access | |
1080 | * @dev_priv: device private | |
1081 | * | |
1082 | * Setup state that requires accessing the device, but doesn't require | |
1083 | * exposing the driver via kernel internal or userspace interfaces. | |
1084 | */ | |
1085 | static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |
1086 | { | |
52a05c30 | 1087 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 CW |
1088 | int ret; |
1089 | ||
1090 | if (i915_inject_load_failure()) | |
1091 | return -ENODEV; | |
1092 | ||
6a7e51f3 | 1093 | intel_device_info_runtime_init(mkwrite_device_info(dev_priv)); |
94b4f3ba CW |
1094 | |
1095 | intel_sanitize_options(dev_priv); | |
0673ad47 | 1096 | |
9f9b2792 LL |
1097 | i915_perf_init(dev_priv); |
1098 | ||
97d6d7ab | 1099 | ret = i915_ggtt_probe_hw(dev_priv); |
0673ad47 CW |
1100 | if (ret) |
1101 | return ret; | |
1102 | ||
0673ad47 CW |
1103 | /* WARNING: Apparently we must kick fbdev drivers before vgacon, |
1104 | * otherwise the vga fbdev driver falls over. */ | |
1105 | ret = i915_kick_out_firmware_fb(dev_priv); | |
1106 | if (ret) { | |
1107 | DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); | |
1108 | goto out_ggtt; | |
1109 | } | |
1110 | ||
1111 | ret = i915_kick_out_vgacon(dev_priv); | |
1112 | if (ret) { | |
1113 | DRM_ERROR("failed to remove conflicting VGA console\n"); | |
1114 | goto out_ggtt; | |
1115 | } | |
1116 | ||
97d6d7ab | 1117 | ret = i915_ggtt_init_hw(dev_priv); |
0088e522 CW |
1118 | if (ret) |
1119 | return ret; | |
1120 | ||
97d6d7ab | 1121 | ret = i915_ggtt_enable_hw(dev_priv); |
0088e522 CW |
1122 | if (ret) { |
1123 | DRM_ERROR("failed to enable GGTT\n"); | |
1124 | goto out_ggtt; | |
1125 | } | |
1126 | ||
52a05c30 | 1127 | pci_set_master(pdev); |
0673ad47 CW |
1128 | |
1129 | /* overlay on gen2 is broken and can't address above 1G */ | |
5db94019 | 1130 | if (IS_GEN2(dev_priv)) { |
52a05c30 | 1131 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); |
0673ad47 CW |
1132 | if (ret) { |
1133 | DRM_ERROR("failed to set DMA mask\n"); | |
1134 | ||
1135 | goto out_ggtt; | |
1136 | } | |
1137 | } | |
1138 | ||
0673ad47 CW |
1139 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
1140 | * using 32bit addressing, overwriting memory if HWS is located | |
1141 | * above 4GB. | |
1142 | * | |
1143 | * The documentation also mentions an issue with undefined | |
1144 | * behaviour if any general state is accessed within a page above 4GB, | |
1145 | * which also needs to be handled carefully. | |
1146 | */ | |
c0f86832 | 1147 | if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { |
52a05c30 | 1148 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
0673ad47 CW |
1149 | |
1150 | if (ret) { | |
1151 | DRM_ERROR("failed to set DMA mask\n"); | |
1152 | ||
1153 | goto out_ggtt; | |
1154 | } | |
1155 | } | |
1156 | ||
0673ad47 CW |
1157 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, |
1158 | PM_QOS_DEFAULT_VALUE); | |
1159 | ||
1160 | intel_uncore_sanitize(dev_priv); | |
1161 | ||
1162 | intel_opregion_setup(dev_priv); | |
1163 | ||
1164 | i915_gem_load_init_fences(dev_priv); | |
1165 | ||
1166 | /* On the 945G/GM, the chipset reports the MSI capability on the | |
1167 | * integrated graphics even though the support isn't actually there | |
1168 | * according to the published specs. It doesn't appear to function | |
1169 | * correctly in testing on 945G. | |
1170 | * This may be a side effect of MSI having been made available for PEG | |
1171 | * and the registers being closely associated. | |
1172 | * | |
1173 | * According to chipset errata, on the 965GM, MSI interrupts may | |
e38c2da0 VS |
1174 | * be lost or delayed, and was defeatured. MSI interrupts seem to |
1175 | * get lost on g4x as well, and interrupt delivery seems to stay | |
1176 | * properly dead afterwards. So we'll just disable them for all | |
1177 | * pre-gen5 chipsets. | |
0673ad47 | 1178 | */ |
e38c2da0 | 1179 | if (INTEL_GEN(dev_priv) >= 5) { |
52a05c30 | 1180 | if (pci_enable_msi(pdev) < 0) |
0673ad47 CW |
1181 | DRM_DEBUG_DRIVER("can't enable MSI"); |
1182 | } | |
1183 | ||
26f837e8 ZW |
1184 | ret = intel_gvt_init(dev_priv); |
1185 | if (ret) | |
1186 | goto out_ggtt; | |
1187 | ||
0673ad47 CW |
1188 | return 0; |
1189 | ||
1190 | out_ggtt: | |
97d6d7ab | 1191 | i915_ggtt_cleanup_hw(dev_priv); |
0673ad47 CW |
1192 | |
1193 | return ret; | |
1194 | } | |
1195 | ||
1196 | /** | |
1197 | * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() | |
1198 | * @dev_priv: device private | |
1199 | */ | |
1200 | static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) | |
1201 | { | |
52a05c30 | 1202 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 | 1203 | |
9f9b2792 LL |
1204 | i915_perf_fini(dev_priv); |
1205 | ||
52a05c30 DW |
1206 | if (pdev->msi_enabled) |
1207 | pci_disable_msi(pdev); | |
0673ad47 CW |
1208 | |
1209 | pm_qos_remove_request(&dev_priv->pm_qos); | |
97d6d7ab | 1210 | i915_ggtt_cleanup_hw(dev_priv); |
0673ad47 CW |
1211 | } |
1212 | ||
1213 | /** | |
1214 | * i915_driver_register - register the driver with the rest of the system | |
1215 | * @dev_priv: device private | |
1216 | * | |
1217 | * Perform any steps necessary to make the driver available via kernel | |
1218 | * internal or userspace interfaces. | |
1219 | */ | |
1220 | static void i915_driver_register(struct drm_i915_private *dev_priv) | |
1221 | { | |
91c8a326 | 1222 | struct drm_device *dev = &dev_priv->drm; |
0673ad47 | 1223 | |
848b365d | 1224 | i915_gem_shrinker_register(dev_priv); |
b46a33e2 | 1225 | i915_pmu_register(dev_priv); |
0673ad47 CW |
1226 | |
1227 | /* | |
1228 | * Notify a valid surface after modesetting, | |
1229 | * when running inside a VM. | |
1230 | */ | |
1231 | if (intel_vgpu_active(dev_priv)) | |
1232 | I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); | |
1233 | ||
1234 | /* Reveal our presence to userspace */ | |
1235 | if (drm_dev_register(dev, 0) == 0) { | |
1236 | i915_debugfs_register(dev_priv); | |
f9cda048 | 1237 | i915_guc_log_register(dev_priv); |
694c2828 | 1238 | i915_setup_sysfs(dev_priv); |
442b8c06 RB |
1239 | |
1240 | /* Depends on sysfs having been initialized */ | |
1241 | i915_perf_register(dev_priv); | |
0673ad47 CW |
1242 | } else |
1243 | DRM_ERROR("Failed to register driver for userspace access!\n"); | |
1244 | ||
1245 | if (INTEL_INFO(dev_priv)->num_pipes) { | |
1246 | /* Must be done after probing outputs */ | |
1247 | intel_opregion_register(dev_priv); | |
1248 | acpi_video_register(); | |
1249 | } | |
1250 | ||
1251 | if (IS_GEN5(dev_priv)) | |
1252 | intel_gpu_ips_init(dev_priv); | |
1253 | ||
eef57324 | 1254 | intel_audio_init(dev_priv); |
0673ad47 CW |
1255 | |
1256 | /* | |
1257 | * Some ports require correctly set-up hpd registers for detection to | |
1258 | * work properly (leading to ghost connected connector status), e.g. VGA | |
1259 | * on gm45. Hence we can only set up the initial fbdev config after hpd | |
1260 | * irqs are fully enabled. We do it last so that the async config | |
1261 | * cannot run before the connectors are registered. | |
1262 | */ | |
1263 | intel_fbdev_initial_config_async(dev); | |
448aa911 CW |
1264 | |
1265 | /* | |
1266 | * We need to coordinate the hotplugs with the asynchronous fbdev | |
1267 | * configuration, for which we use the fbdev->async_cookie. | |
1268 | */ | |
1269 | if (INTEL_INFO(dev_priv)->num_pipes) | |
1270 | drm_kms_helper_poll_init(dev); | |
0673ad47 CW |
1271 | } |
1272 | ||
1273 | /** | |
1274 | * i915_driver_unregister - cleanup the registration done in i915_driver_regiser() | |
1275 | * @dev_priv: device private | |
1276 | */ | |
1277 | static void i915_driver_unregister(struct drm_i915_private *dev_priv) | |
1278 | { | |
4f256d82 | 1279 | intel_fbdev_unregister(dev_priv); |
eef57324 | 1280 | intel_audio_deinit(dev_priv); |
0673ad47 | 1281 | |
448aa911 CW |
1282 | /* |
1283 | * After flushing the fbdev (incl. a late async config which will | |
1284 | * have delayed queuing of a hotplug event), then flush the hotplug | |
1285 | * events. | |
1286 | */ | |
1287 | drm_kms_helper_poll_fini(&dev_priv->drm); | |
1288 | ||
0673ad47 CW |
1289 | intel_gpu_ips_teardown(); |
1290 | acpi_video_unregister(); | |
1291 | intel_opregion_unregister(dev_priv); | |
1292 | ||
442b8c06 | 1293 | i915_perf_unregister(dev_priv); |
b46a33e2 | 1294 | i915_pmu_unregister(dev_priv); |
442b8c06 | 1295 | |
694c2828 | 1296 | i915_teardown_sysfs(dev_priv); |
f9cda048 | 1297 | i915_guc_log_unregister(dev_priv); |
91c8a326 | 1298 | drm_dev_unregister(&dev_priv->drm); |
0673ad47 | 1299 | |
848b365d | 1300 | i915_gem_shrinker_unregister(dev_priv); |
0673ad47 CW |
1301 | } |
1302 | ||
27d558a1 MW |
1303 | static void i915_welcome_messages(struct drm_i915_private *dev_priv) |
1304 | { | |
1305 | if (drm_debug & DRM_UT_DRIVER) { | |
1306 | struct drm_printer p = drm_debug_printer("i915 device info:"); | |
1307 | ||
1308 | intel_device_info_dump(&dev_priv->info, &p); | |
1309 | intel_device_info_dump_runtime(&dev_priv->info, &p); | |
1310 | } | |
1311 | ||
1312 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) | |
1313 | DRM_INFO("DRM_I915_DEBUG enabled\n"); | |
1314 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) | |
1315 | DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); | |
1316 | } | |
1317 | ||
0673ad47 CW |
1318 | /** |
1319 | * i915_driver_load - setup chip and create an initial config | |
d2ad3ae4 JL |
1320 | * @pdev: PCI device |
1321 | * @ent: matching PCI ID entry | |
0673ad47 CW |
1322 | * |
1323 | * The driver load routine has to do several things: | |
1324 | * - drive output discovery via intel_modeset_init() | |
1325 | * - initialize the memory manager | |
1326 | * - allocate initial config memory | |
1327 | * - setup the DRM framebuffer with the allocated memory | |
1328 | */ | |
42f5551d | 1329 | int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) |
0673ad47 | 1330 | { |
8d2b47dd ML |
1331 | const struct intel_device_info *match_info = |
1332 | (struct intel_device_info *)ent->driver_data; | |
0673ad47 CW |
1333 | struct drm_i915_private *dev_priv; |
1334 | int ret; | |
7d87a7f7 | 1335 | |
ff4c3b76 | 1336 | /* Enable nuclear pageflip on ILK+ */ |
4f044a88 | 1337 | if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) |
8d2b47dd | 1338 | driver.driver_features &= ~DRIVER_ATOMIC; |
a09d0ba1 | 1339 | |
0673ad47 CW |
1340 | ret = -ENOMEM; |
1341 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | |
1342 | if (dev_priv) | |
1343 | ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); | |
1344 | if (ret) { | |
87a6752c | 1345 | DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); |
cad3688f | 1346 | goto out_free; |
0673ad47 | 1347 | } |
72bbf0af | 1348 | |
0673ad47 CW |
1349 | dev_priv->drm.pdev = pdev; |
1350 | dev_priv->drm.dev_private = dev_priv; | |
719388e1 | 1351 | |
0673ad47 CW |
1352 | ret = pci_enable_device(pdev); |
1353 | if (ret) | |
cad3688f | 1354 | goto out_fini; |
1347f5b4 | 1355 | |
0673ad47 | 1356 | pci_set_drvdata(pdev, &dev_priv->drm); |
adfdf85d ID |
1357 | /* |
1358 | * Disable the system suspend direct complete optimization, which can | |
1359 | * leave the device suspended skipping the driver's suspend handlers | |
1360 | * if the device was already runtime suspended. This is needed due to | |
1361 | * the difference in our runtime and system suspend sequence and | |
1362 | * becaue the HDA driver may require us to enable the audio power | |
1363 | * domain during system suspend. | |
1364 | */ | |
c2eac4d3 | 1365 | dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); |
ef11bdb3 | 1366 | |
0673ad47 CW |
1367 | ret = i915_driver_init_early(dev_priv, ent); |
1368 | if (ret < 0) | |
1369 | goto out_pci_disable; | |
ef11bdb3 | 1370 | |
0673ad47 | 1371 | intel_runtime_pm_get(dev_priv); |
1da177e4 | 1372 | |
0673ad47 CW |
1373 | ret = i915_driver_init_mmio(dev_priv); |
1374 | if (ret < 0) | |
1375 | goto out_runtime_pm_put; | |
79e53945 | 1376 | |
0673ad47 CW |
1377 | ret = i915_driver_init_hw(dev_priv); |
1378 | if (ret < 0) | |
1379 | goto out_cleanup_mmio; | |
30c964a6 RB |
1380 | |
1381 | /* | |
0673ad47 CW |
1382 | * TODO: move the vblank init and parts of modeset init steps into one |
1383 | * of the i915_driver_init_/i915_driver_register functions according | |
1384 | * to the role/effect of the given init step. | |
30c964a6 | 1385 | */ |
0673ad47 | 1386 | if (INTEL_INFO(dev_priv)->num_pipes) { |
91c8a326 | 1387 | ret = drm_vblank_init(&dev_priv->drm, |
0673ad47 CW |
1388 | INTEL_INFO(dev_priv)->num_pipes); |
1389 | if (ret) | |
1390 | goto out_cleanup_hw; | |
30c964a6 RB |
1391 | } |
1392 | ||
91c8a326 | 1393 | ret = i915_load_modeset_init(&dev_priv->drm); |
0673ad47 | 1394 | if (ret < 0) |
baf54385 | 1395 | goto out_cleanup_hw; |
0673ad47 CW |
1396 | |
1397 | i915_driver_register(dev_priv); | |
1398 | ||
1399 | intel_runtime_pm_enable(dev_priv); | |
1400 | ||
2503a0fe | 1401 | intel_init_ipc(dev_priv); |
a3a8986c | 1402 | |
0673ad47 CW |
1403 | intel_runtime_pm_put(dev_priv); |
1404 | ||
27d558a1 MW |
1405 | i915_welcome_messages(dev_priv); |
1406 | ||
0673ad47 CW |
1407 | return 0; |
1408 | ||
0673ad47 CW |
1409 | out_cleanup_hw: |
1410 | i915_driver_cleanup_hw(dev_priv); | |
1411 | out_cleanup_mmio: | |
1412 | i915_driver_cleanup_mmio(dev_priv); | |
1413 | out_runtime_pm_put: | |
1414 | intel_runtime_pm_put(dev_priv); | |
1415 | i915_driver_cleanup_early(dev_priv); | |
1416 | out_pci_disable: | |
1417 | pci_disable_device(pdev); | |
cad3688f | 1418 | out_fini: |
0673ad47 | 1419 | i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); |
cad3688f CW |
1420 | drm_dev_fini(&dev_priv->drm); |
1421 | out_free: | |
1422 | kfree(dev_priv); | |
30c964a6 RB |
1423 | return ret; |
1424 | } | |
1425 | ||
42f5551d | 1426 | void i915_driver_unload(struct drm_device *dev) |
3bad0781 | 1427 | { |
fac5e23e | 1428 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1429 | struct pci_dev *pdev = dev_priv->drm.pdev; |
3bad0781 | 1430 | |
99c539be DV |
1431 | i915_driver_unregister(dev_priv); |
1432 | ||
bf9e8429 | 1433 | if (i915_gem_suspend(dev_priv)) |
42f5551d | 1434 | DRM_ERROR("failed to idle hardware; continuing to unload!\n"); |
ce1bb329 | 1435 | |
0673ad47 CW |
1436 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
1437 | ||
18dddadc | 1438 | drm_atomic_helper_shutdown(dev); |
a667fb40 | 1439 | |
26f837e8 ZW |
1440 | intel_gvt_cleanup(dev_priv); |
1441 | ||
0673ad47 CW |
1442 | intel_modeset_cleanup(dev); |
1443 | ||
3bad0781 | 1444 | /* |
0673ad47 CW |
1445 | * free the memory space allocated for the child device |
1446 | * config parsed from VBT | |
3bad0781 | 1447 | */ |
0673ad47 CW |
1448 | if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { |
1449 | kfree(dev_priv->vbt.child_dev); | |
1450 | dev_priv->vbt.child_dev = NULL; | |
1451 | dev_priv->vbt.child_dev_num = 0; | |
1452 | } | |
1453 | kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); | |
1454 | dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; | |
1455 | kfree(dev_priv->vbt.lfp_lvds_vbt_mode); | |
1456 | dev_priv->vbt.lfp_lvds_vbt_mode = NULL; | |
3bad0781 | 1457 | |
52a05c30 DW |
1458 | vga_switcheroo_unregister_client(pdev); |
1459 | vga_client_register(pdev, NULL, NULL, NULL); | |
bcdb72ac | 1460 | |
0673ad47 | 1461 | intel_csr_ucode_fini(dev_priv); |
bcdb72ac | 1462 | |
0673ad47 CW |
1463 | /* Free error state after interrupts are fully disabled. */ |
1464 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | |
5a4c6f1b | 1465 | i915_reset_error_state(dev_priv); |
0673ad47 | 1466 | |
fbbd37b3 | 1467 | i915_gem_fini(dev_priv); |
3950bf3d | 1468 | intel_uc_fini_fw(dev_priv); |
0673ad47 CW |
1469 | intel_fbc_cleanup_cfb(dev_priv); |
1470 | ||
1471 | intel_power_domains_fini(dev_priv); | |
1472 | ||
1473 | i915_driver_cleanup_hw(dev_priv); | |
1474 | i915_driver_cleanup_mmio(dev_priv); | |
1475 | ||
1476 | intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); | |
cad3688f CW |
1477 | } |
1478 | ||
1479 | static void i915_driver_release(struct drm_device *dev) | |
1480 | { | |
1481 | struct drm_i915_private *dev_priv = to_i915(dev); | |
0673ad47 CW |
1482 | |
1483 | i915_driver_cleanup_early(dev_priv); | |
cad3688f CW |
1484 | drm_dev_fini(&dev_priv->drm); |
1485 | ||
1486 | kfree(dev_priv); | |
3bad0781 ZW |
1487 | } |
1488 | ||
0673ad47 | 1489 | static int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
2911a35b | 1490 | { |
829a0af2 | 1491 | struct drm_i915_private *i915 = to_i915(dev); |
0673ad47 | 1492 | int ret; |
2911a35b | 1493 | |
829a0af2 | 1494 | ret = i915_gem_open(i915, file); |
0673ad47 CW |
1495 | if (ret) |
1496 | return ret; | |
2911a35b | 1497 | |
0673ad47 CW |
1498 | return 0; |
1499 | } | |
71386ef9 | 1500 | |
0673ad47 CW |
1501 | /** |
1502 | * i915_driver_lastclose - clean up after all DRM clients have exited | |
1503 | * @dev: DRM device | |
1504 | * | |
1505 | * Take care of cleaning up after all DRM clients have exited. In the | |
1506 | * mode setting case, we want to restore the kernel's initial mode (just | |
1507 | * in case the last client left us in a bad state). | |
1508 | * | |
1509 | * Additionally, in the non-mode setting case, we'll tear down the GTT | |
1510 | * and DMA structures, since the kernel won't be using them, and clea | |
1511 | * up any GEM state. | |
1512 | */ | |
1513 | static void i915_driver_lastclose(struct drm_device *dev) | |
1514 | { | |
1515 | intel_fbdev_restore_mode(dev); | |
1516 | vga_switcheroo_process_delayed_switch(); | |
1517 | } | |
2911a35b | 1518 | |
7d2ec881 | 1519 | static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
0673ad47 | 1520 | { |
7d2ec881 DV |
1521 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1522 | ||
0673ad47 | 1523 | mutex_lock(&dev->struct_mutex); |
829a0af2 | 1524 | i915_gem_context_close(file); |
0673ad47 CW |
1525 | i915_gem_release(dev, file); |
1526 | mutex_unlock(&dev->struct_mutex); | |
0673ad47 CW |
1527 | |
1528 | kfree(file_priv); | |
2911a35b BW |
1529 | } |
1530 | ||
07f9cd0b ID |
1531 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) |
1532 | { | |
91c8a326 | 1533 | struct drm_device *dev = &dev_priv->drm; |
19c8054c | 1534 | struct intel_encoder *encoder; |
07f9cd0b ID |
1535 | |
1536 | drm_modeset_lock_all(dev); | |
19c8054c JN |
1537 | for_each_intel_encoder(dev, encoder) |
1538 | if (encoder->suspend) | |
1539 | encoder->suspend(encoder); | |
07f9cd0b ID |
1540 | drm_modeset_unlock_all(dev); |
1541 | } | |
1542 | ||
1a5df187 PZ |
1543 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
1544 | bool rpm_resume); | |
507e126e | 1545 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv); |
f75a1985 | 1546 | |
bc87229f ID |
1547 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
1548 | { | |
1549 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) | |
1550 | if (acpi_target_system_state() < ACPI_STATE_S3) | |
1551 | return true; | |
1552 | #endif | |
1553 | return false; | |
1554 | } | |
ebc32824 | 1555 | |
5e365c39 | 1556 | static int i915_drm_suspend(struct drm_device *dev) |
ba8bbcf6 | 1557 | { |
fac5e23e | 1558 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1559 | struct pci_dev *pdev = dev_priv->drm.pdev; |
e5747e3a | 1560 | pci_power_t opregion_target_state; |
d5818938 | 1561 | int error; |
61caf87c | 1562 | |
b8efb17b ZR |
1563 | /* ignore lid events during suspend */ |
1564 | mutex_lock(&dev_priv->modeset_restore_lock); | |
1565 | dev_priv->modeset_restore = MODESET_SUSPENDED; | |
1566 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
1567 | ||
1f814dac ID |
1568 | disable_rpm_wakeref_asserts(dev_priv); |
1569 | ||
c67a470b PZ |
1570 | /* We do a lot of poking in a lot of registers, make sure they work |
1571 | * properly. */ | |
da7e29bd | 1572 | intel_display_set_init_power(dev_priv, true); |
cb10799c | 1573 | |
5bcf719b DA |
1574 | drm_kms_helper_poll_disable(dev); |
1575 | ||
52a05c30 | 1576 | pci_save_state(pdev); |
ba8bbcf6 | 1577 | |
bf9e8429 | 1578 | error = i915_gem_suspend(dev_priv); |
d5818938 | 1579 | if (error) { |
52a05c30 | 1580 | dev_err(&pdev->dev, |
d5818938 | 1581 | "GEM idle failed, resume might fail\n"); |
1f814dac | 1582 | goto out; |
d5818938 | 1583 | } |
db1b76ca | 1584 | |
6b72d486 | 1585 | intel_display_suspend(dev); |
2eb5252e | 1586 | |
d5818938 | 1587 | intel_dp_mst_suspend(dev); |
7d708ee4 | 1588 | |
d5818938 DV |
1589 | intel_runtime_pm_disable_interrupts(dev_priv); |
1590 | intel_hpd_cancel_work(dev_priv); | |
09b64267 | 1591 | |
d5818938 | 1592 | intel_suspend_encoders(dev_priv); |
0e32b39c | 1593 | |
712bf364 | 1594 | intel_suspend_hw(dev_priv); |
5669fcac | 1595 | |
275a991c | 1596 | i915_gem_suspend_gtt_mappings(dev_priv); |
828c7908 | 1597 | |
af6dc742 | 1598 | i915_save_state(dev_priv); |
9e06dd39 | 1599 | |
bc87229f | 1600 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
6f9f4b7a | 1601 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); |
e5747e3a | 1602 | |
68f60946 | 1603 | intel_uncore_suspend(dev_priv); |
03d92e47 | 1604 | intel_opregion_unregister(dev_priv); |
8ee1c3db | 1605 | |
82e3b8c1 | 1606 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
3fa016a0 | 1607 | |
62d5d69b MK |
1608 | dev_priv->suspend_count++; |
1609 | ||
f74ed08d | 1610 | intel_csr_ucode_suspend(dev_priv); |
f514c2d8 | 1611 | |
1f814dac ID |
1612 | out: |
1613 | enable_rpm_wakeref_asserts(dev_priv); | |
1614 | ||
1615 | return error; | |
84b79f8d RW |
1616 | } |
1617 | ||
c49d13ee | 1618 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
c3c09c95 | 1619 | { |
c49d13ee | 1620 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1621 | struct pci_dev *pdev = dev_priv->drm.pdev; |
bc87229f | 1622 | bool fw_csr; |
c3c09c95 ID |
1623 | int ret; |
1624 | ||
1f814dac ID |
1625 | disable_rpm_wakeref_asserts(dev_priv); |
1626 | ||
4c494a57 ID |
1627 | intel_display_set_init_power(dev_priv, false); |
1628 | ||
dd9f31c7 | 1629 | fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation && |
a7c8125f | 1630 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; |
bc87229f ID |
1631 | /* |
1632 | * In case of firmware assisted context save/restore don't manually | |
1633 | * deinit the power domains. This also means the CSR/DMC firmware will | |
1634 | * stay active, it will power down any HW resources as required and | |
1635 | * also enable deeper system power states that would be blocked if the | |
1636 | * firmware was inactive. | |
1637 | */ | |
1638 | if (!fw_csr) | |
1639 | intel_power_domains_suspend(dev_priv); | |
73dfc227 | 1640 | |
507e126e | 1641 | ret = 0; |
b9fd799e | 1642 | if (IS_GEN9_LP(dev_priv)) |
507e126e | 1643 | bxt_enable_dc9(dev_priv); |
b8aea3d1 | 1644 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
507e126e ID |
1645 | hsw_enable_pc8(dev_priv); |
1646 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | |
1647 | ret = vlv_suspend_complete(dev_priv); | |
c3c09c95 ID |
1648 | |
1649 | if (ret) { | |
1650 | DRM_ERROR("Suspend complete failed: %d\n", ret); | |
bc87229f ID |
1651 | if (!fw_csr) |
1652 | intel_power_domains_init_hw(dev_priv, true); | |
c3c09c95 | 1653 | |
1f814dac | 1654 | goto out; |
c3c09c95 ID |
1655 | } |
1656 | ||
52a05c30 | 1657 | pci_disable_device(pdev); |
ab3be73f | 1658 | /* |
54875571 | 1659 | * During hibernation on some platforms the BIOS may try to access |
ab3be73f ID |
1660 | * the device even though it's already in D3 and hang the machine. So |
1661 | * leave the device in D0 on those platforms and hope the BIOS will | |
54875571 ID |
1662 | * power down the device properly. The issue was seen on multiple old |
1663 | * GENs with different BIOS vendors, so having an explicit blacklist | |
1664 | * is inpractical; apply the workaround on everything pre GEN6. The | |
1665 | * platforms where the issue was seen: | |
1666 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 | |
1667 | * Fujitsu FSC S7110 | |
1668 | * Acer Aspire 1830T | |
ab3be73f | 1669 | */ |
514e1d64 | 1670 | if (!(hibernation && INTEL_GEN(dev_priv) < 6)) |
52a05c30 | 1671 | pci_set_power_state(pdev, PCI_D3hot); |
c3c09c95 | 1672 | |
bc87229f ID |
1673 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); |
1674 | ||
1f814dac ID |
1675 | out: |
1676 | enable_rpm_wakeref_asserts(dev_priv); | |
1677 | ||
1678 | return ret; | |
c3c09c95 ID |
1679 | } |
1680 | ||
a9a251c2 | 1681 | static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
84b79f8d RW |
1682 | { |
1683 | int error; | |
1684 | ||
ded8b07d | 1685 | if (!dev) { |
84b79f8d RW |
1686 | DRM_ERROR("dev: %p\n", dev); |
1687 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | |
1688 | return -ENODEV; | |
1689 | } | |
1690 | ||
0b14cbd2 ID |
1691 | if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && |
1692 | state.event != PM_EVENT_FREEZE)) | |
1693 | return -EINVAL; | |
5bcf719b DA |
1694 | |
1695 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
1696 | return 0; | |
6eecba33 | 1697 | |
5e365c39 | 1698 | error = i915_drm_suspend(dev); |
84b79f8d RW |
1699 | if (error) |
1700 | return error; | |
1701 | ||
ab3be73f | 1702 | return i915_drm_suspend_late(dev, false); |
ba8bbcf6 JB |
1703 | } |
1704 | ||
5e365c39 | 1705 | static int i915_drm_resume(struct drm_device *dev) |
76c4b250 | 1706 | { |
fac5e23e | 1707 | struct drm_i915_private *dev_priv = to_i915(dev); |
ac840ae5 | 1708 | int ret; |
9d49c0ef | 1709 | |
1f814dac | 1710 | disable_rpm_wakeref_asserts(dev_priv); |
abc80abd | 1711 | intel_sanitize_gt_powersave(dev_priv); |
1f814dac | 1712 | |
97d6d7ab | 1713 | ret = i915_ggtt_enable_hw(dev_priv); |
ac840ae5 VS |
1714 | if (ret) |
1715 | DRM_ERROR("failed to re-enable GGTT\n"); | |
1716 | ||
f74ed08d ID |
1717 | intel_csr_ucode_resume(dev_priv); |
1718 | ||
af6dc742 | 1719 | i915_restore_state(dev_priv); |
8090ba8c | 1720 | intel_pps_unlock_regs_wa(dev_priv); |
6f9f4b7a | 1721 | intel_opregion_setup(dev_priv); |
61caf87c | 1722 | |
c39055b0 | 1723 | intel_init_pch_refclk(dev_priv); |
1833b134 | 1724 | |
364aece0 PA |
1725 | /* |
1726 | * Interrupts have to be enabled before any batches are run. If not the | |
1727 | * GPU will hang. i915_gem_init_hw() will initiate batches to | |
1728 | * update/restore the context. | |
1729 | * | |
908764f6 ID |
1730 | * drm_mode_config_reset() needs AUX interrupts. |
1731 | * | |
364aece0 PA |
1732 | * Modeset enabling in intel_modeset_init_hw() also needs working |
1733 | * interrupts. | |
1734 | */ | |
1735 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1736 | ||
908764f6 ID |
1737 | drm_mode_config_reset(dev); |
1738 | ||
37cd3300 | 1739 | i915_gem_resume(dev_priv); |
226485e9 | 1740 | |
d5818938 | 1741 | intel_modeset_init_hw(dev); |
675f7ff3 | 1742 | intel_init_clock_gating(dev_priv); |
24576d23 | 1743 | |
d5818938 DV |
1744 | spin_lock_irq(&dev_priv->irq_lock); |
1745 | if (dev_priv->display.hpd_irq_setup) | |
91d14251 | 1746 | dev_priv->display.hpd_irq_setup(dev_priv); |
d5818938 | 1747 | spin_unlock_irq(&dev_priv->irq_lock); |
0e32b39c | 1748 | |
d5818938 | 1749 | intel_dp_mst_resume(dev); |
e7d6f7d7 | 1750 | |
a16b7658 L |
1751 | intel_display_resume(dev); |
1752 | ||
e0b70061 L |
1753 | drm_kms_helper_poll_enable(dev); |
1754 | ||
d5818938 DV |
1755 | /* |
1756 | * ... but also need to make sure that hotplug processing | |
1757 | * doesn't cause havoc. Like in the driver load code we don't | |
1758 | * bother with the tiny race here where we might loose hotplug | |
1759 | * notifications. | |
1760 | * */ | |
1761 | intel_hpd_init(dev_priv); | |
1daed3fb | 1762 | |
03d92e47 | 1763 | intel_opregion_register(dev_priv); |
44834a67 | 1764 | |
82e3b8c1 | 1765 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
073f34d9 | 1766 | |
b8efb17b ZR |
1767 | mutex_lock(&dev_priv->modeset_restore_lock); |
1768 | dev_priv->modeset_restore = MODESET_DONE; | |
1769 | mutex_unlock(&dev_priv->modeset_restore_lock); | |
8a187455 | 1770 | |
6f9f4b7a | 1771 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
e5747e3a | 1772 | |
1f814dac ID |
1773 | enable_rpm_wakeref_asserts(dev_priv); |
1774 | ||
074c6ada | 1775 | return 0; |
84b79f8d RW |
1776 | } |
1777 | ||
5e365c39 | 1778 | static int i915_drm_resume_early(struct drm_device *dev) |
84b79f8d | 1779 | { |
fac5e23e | 1780 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1781 | struct pci_dev *pdev = dev_priv->drm.pdev; |
44410cd0 | 1782 | int ret; |
36d61e67 | 1783 | |
76c4b250 ID |
1784 | /* |
1785 | * We have a resume ordering issue with the snd-hda driver also | |
1786 | * requiring our device to be power up. Due to the lack of a | |
1787 | * parent/child relationship we currently solve this with an early | |
1788 | * resume hook. | |
1789 | * | |
1790 | * FIXME: This should be solved with a special hdmi sink device or | |
1791 | * similar so that power domains can be employed. | |
1792 | */ | |
44410cd0 ID |
1793 | |
1794 | /* | |
1795 | * Note that we need to set the power state explicitly, since we | |
1796 | * powered off the device during freeze and the PCI core won't power | |
1797 | * it back up for us during thaw. Powering off the device during | |
1798 | * freeze is not a hard requirement though, and during the | |
1799 | * suspend/resume phases the PCI core makes sure we get here with the | |
1800 | * device powered on. So in case we change our freeze logic and keep | |
1801 | * the device powered we can also remove the following set power state | |
1802 | * call. | |
1803 | */ | |
52a05c30 | 1804 | ret = pci_set_power_state(pdev, PCI_D0); |
44410cd0 ID |
1805 | if (ret) { |
1806 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); | |
1807 | goto out; | |
1808 | } | |
1809 | ||
1810 | /* | |
1811 | * Note that pci_enable_device() first enables any parent bridge | |
1812 | * device and only then sets the power state for this device. The | |
1813 | * bridge enabling is a nop though, since bridge devices are resumed | |
1814 | * first. The order of enabling power and enabling the device is | |
1815 | * imposed by the PCI core as described above, so here we preserve the | |
1816 | * same order for the freeze/thaw phases. | |
1817 | * | |
1818 | * TODO: eventually we should remove pci_disable_device() / | |
1819 | * pci_enable_enable_device() from suspend/resume. Due to how they | |
1820 | * depend on the device enable refcount we can't anyway depend on them | |
1821 | * disabling/enabling the device. | |
1822 | */ | |
52a05c30 | 1823 | if (pci_enable_device(pdev)) { |
bc87229f ID |
1824 | ret = -EIO; |
1825 | goto out; | |
1826 | } | |
84b79f8d | 1827 | |
52a05c30 | 1828 | pci_set_master(pdev); |
84b79f8d | 1829 | |
1f814dac ID |
1830 | disable_rpm_wakeref_asserts(dev_priv); |
1831 | ||
666a4537 | 1832 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1a5df187 | 1833 | ret = vlv_resume_prepare(dev_priv, false); |
36d61e67 | 1834 | if (ret) |
ff0b187f DL |
1835 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
1836 | ret); | |
36d61e67 | 1837 | |
68f60946 | 1838 | intel_uncore_resume_early(dev_priv); |
efee833a | 1839 | |
b9fd799e | 1840 | if (IS_GEN9_LP(dev_priv)) { |
da2f41d1 ID |
1841 | if (!dev_priv->suspended_to_idle) |
1842 | gen9_sanitize_dc_state(dev_priv); | |
507e126e | 1843 | bxt_disable_dc9(dev_priv); |
da2f41d1 | 1844 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
a9a6b73a | 1845 | hsw_disable_pc8(dev_priv); |
da2f41d1 | 1846 | } |
efee833a | 1847 | |
dc97997a | 1848 | intel_uncore_sanitize(dev_priv); |
bc87229f | 1849 | |
b9fd799e | 1850 | if (IS_GEN9_LP(dev_priv) || |
a7c8125f | 1851 | !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) |
bc87229f | 1852 | intel_power_domains_init_hw(dev_priv, true); |
ac25dfed ML |
1853 | else |
1854 | intel_display_set_init_power(dev_priv, true); | |
bc87229f | 1855 | |
24145517 CW |
1856 | i915_gem_sanitize(dev_priv); |
1857 | ||
6e35e8ab ID |
1858 | enable_rpm_wakeref_asserts(dev_priv); |
1859 | ||
bc87229f ID |
1860 | out: |
1861 | dev_priv->suspended_to_idle = false; | |
36d61e67 ID |
1862 | |
1863 | return ret; | |
76c4b250 ID |
1864 | } |
1865 | ||
7f26cb88 | 1866 | static int i915_resume_switcheroo(struct drm_device *dev) |
76c4b250 | 1867 | { |
50a0072f | 1868 | int ret; |
76c4b250 | 1869 | |
097dd837 ID |
1870 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
1871 | return 0; | |
1872 | ||
5e365c39 | 1873 | ret = i915_drm_resume_early(dev); |
50a0072f ID |
1874 | if (ret) |
1875 | return ret; | |
1876 | ||
5a17514e ID |
1877 | return i915_drm_resume(dev); |
1878 | } | |
1879 | ||
11ed50ec | 1880 | /** |
f3953dcb | 1881 | * i915_reset - reset chip after a hang |
535275d3 CW |
1882 | * @i915: #drm_i915_private to reset |
1883 | * @flags: Instructions | |
11ed50ec | 1884 | * |
780f262a CW |
1885 | * Reset the chip. Useful if a hang is detected. Marks the device as wedged |
1886 | * on failure. | |
11ed50ec | 1887 | * |
221fe799 CW |
1888 | * Caller must hold the struct_mutex. |
1889 | * | |
11ed50ec BG |
1890 | * Procedure is fairly simple: |
1891 | * - reset the chip using the reset reg | |
1892 | * - re-init context state | |
1893 | * - re-init hardware status page | |
1894 | * - re-init ring buffer | |
1895 | * - re-init interrupt state | |
1896 | * - re-init display | |
1897 | */ | |
535275d3 | 1898 | void i915_reset(struct drm_i915_private *i915, unsigned int flags) |
11ed50ec | 1899 | { |
535275d3 | 1900 | struct i915_gpu_error *error = &i915->gpu_error; |
0573ed4a | 1901 | int ret; |
f7096d40 | 1902 | int i; |
11ed50ec | 1903 | |
f7096d40 | 1904 | might_sleep(); |
535275d3 | 1905 | lockdep_assert_held(&i915->drm.struct_mutex); |
8c185eca | 1906 | GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); |
221fe799 | 1907 | |
8c185eca | 1908 | if (!test_bit(I915_RESET_HANDOFF, &error->flags)) |
780f262a | 1909 | return; |
11ed50ec | 1910 | |
d98c52cf | 1911 | /* Clear any previous failed attempts at recovery. Time to try again. */ |
535275d3 | 1912 | if (!i915_gem_unset_wedged(i915)) |
2e8f9d32 CW |
1913 | goto wakeup; |
1914 | ||
535275d3 CW |
1915 | if (!(flags & I915_RESET_QUIET)) |
1916 | dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n"); | |
8af29b0c | 1917 | error->reset_count++; |
d98c52cf | 1918 | |
535275d3 CW |
1919 | disable_irq(i915->drm.irq); |
1920 | ret = i915_gem_reset_prepare(i915); | |
0e178aef | 1921 | if (ret) { |
107783d0 | 1922 | dev_err(i915->drm.dev, "GPU recovery failed\n"); |
535275d3 | 1923 | intel_gpu_reset(i915, ALL_ENGINES); |
107783d0 | 1924 | goto taint; |
0e178aef | 1925 | } |
9e60ab03 | 1926 | |
f7096d40 | 1927 | if (!intel_has_gpu_reset(i915)) { |
3ef98f50 CW |
1928 | if (i915_modparams.reset) |
1929 | dev_err(i915->drm.dev, "GPU reset not supported\n"); | |
1930 | else | |
1931 | DRM_DEBUG_DRIVER("GPU reset disabled\n"); | |
f7096d40 CW |
1932 | goto error; |
1933 | } | |
1934 | ||
1935 | for (i = 0; i < 3; i++) { | |
1936 | ret = intel_gpu_reset(i915, ALL_ENGINES); | |
1937 | if (ret == 0) | |
1938 | break; | |
1939 | ||
1940 | msleep(100); | |
1941 | } | |
0573ed4a | 1942 | if (ret) { |
f7096d40 | 1943 | dev_err(i915->drm.dev, "Failed to reset chip\n"); |
107783d0 | 1944 | goto taint; |
11ed50ec BG |
1945 | } |
1946 | ||
1947 | /* Ok, now get things going again... */ | |
1948 | ||
1949 | /* | |
1950 | * Everything depends on having the GTT running, so we need to start | |
0db8c961 CW |
1951 | * there. |
1952 | */ | |
1953 | ret = i915_ggtt_enable_hw(i915); | |
1954 | if (ret) { | |
8177e112 CW |
1955 | DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", |
1956 | ret); | |
0db8c961 CW |
1957 | goto error; |
1958 | } | |
1959 | ||
a31d73c3 CW |
1960 | i915_gem_reset(i915); |
1961 | intel_overlay_reset(i915); | |
1962 | ||
0db8c961 | 1963 | /* |
11ed50ec BG |
1964 | * Next we need to restore the context, but we don't use those |
1965 | * yet either... | |
1966 | * | |
1967 | * Ring buffer needs to be re-initialized in the KMS case, or if X | |
1968 | * was running at the time of the reset (i.e. we weren't VT | |
1969 | * switched away). | |
1970 | */ | |
535275d3 | 1971 | ret = i915_gem_init_hw(i915); |
33d30a9c | 1972 | if (ret) { |
8177e112 CW |
1973 | DRM_ERROR("Failed to initialise HW following reset (%d)\n", |
1974 | ret); | |
d98c52cf | 1975 | goto error; |
11ed50ec BG |
1976 | } |
1977 | ||
535275d3 | 1978 | i915_queue_hangcheck(i915); |
c2a126a4 | 1979 | |
2e8f9d32 | 1980 | finish: |
535275d3 CW |
1981 | i915_gem_reset_finish(i915); |
1982 | enable_irq(i915->drm.irq); | |
8c185eca | 1983 | |
2e8f9d32 | 1984 | wakeup: |
8c185eca CW |
1985 | clear_bit(I915_RESET_HANDOFF, &error->flags); |
1986 | wake_up_bit(&error->flags, I915_RESET_HANDOFF); | |
780f262a | 1987 | return; |
d98c52cf | 1988 | |
107783d0 CW |
1989 | taint: |
1990 | /* | |
1991 | * History tells us that if we cannot reset the GPU now, we | |
1992 | * never will. This then impacts everything that is run | |
1993 | * subsequently. On failing the reset, we mark the driver | |
1994 | * as wedged, preventing further execution on the GPU. | |
1995 | * We also want to go one step further and add a taint to the | |
1996 | * kernel so that any subsequent faults can be traced back to | |
1997 | * this failure. This is important for CI, where if the | |
1998 | * GPU/driver fails we would like to reboot and restart testing | |
1999 | * rather than continue on into oblivion. For everyone else, | |
2000 | * the system should still plod along, but they have been warned! | |
2001 | */ | |
2002 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); | |
d98c52cf | 2003 | error: |
535275d3 CW |
2004 | i915_gem_set_wedged(i915); |
2005 | i915_gem_retire_requests(i915); | |
2e8f9d32 | 2006 | goto finish; |
11ed50ec BG |
2007 | } |
2008 | ||
6acbea89 MT |
2009 | static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv, |
2010 | struct intel_engine_cs *engine) | |
2011 | { | |
2012 | return intel_gpu_reset(dev_priv, intel_engine_flag(engine)); | |
2013 | } | |
2014 | ||
142bc7d9 MT |
2015 | /** |
2016 | * i915_reset_engine - reset GPU engine to recover from a hang | |
2017 | * @engine: engine to reset | |
535275d3 | 2018 | * @flags: options |
142bc7d9 MT |
2019 | * |
2020 | * Reset a specific GPU engine. Useful if a hang is detected. | |
2021 | * Returns zero on successful reset or otherwise an error code. | |
a1ef70e1 MT |
2022 | * |
2023 | * Procedure is: | |
2024 | * - identifies the request that caused the hang and it is dropped | |
2025 | * - reset engine (which will force the engine to idle) | |
2026 | * - re-init/configure engine | |
142bc7d9 | 2027 | */ |
535275d3 | 2028 | int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) |
142bc7d9 | 2029 | { |
a1ef70e1 MT |
2030 | struct i915_gpu_error *error = &engine->i915->gpu_error; |
2031 | struct drm_i915_gem_request *active_request; | |
2032 | int ret; | |
2033 | ||
2034 | GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); | |
2035 | ||
f6ba181a CW |
2036 | active_request = i915_gem_reset_prepare_engine(engine); |
2037 | if (IS_ERR_OR_NULL(active_request)) { | |
2038 | /* Either the previous reset failed, or we pardon the reset. */ | |
2039 | ret = PTR_ERR(active_request); | |
2040 | goto out; | |
2041 | } | |
2042 | ||
535275d3 CW |
2043 | if (!(flags & I915_RESET_QUIET)) { |
2044 | dev_notice(engine->i915->drm.dev, | |
2045 | "Resetting %s after gpu hang\n", engine->name); | |
2046 | } | |
7367612f | 2047 | error->reset_engine_count[engine->id]++; |
a1ef70e1 | 2048 | |
6acbea89 MT |
2049 | if (!engine->i915->guc.execbuf_client) |
2050 | ret = intel_gt_reset_engine(engine->i915, engine); | |
2051 | else | |
2052 | ret = intel_guc_reset_engine(&engine->i915->guc, engine); | |
0364cd19 CW |
2053 | if (ret) { |
2054 | /* If we fail here, we expect to fallback to a global reset */ | |
6acbea89 MT |
2055 | DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", |
2056 | engine->i915->guc.execbuf_client ? "GuC " : "", | |
0364cd19 CW |
2057 | engine->name, ret); |
2058 | goto out; | |
2059 | } | |
b4f3e163 | 2060 | |
a1ef70e1 MT |
2061 | /* |
2062 | * The request that caused the hang is stuck on elsp, we know the | |
2063 | * active request and can drop it, adjust head to skip the offending | |
2064 | * request to resume executing remaining requests in the queue. | |
2065 | */ | |
2066 | i915_gem_reset_engine(engine, active_request); | |
2067 | ||
a1ef70e1 MT |
2068 | /* |
2069 | * The engine and its registers (and workarounds in case of render) | |
2070 | * have been reset to their default values. Follow the init_ring | |
2071 | * process to program RING_MODE, HWSP and re-enable submission. | |
2072 | */ | |
2073 | ret = engine->init_hw(engine); | |
702c8f8e MT |
2074 | if (ret) |
2075 | goto out; | |
a1ef70e1 MT |
2076 | |
2077 | out: | |
0364cd19 | 2078 | i915_gem_reset_finish_engine(engine); |
a1ef70e1 | 2079 | return ret; |
142bc7d9 MT |
2080 | } |
2081 | ||
c49d13ee | 2082 | static int i915_pm_suspend(struct device *kdev) |
112b715e | 2083 | { |
c49d13ee DW |
2084 | struct pci_dev *pdev = to_pci_dev(kdev); |
2085 | struct drm_device *dev = pci_get_drvdata(pdev); | |
112b715e | 2086 | |
c49d13ee DW |
2087 | if (!dev) { |
2088 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); | |
84b79f8d RW |
2089 | return -ENODEV; |
2090 | } | |
112b715e | 2091 | |
c49d13ee | 2092 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
5bcf719b DA |
2093 | return 0; |
2094 | ||
c49d13ee | 2095 | return i915_drm_suspend(dev); |
76c4b250 ID |
2096 | } |
2097 | ||
c49d13ee | 2098 | static int i915_pm_suspend_late(struct device *kdev) |
76c4b250 | 2099 | { |
c49d13ee | 2100 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 ID |
2101 | |
2102 | /* | |
c965d995 | 2103 | * We have a suspend ordering issue with the snd-hda driver also |
76c4b250 ID |
2104 | * requiring our device to be power up. Due to the lack of a |
2105 | * parent/child relationship we currently solve this with an late | |
2106 | * suspend hook. | |
2107 | * | |
2108 | * FIXME: This should be solved with a special hdmi sink device or | |
2109 | * similar so that power domains can be employed. | |
2110 | */ | |
c49d13ee | 2111 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
76c4b250 | 2112 | return 0; |
112b715e | 2113 | |
c49d13ee | 2114 | return i915_drm_suspend_late(dev, false); |
ab3be73f ID |
2115 | } |
2116 | ||
c49d13ee | 2117 | static int i915_pm_poweroff_late(struct device *kdev) |
ab3be73f | 2118 | { |
c49d13ee | 2119 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
ab3be73f | 2120 | |
c49d13ee | 2121 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
ab3be73f ID |
2122 | return 0; |
2123 | ||
c49d13ee | 2124 | return i915_drm_suspend_late(dev, true); |
cbda12d7 ZW |
2125 | } |
2126 | ||
c49d13ee | 2127 | static int i915_pm_resume_early(struct device *kdev) |
76c4b250 | 2128 | { |
c49d13ee | 2129 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
76c4b250 | 2130 | |
c49d13ee | 2131 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
2132 | return 0; |
2133 | ||
c49d13ee | 2134 | return i915_drm_resume_early(dev); |
76c4b250 ID |
2135 | } |
2136 | ||
c49d13ee | 2137 | static int i915_pm_resume(struct device *kdev) |
cbda12d7 | 2138 | { |
c49d13ee | 2139 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
84b79f8d | 2140 | |
c49d13ee | 2141 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
2142 | return 0; |
2143 | ||
c49d13ee | 2144 | return i915_drm_resume(dev); |
cbda12d7 ZW |
2145 | } |
2146 | ||
1f19ac2a | 2147 | /* freeze: before creating the hibernation_image */ |
c49d13ee | 2148 | static int i915_pm_freeze(struct device *kdev) |
1f19ac2a | 2149 | { |
dd9f31c7 | 2150 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
6a800eab CW |
2151 | int ret; |
2152 | ||
dd9f31c7 ID |
2153 | if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { |
2154 | ret = i915_drm_suspend(dev); | |
2155 | if (ret) | |
2156 | return ret; | |
2157 | } | |
6a800eab CW |
2158 | |
2159 | ret = i915_gem_freeze(kdev_to_i915(kdev)); | |
2160 | if (ret) | |
2161 | return ret; | |
2162 | ||
2163 | return 0; | |
1f19ac2a CW |
2164 | } |
2165 | ||
c49d13ee | 2166 | static int i915_pm_freeze_late(struct device *kdev) |
1f19ac2a | 2167 | { |
dd9f31c7 | 2168 | struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
461fb99c CW |
2169 | int ret; |
2170 | ||
dd9f31c7 ID |
2171 | if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { |
2172 | ret = i915_drm_suspend_late(dev, true); | |
2173 | if (ret) | |
2174 | return ret; | |
2175 | } | |
461fb99c | 2176 | |
c49d13ee | 2177 | ret = i915_gem_freeze_late(kdev_to_i915(kdev)); |
461fb99c CW |
2178 | if (ret) |
2179 | return ret; | |
2180 | ||
2181 | return 0; | |
1f19ac2a CW |
2182 | } |
2183 | ||
2184 | /* thaw: called after creating the hibernation image, but before turning off. */ | |
c49d13ee | 2185 | static int i915_pm_thaw_early(struct device *kdev) |
1f19ac2a | 2186 | { |
c49d13ee | 2187 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
2188 | } |
2189 | ||
c49d13ee | 2190 | static int i915_pm_thaw(struct device *kdev) |
1f19ac2a | 2191 | { |
c49d13ee | 2192 | return i915_pm_resume(kdev); |
1f19ac2a CW |
2193 | } |
2194 | ||
2195 | /* restore: called after loading the hibernation image. */ | |
c49d13ee | 2196 | static int i915_pm_restore_early(struct device *kdev) |
1f19ac2a | 2197 | { |
c49d13ee | 2198 | return i915_pm_resume_early(kdev); |
1f19ac2a CW |
2199 | } |
2200 | ||
c49d13ee | 2201 | static int i915_pm_restore(struct device *kdev) |
1f19ac2a | 2202 | { |
c49d13ee | 2203 | return i915_pm_resume(kdev); |
1f19ac2a CW |
2204 | } |
2205 | ||
ddeea5b0 ID |
2206 | /* |
2207 | * Save all Gunit registers that may be lost after a D3 and a subsequent | |
2208 | * S0i[R123] transition. The list of registers needing a save/restore is | |
2209 | * defined in the VLV2_S0IXRegs document. This documents marks all Gunit | |
2210 | * registers in the following way: | |
2211 | * - Driver: saved/restored by the driver | |
2212 | * - Punit : saved/restored by the Punit firmware | |
2213 | * - No, w/o marking: no need to save/restore, since the register is R/O or | |
2214 | * used internally by the HW in a way that doesn't depend | |
2215 | * keeping the content across a suspend/resume. | |
2216 | * - Debug : used for debugging | |
2217 | * | |
2218 | * We save/restore all registers marked with 'Driver', with the following | |
2219 | * exceptions: | |
2220 | * - Registers out of use, including also registers marked with 'Debug'. | |
2221 | * These have no effect on the driver's operation, so we don't save/restore | |
2222 | * them to reduce the overhead. | |
2223 | * - Registers that are fully setup by an initialization function called from | |
2224 | * the resume path. For example many clock gating and RPS/RC6 registers. | |
2225 | * - Registers that provide the right functionality with their reset defaults. | |
2226 | * | |
2227 | * TODO: Except for registers that based on the above 3 criteria can be safely | |
2228 | * ignored, we save/restore all others, practically treating the HW context as | |
2229 | * a black-box for the driver. Further investigation is needed to reduce the | |
2230 | * saved/restored registers even further, by following the same 3 criteria. | |
2231 | */ | |
2232 | static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2233 | { | |
2234 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2235 | int i; | |
2236 | ||
2237 | /* GAM 0x4000-0x4770 */ | |
2238 | s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); | |
2239 | s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); | |
2240 | s->arb_mode = I915_READ(ARB_MODE); | |
2241 | s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); | |
2242 | s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); | |
2243 | ||
2244 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2245 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); |
ddeea5b0 ID |
2246 | |
2247 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); | |
b5f1c97f | 2248 | s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
ddeea5b0 ID |
2249 | |
2250 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); | |
2251 | s->ecochk = I915_READ(GAM_ECOCHK); | |
2252 | s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); | |
2253 | s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); | |
2254 | ||
2255 | s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); | |
2256 | ||
2257 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2258 | s->g3dctl = I915_READ(VLV_G3DCTL); | |
2259 | s->gsckgctl = I915_READ(VLV_GSCKGCTL); | |
2260 | s->mbctl = I915_READ(GEN6_MBCTL); | |
2261 | ||
2262 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2263 | s->ucgctl1 = I915_READ(GEN6_UCGCTL1); | |
2264 | s->ucgctl3 = I915_READ(GEN6_UCGCTL3); | |
2265 | s->rcgctl1 = I915_READ(GEN6_RCGCTL1); | |
2266 | s->rcgctl2 = I915_READ(GEN6_RCGCTL2); | |
2267 | s->rstctl = I915_READ(GEN6_RSTCTL); | |
2268 | s->misccpctl = I915_READ(GEN7_MISCCPCTL); | |
2269 | ||
2270 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2271 | s->gfxpause = I915_READ(GEN6_GFXPAUSE); | |
2272 | s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); | |
2273 | s->rpdeuc = I915_READ(GEN6_RPDEUC); | |
2274 | s->ecobus = I915_READ(ECOBUS); | |
2275 | s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); | |
2276 | s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); | |
2277 | s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); | |
2278 | s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); | |
2279 | s->rcedata = I915_READ(VLV_RCEDATA); | |
2280 | s->spare2gh = I915_READ(VLV_SPAREG2H); | |
2281 | ||
2282 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2283 | s->gt_imr = I915_READ(GTIMR); | |
2284 | s->gt_ier = I915_READ(GTIER); | |
2285 | s->pm_imr = I915_READ(GEN6_PMIMR); | |
2286 | s->pm_ier = I915_READ(GEN6_PMIER); | |
2287 | ||
2288 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2289 | s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); |
ddeea5b0 ID |
2290 | |
2291 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2292 | s->tilectl = I915_READ(TILECTL); | |
2293 | s->gt_fifoctl = I915_READ(GTFIFOCTL); | |
2294 | s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2295 | s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2296 | s->pmwgicz = I915_READ(VLV_PMWGICZ); | |
2297 | ||
2298 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2299 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); | |
2300 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); | |
9c25210f | 2301 | s->pcbr = I915_READ(VLV_PCBR); |
ddeea5b0 ID |
2302 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
2303 | ||
2304 | /* | |
2305 | * Not saving any of: | |
2306 | * DFT, 0x9800-0x9EC0 | |
2307 | * SARB, 0xB000-0xB1FC | |
2308 | * GAC, 0x5208-0x524C, 0x14000-0x14C000 | |
2309 | * PCI CFG | |
2310 | */ | |
2311 | } | |
2312 | ||
2313 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |
2314 | { | |
2315 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; | |
2316 | u32 val; | |
2317 | int i; | |
2318 | ||
2319 | /* GAM 0x4000-0x4770 */ | |
2320 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); | |
2321 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); | |
2322 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); | |
2323 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); | |
2324 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); | |
2325 | ||
2326 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) | |
22dfe79f | 2327 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
ddeea5b0 ID |
2328 | |
2329 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); | |
b5f1c97f | 2330 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
ddeea5b0 ID |
2331 | |
2332 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); | |
2333 | I915_WRITE(GAM_ECOCHK, s->ecochk); | |
2334 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); | |
2335 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); | |
2336 | ||
2337 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); | |
2338 | ||
2339 | /* MBC 0x9024-0x91D0, 0x8500 */ | |
2340 | I915_WRITE(VLV_G3DCTL, s->g3dctl); | |
2341 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); | |
2342 | I915_WRITE(GEN6_MBCTL, s->mbctl); | |
2343 | ||
2344 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ | |
2345 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); | |
2346 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); | |
2347 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); | |
2348 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); | |
2349 | I915_WRITE(GEN6_RSTCTL, s->rstctl); | |
2350 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); | |
2351 | ||
2352 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ | |
2353 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); | |
2354 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); | |
2355 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); | |
2356 | I915_WRITE(ECOBUS, s->ecobus); | |
2357 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); | |
2358 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout); | |
2359 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); | |
2360 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); | |
2361 | I915_WRITE(VLV_RCEDATA, s->rcedata); | |
2362 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); | |
2363 | ||
2364 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ | |
2365 | I915_WRITE(GTIMR, s->gt_imr); | |
2366 | I915_WRITE(GTIER, s->gt_ier); | |
2367 | I915_WRITE(GEN6_PMIMR, s->pm_imr); | |
2368 | I915_WRITE(GEN6_PMIER, s->pm_ier); | |
2369 | ||
2370 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) | |
22dfe79f | 2371 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
ddeea5b0 ID |
2372 | |
2373 | /* GT SA CZ domain, 0x100000-0x138124 */ | |
2374 | I915_WRITE(TILECTL, s->tilectl); | |
2375 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); | |
2376 | /* | |
2377 | * Preserve the GT allow wake and GFX force clock bit, they are not | |
2378 | * be restored, as they are used to control the s0ix suspend/resume | |
2379 | * sequence by the caller. | |
2380 | */ | |
2381 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2382 | val &= VLV_GTLC_ALLOWWAKEREQ; | |
2383 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; | |
2384 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2385 | ||
2386 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | |
2387 | val &= VLV_GFX_CLK_FORCE_ON_BIT; | |
2388 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2389 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2390 | ||
2391 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); | |
2392 | ||
2393 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | |
2394 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | |
2395 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | |
9c25210f | 2396 | I915_WRITE(VLV_PCBR, s->pcbr); |
ddeea5b0 ID |
2397 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
2398 | } | |
2399 | ||
3dd14c04 CW |
2400 | static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, |
2401 | u32 mask, u32 val) | |
2402 | { | |
2403 | /* The HW does not like us polling for PW_STATUS frequently, so | |
2404 | * use the sleeping loop rather than risk the busy spin within | |
2405 | * intel_wait_for_register(). | |
2406 | * | |
2407 | * Transitioning between RC6 states should be at most 2ms (see | |
2408 | * valleyview_enable_rps) so use a 3ms timeout. | |
2409 | */ | |
2410 | return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, | |
2411 | 3); | |
2412 | } | |
2413 | ||
650ad970 ID |
2414 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
2415 | { | |
2416 | u32 val; | |
2417 | int err; | |
2418 | ||
650ad970 ID |
2419 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
2420 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | |
2421 | if (force_on) | |
2422 | val |= VLV_GFX_CLK_FORCE_ON_BIT; | |
2423 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); | |
2424 | ||
2425 | if (!force_on) | |
2426 | return 0; | |
2427 | ||
c6ddc5f3 CW |
2428 | err = intel_wait_for_register(dev_priv, |
2429 | VLV_GTLC_SURVIVABILITY_REG, | |
2430 | VLV_GFX_CLK_STATUS_BIT, | |
2431 | VLV_GFX_CLK_STATUS_BIT, | |
2432 | 20); | |
650ad970 ID |
2433 | if (err) |
2434 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", | |
2435 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | |
2436 | ||
2437 | return err; | |
650ad970 ID |
2438 | } |
2439 | ||
ddeea5b0 ID |
2440 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
2441 | { | |
3dd14c04 | 2442 | u32 mask; |
ddeea5b0 | 2443 | u32 val; |
3dd14c04 | 2444 | int err; |
ddeea5b0 ID |
2445 | |
2446 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | |
2447 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | |
2448 | if (allow) | |
2449 | val |= VLV_GTLC_ALLOWWAKEREQ; | |
2450 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | |
2451 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | |
2452 | ||
3dd14c04 CW |
2453 | mask = VLV_GTLC_ALLOWWAKEACK; |
2454 | val = allow ? mask : 0; | |
2455 | ||
2456 | err = vlv_wait_for_pw_status(dev_priv, mask, val); | |
ddeea5b0 ID |
2457 | if (err) |
2458 | DRM_ERROR("timeout disabling GT waking\n"); | |
b2736695 | 2459 | |
ddeea5b0 | 2460 | return err; |
ddeea5b0 ID |
2461 | } |
2462 | ||
3dd14c04 CW |
2463 | static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
2464 | bool wait_for_on) | |
ddeea5b0 ID |
2465 | { |
2466 | u32 mask; | |
2467 | u32 val; | |
ddeea5b0 ID |
2468 | |
2469 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | |
2470 | val = wait_for_on ? mask : 0; | |
ddeea5b0 ID |
2471 | |
2472 | /* | |
2473 | * RC6 transitioning can be delayed up to 2 msec (see | |
2474 | * valleyview_enable_rps), use 3 msec for safety. | |
2475 | */ | |
3dd14c04 | 2476 | if (vlv_wait_for_pw_status(dev_priv, mask, val)) |
ddeea5b0 | 2477 | DRM_ERROR("timeout waiting for GT wells to go %s\n", |
87ad3212 | 2478 | onoff(wait_for_on)); |
ddeea5b0 ID |
2479 | } |
2480 | ||
2481 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | |
2482 | { | |
2483 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) | |
2484 | return; | |
2485 | ||
6fa283b0 | 2486 | DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
ddeea5b0 ID |
2487 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
2488 | } | |
2489 | ||
ebc32824 | 2490 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
ddeea5b0 ID |
2491 | { |
2492 | u32 mask; | |
2493 | int err; | |
2494 | ||
2495 | /* | |
2496 | * Bspec defines the following GT well on flags as debug only, so | |
2497 | * don't treat them as hard failures. | |
2498 | */ | |
3dd14c04 | 2499 | vlv_wait_for_gt_wells(dev_priv, false); |
ddeea5b0 ID |
2500 | |
2501 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | |
2502 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | |
2503 | ||
2504 | vlv_check_no_gt_access(dev_priv); | |
2505 | ||
2506 | err = vlv_force_gfx_clock(dev_priv, true); | |
2507 | if (err) | |
2508 | goto err1; | |
2509 | ||
2510 | err = vlv_allow_gt_wake(dev_priv, false); | |
2511 | if (err) | |
2512 | goto err2; | |
98711167 | 2513 | |
2d1fe073 | 2514 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2515 | vlv_save_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2516 | |
2517 | err = vlv_force_gfx_clock(dev_priv, false); | |
2518 | if (err) | |
2519 | goto err2; | |
2520 | ||
2521 | return 0; | |
2522 | ||
2523 | err2: | |
2524 | /* For safety always re-enable waking and disable gfx clock forcing */ | |
2525 | vlv_allow_gt_wake(dev_priv, true); | |
2526 | err1: | |
2527 | vlv_force_gfx_clock(dev_priv, false); | |
2528 | ||
2529 | return err; | |
2530 | } | |
2531 | ||
016970be SK |
2532 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
2533 | bool rpm_resume) | |
ddeea5b0 | 2534 | { |
ddeea5b0 ID |
2535 | int err; |
2536 | int ret; | |
2537 | ||
2538 | /* | |
2539 | * If any of the steps fail just try to continue, that's the best we | |
2540 | * can do at this point. Return the first error code (which will also | |
2541 | * leave RPM permanently disabled). | |
2542 | */ | |
2543 | ret = vlv_force_gfx_clock(dev_priv, true); | |
2544 | ||
2d1fe073 | 2545 | if (!IS_CHERRYVIEW(dev_priv)) |
98711167 | 2546 | vlv_restore_gunit_s0ix_state(dev_priv); |
ddeea5b0 ID |
2547 | |
2548 | err = vlv_allow_gt_wake(dev_priv, true); | |
2549 | if (!ret) | |
2550 | ret = err; | |
2551 | ||
2552 | err = vlv_force_gfx_clock(dev_priv, false); | |
2553 | if (!ret) | |
2554 | ret = err; | |
2555 | ||
2556 | vlv_check_no_gt_access(dev_priv); | |
2557 | ||
7c108fd8 | 2558 | if (rpm_resume) |
46f16e63 | 2559 | intel_init_clock_gating(dev_priv); |
ddeea5b0 ID |
2560 | |
2561 | return ret; | |
2562 | } | |
2563 | ||
c49d13ee | 2564 | static int intel_runtime_suspend(struct device *kdev) |
8a187455 | 2565 | { |
c49d13ee | 2566 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2567 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2568 | struct drm_i915_private *dev_priv = to_i915(dev); |
0ab9cfeb | 2569 | int ret; |
8a187455 | 2570 | |
fb6db0f5 | 2571 | if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) |
c6df39b5 ID |
2572 | return -ENODEV; |
2573 | ||
6772ffe0 | 2574 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
604effb7 ID |
2575 | return -ENODEV; |
2576 | ||
8a187455 PZ |
2577 | DRM_DEBUG_KMS("Suspending device\n"); |
2578 | ||
1f814dac ID |
2579 | disable_rpm_wakeref_asserts(dev_priv); |
2580 | ||
d6102977 ID |
2581 | /* |
2582 | * We are safe here against re-faults, since the fault handler takes | |
2583 | * an RPM reference. | |
2584 | */ | |
7c108fd8 | 2585 | i915_gem_runtime_suspend(dev_priv); |
d6102977 | 2586 | |
bf9e8429 | 2587 | intel_guc_suspend(dev_priv); |
a1c41994 | 2588 | |
2eb5252e | 2589 | intel_runtime_pm_disable_interrupts(dev_priv); |
b5478bcd | 2590 | |
01c799c9 HG |
2591 | intel_uncore_suspend(dev_priv); |
2592 | ||
507e126e | 2593 | ret = 0; |
b9fd799e | 2594 | if (IS_GEN9_LP(dev_priv)) { |
507e126e ID |
2595 | bxt_display_core_uninit(dev_priv); |
2596 | bxt_enable_dc9(dev_priv); | |
2597 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | |
2598 | hsw_enable_pc8(dev_priv); | |
2599 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | |
2600 | ret = vlv_suspend_complete(dev_priv); | |
2601 | } | |
2602 | ||
0ab9cfeb ID |
2603 | if (ret) { |
2604 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | |
01c799c9 HG |
2605 | intel_uncore_runtime_resume(dev_priv); |
2606 | ||
b963291c | 2607 | intel_runtime_pm_enable_interrupts(dev_priv); |
0ab9cfeb | 2608 | |
1ed21cb4 SAK |
2609 | intel_guc_resume(dev_priv); |
2610 | ||
2611 | i915_gem_init_swizzling(dev_priv); | |
2612 | i915_gem_restore_fences(dev_priv); | |
2613 | ||
1f814dac ID |
2614 | enable_rpm_wakeref_asserts(dev_priv); |
2615 | ||
0ab9cfeb ID |
2616 | return ret; |
2617 | } | |
a8a8bd54 | 2618 | |
1f814dac | 2619 | enable_rpm_wakeref_asserts(dev_priv); |
ad1443f0 | 2620 | WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); |
55ec45c2 | 2621 | |
bc3b9346 | 2622 | if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) |
55ec45c2 MK |
2623 | DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
2624 | ||
ad1443f0 | 2625 | dev_priv->runtime_pm.suspended = true; |
1fb2362b KCA |
2626 | |
2627 | /* | |
c8a0bd42 PZ |
2628 | * FIXME: We really should find a document that references the arguments |
2629 | * used below! | |
1fb2362b | 2630 | */ |
6f9f4b7a | 2631 | if (IS_BROADWELL(dev_priv)) { |
d37ae19a PZ |
2632 | /* |
2633 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
2634 | * being detected, and the call we do at intel_runtime_resume() | |
2635 | * won't be able to restore them. Since PCI_D3hot matches the | |
2636 | * actual specification and appears to be working, use it. | |
2637 | */ | |
6f9f4b7a | 2638 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
d37ae19a | 2639 | } else { |
c8a0bd42 PZ |
2640 | /* |
2641 | * current versions of firmware which depend on this opregion | |
2642 | * notification have repurposed the D1 definition to mean | |
2643 | * "runtime suspended" vs. what you would normally expect (D3) | |
2644 | * to distinguish it from notifications that might be sent via | |
2645 | * the suspend path. | |
2646 | */ | |
6f9f4b7a | 2647 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
c8a0bd42 | 2648 | } |
8a187455 | 2649 | |
59bad947 | 2650 | assert_forcewakes_inactive(dev_priv); |
dc9fb09c | 2651 | |
21d6e0bd | 2652 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
19625e85 L |
2653 | intel_hpd_poll_init(dev_priv); |
2654 | ||
a8a8bd54 | 2655 | DRM_DEBUG_KMS("Device suspended\n"); |
8a187455 PZ |
2656 | return 0; |
2657 | } | |
2658 | ||
c49d13ee | 2659 | static int intel_runtime_resume(struct device *kdev) |
8a187455 | 2660 | { |
c49d13ee | 2661 | struct pci_dev *pdev = to_pci_dev(kdev); |
8a187455 | 2662 | struct drm_device *dev = pci_get_drvdata(pdev); |
fac5e23e | 2663 | struct drm_i915_private *dev_priv = to_i915(dev); |
1a5df187 | 2664 | int ret = 0; |
8a187455 | 2665 | |
6772ffe0 | 2666 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
604effb7 | 2667 | return -ENODEV; |
8a187455 PZ |
2668 | |
2669 | DRM_DEBUG_KMS("Resuming device\n"); | |
2670 | ||
ad1443f0 | 2671 | WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); |
1f814dac ID |
2672 | disable_rpm_wakeref_asserts(dev_priv); |
2673 | ||
6f9f4b7a | 2674 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
ad1443f0 | 2675 | dev_priv->runtime_pm.suspended = false; |
55ec45c2 MK |
2676 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
2677 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | |
8a187455 | 2678 | |
b9fd799e | 2679 | if (IS_GEN9_LP(dev_priv)) { |
507e126e ID |
2680 | bxt_disable_dc9(dev_priv); |
2681 | bxt_display_core_init(dev_priv, true); | |
f62c79b3 ID |
2682 | if (dev_priv->csr.dmc_payload && |
2683 | (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) | |
2684 | gen9_enable_dc5(dev_priv); | |
507e126e | 2685 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
1a5df187 | 2686 | hsw_disable_pc8(dev_priv); |
507e126e | 2687 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1a5df187 | 2688 | ret = vlv_resume_prepare(dev_priv, true); |
507e126e | 2689 | } |
1a5df187 | 2690 | |
bedf4d79 HG |
2691 | intel_uncore_runtime_resume(dev_priv); |
2692 | ||
1ed21cb4 SAK |
2693 | intel_runtime_pm_enable_interrupts(dev_priv); |
2694 | ||
2695 | intel_guc_resume(dev_priv); | |
2696 | ||
0ab9cfeb ID |
2697 | /* |
2698 | * No point of rolling back things in case of an error, as the best | |
2699 | * we can do is to hope that things will still work (and disable RPM). | |
2700 | */ | |
c6be607a | 2701 | i915_gem_init_swizzling(dev_priv); |
83bf6d55 | 2702 | i915_gem_restore_fences(dev_priv); |
92b806d3 | 2703 | |
08d8a232 VS |
2704 | /* |
2705 | * On VLV/CHV display interrupts are part of the display | |
2706 | * power well, so hpd is reinitialized from there. For | |
2707 | * everyone else do it here. | |
2708 | */ | |
666a4537 | 2709 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
08d8a232 VS |
2710 | intel_hpd_init(dev_priv); |
2711 | ||
2503a0fe KM |
2712 | intel_enable_ipc(dev_priv); |
2713 | ||
1f814dac ID |
2714 | enable_rpm_wakeref_asserts(dev_priv); |
2715 | ||
0ab9cfeb ID |
2716 | if (ret) |
2717 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | |
2718 | else | |
2719 | DRM_DEBUG_KMS("Device resumed\n"); | |
2720 | ||
2721 | return ret; | |
8a187455 PZ |
2722 | } |
2723 | ||
42f5551d | 2724 | const struct dev_pm_ops i915_pm_ops = { |
5545dbbf ID |
2725 | /* |
2726 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | |
2727 | * PMSG_RESUME] | |
2728 | */ | |
0206e353 | 2729 | .suspend = i915_pm_suspend, |
76c4b250 ID |
2730 | .suspend_late = i915_pm_suspend_late, |
2731 | .resume_early = i915_pm_resume_early, | |
0206e353 | 2732 | .resume = i915_pm_resume, |
5545dbbf ID |
2733 | |
2734 | /* | |
2735 | * S4 event handlers | |
2736 | * @freeze, @freeze_late : called (1) before creating the | |
2737 | * hibernation image [PMSG_FREEZE] and | |
2738 | * (2) after rebooting, before restoring | |
2739 | * the image [PMSG_QUIESCE] | |
2740 | * @thaw, @thaw_early : called (1) after creating the hibernation | |
2741 | * image, before writing it [PMSG_THAW] | |
2742 | * and (2) after failing to create or | |
2743 | * restore the image [PMSG_RECOVER] | |
2744 | * @poweroff, @poweroff_late: called after writing the hibernation | |
2745 | * image, before rebooting [PMSG_HIBERNATE] | |
2746 | * @restore, @restore_early : called after rebooting and restoring the | |
2747 | * hibernation image [PMSG_RESTORE] | |
2748 | */ | |
1f19ac2a CW |
2749 | .freeze = i915_pm_freeze, |
2750 | .freeze_late = i915_pm_freeze_late, | |
2751 | .thaw_early = i915_pm_thaw_early, | |
2752 | .thaw = i915_pm_thaw, | |
36d61e67 | 2753 | .poweroff = i915_pm_suspend, |
ab3be73f | 2754 | .poweroff_late = i915_pm_poweroff_late, |
1f19ac2a CW |
2755 | .restore_early = i915_pm_restore_early, |
2756 | .restore = i915_pm_restore, | |
5545dbbf ID |
2757 | |
2758 | /* S0ix (via runtime suspend) event handlers */ | |
97bea207 PZ |
2759 | .runtime_suspend = intel_runtime_suspend, |
2760 | .runtime_resume = intel_runtime_resume, | |
cbda12d7 ZW |
2761 | }; |
2762 | ||
78b68556 | 2763 | static const struct vm_operations_struct i915_gem_vm_ops = { |
de151cf6 | 2764 | .fault = i915_gem_fault, |
ab00b3e5 JB |
2765 | .open = drm_gem_vm_open, |
2766 | .close = drm_gem_vm_close, | |
de151cf6 JB |
2767 | }; |
2768 | ||
e08e96de AV |
2769 | static const struct file_operations i915_driver_fops = { |
2770 | .owner = THIS_MODULE, | |
2771 | .open = drm_open, | |
2772 | .release = drm_release, | |
2773 | .unlocked_ioctl = drm_ioctl, | |
2774 | .mmap = drm_gem_mmap, | |
2775 | .poll = drm_poll, | |
e08e96de | 2776 | .read = drm_read, |
e08e96de | 2777 | .compat_ioctl = i915_compat_ioctl, |
e08e96de AV |
2778 | .llseek = noop_llseek, |
2779 | }; | |
2780 | ||
0673ad47 CW |
2781 | static int |
2782 | i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, | |
2783 | struct drm_file *file) | |
2784 | { | |
2785 | return -ENODEV; | |
2786 | } | |
2787 | ||
2788 | static const struct drm_ioctl_desc i915_ioctls[] = { | |
2789 | DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2790 | DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), | |
2791 | DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), | |
2792 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), | |
2793 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), | |
2794 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), | |
2795 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), | |
2796 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2797 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | |
2798 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | |
2799 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2800 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), | |
2801 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2802 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2803 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), | |
2804 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), | |
2805 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2806 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2807 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | |
fec0445c | 2808 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), |
0673ad47 CW |
2809 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
2810 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
2811 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2812 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), | |
2813 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), | |
2814 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2815 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2816 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2817 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), | |
2818 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), | |
2819 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), | |
2820 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), | |
2821 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), | |
2822 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), | |
2823 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), | |
111dbcab CW |
2824 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), |
2825 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), | |
0673ad47 CW |
2826 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), |
2827 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), | |
2828 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), | |
2829 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2830 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2831 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2832 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), | |
2833 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), | |
2834 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), | |
2835 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), | |
2836 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), | |
2837 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), | |
2838 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), | |
2839 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), | |
2840 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), | |
eec688e1 | 2841 | DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), |
f89823c2 LL |
2842 | DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
2843 | DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
0673ad47 CW |
2844 | }; |
2845 | ||
1da177e4 | 2846 | static struct drm_driver driver = { |
0c54781b MW |
2847 | /* Don't use MTRRs here; the Xserver or userspace app should |
2848 | * deal with them for Intel hardware. | |
792d2b9a | 2849 | */ |
673a394b | 2850 | .driver_features = |
10ba5012 | 2851 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | |
cf6e7bac | 2852 | DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ, |
cad3688f | 2853 | .release = i915_driver_release, |
673a394b | 2854 | .open = i915_driver_open, |
22eae947 | 2855 | .lastclose = i915_driver_lastclose, |
673a394b | 2856 | .postclose = i915_driver_postclose, |
d8e29209 | 2857 | |
b1f788c6 | 2858 | .gem_close_object = i915_gem_close_object, |
f0cd5182 | 2859 | .gem_free_object_unlocked = i915_gem_free_object, |
de151cf6 | 2860 | .gem_vm_ops = &i915_gem_vm_ops, |
1286ff73 DV |
2861 | |
2862 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
2863 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
2864 | .gem_prime_export = i915_gem_prime_export, | |
2865 | .gem_prime_import = i915_gem_prime_import, | |
2866 | ||
ff72145b | 2867 | .dumb_create = i915_gem_dumb_create, |
da6b51d0 | 2868 | .dumb_map_offset = i915_gem_mmap_gtt, |
1da177e4 | 2869 | .ioctls = i915_ioctls, |
0673ad47 | 2870 | .num_ioctls = ARRAY_SIZE(i915_ioctls), |
e08e96de | 2871 | .fops = &i915_driver_fops, |
22eae947 DA |
2872 | .name = DRIVER_NAME, |
2873 | .desc = DRIVER_DESC, | |
2874 | .date = DRIVER_DATE, | |
2875 | .major = DRIVER_MAJOR, | |
2876 | .minor = DRIVER_MINOR, | |
2877 | .patchlevel = DRIVER_PATCHLEVEL, | |
1da177e4 | 2878 | }; |
66d9cb5d CW |
2879 | |
2880 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | |
2881 | #include "selftests/mock_drm.c" | |
2882 | #endif |