Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- |
2 | */ | |
0d6aa60b | 3 | /* |
bc54fd1a | 4 | * |
1da177e4 LT |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
6 | * All Rights Reserved. | |
bc54fd1a DA |
7 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | |
9 | * copy of this software and associated documentation files (the | |
10 | * "Software"), to deal in the Software without restriction, including | |
11 | * without limitation the rights to use, copy, modify, merge, publish, | |
12 | * distribute, sub license, and/or sell copies of the Software, and to | |
13 | * permit persons to whom the Software is furnished to do so, subject to | |
14 | * the following conditions: | |
15 | * | |
16 | * The above copyright notice and this permission notice (including the | |
17 | * next paragraph) shall be included in all copies or substantial portions | |
18 | * of the Software. | |
19 | * | |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
27 | * | |
0d6aa60b | 28 | */ |
1da177e4 | 29 | |
e5747e3a | 30 | #include <linux/acpi.h> |
0673ad47 CW |
31 | #include <linux/device.h> |
32 | #include <linux/oom.h> | |
e0cd3608 | 33 | #include <linux/module.h> |
0673ad47 CW |
34 | #include <linux/pci.h> |
35 | #include <linux/pm.h> | |
d6102977 | 36 | #include <linux/pm_runtime.h> |
0673ad47 CW |
37 | #include <linux/pnp.h> |
38 | #include <linux/slab.h> | |
704ab614 | 39 | #include <linux/vga_switcheroo.h> |
0673ad47 CW |
40 | #include <linux/vt.h> |
41 | #include <acpi/video.h> | |
42 | ||
a667fb40 | 43 | #include <drm/drm_atomic_helper.h> |
d0e93599 SR |
44 | #include <drm/drm_ioctl.h> |
45 | #include <drm/drm_irq.h> | |
7fb81e9d | 46 | #include <drm/drm_managed.h> |
d0e93599 | 47 | #include <drm/drm_probe_helper.h> |
0673ad47 | 48 | |
df0566a6 JN |
49 | #include "display/intel_acpi.h" |
50 | #include "display/intel_audio.h" | |
51 | #include "display/intel_bw.h" | |
52 | #include "display/intel_cdclk.h" | |
06d3ff6e | 53 | #include "display/intel_csr.h" |
926b005c | 54 | #include "display/intel_display_debugfs.h" |
1d455f8d | 55 | #include "display/intel_display_types.h" |
379bc100 | 56 | #include "display/intel_dp.h" |
df0566a6 | 57 | #include "display/intel_fbdev.h" |
df0566a6 JN |
58 | #include "display/intel_hotplug.h" |
59 | #include "display/intel_overlay.h" | |
60 | #include "display/intel_pipe_crc.h" | |
61 | #include "display/intel_sprite.h" | |
4fb87831 | 62 | #include "display/intel_vga.h" |
379bc100 | 63 | |
10be98a7 | 64 | #include "gem/i915_gem_context.h" |
afa13085 | 65 | #include "gem/i915_gem_ioctls.h" |
cc662126 | 66 | #include "gem/i915_gem_mman.h" |
24635c51 | 67 | #include "gt/intel_gt.h" |
79ffac85 | 68 | #include "gt/intel_gt_pm.h" |
2248a283 | 69 | #include "gt/intel_rc6.h" |
112ed2d3 | 70 | |
2126d3e9 | 71 | #include "i915_debugfs.h" |
0673ad47 | 72 | #include "i915_drv.h" |
062705be | 73 | #include "i915_ioc32.h" |
440e2b3d | 74 | #include "i915_irq.h" |
9c9082b9 | 75 | #include "i915_memcpy.h" |
db94e9f1 | 76 | #include "i915_perf.h" |
a446ae2c | 77 | #include "i915_query.h" |
bdd1510c | 78 | #include "i915_suspend.h" |
63bf8301 | 79 | #include "i915_switcheroo.h" |
be68261d | 80 | #include "i915_sysfs.h" |
331c201a | 81 | #include "i915_trace.h" |
0673ad47 | 82 | #include "i915_vgpu.h" |
d28ae3b2 | 83 | #include "intel_dram.h" |
6e482b96 | 84 | #include "intel_gvt.h" |
3fc794f2 | 85 | #include "intel_memory_region.h" |
696173b0 | 86 | #include "intel_pm.h" |
f9c730ed | 87 | #include "intel_sideband.h" |
fb5f432a | 88 | #include "vlv_suspend.h" |
79e53945 | 89 | |
70a59dd8 | 90 | static const struct drm_driver driver; |
112b715e | 91 | |
da5f53bf | 92 | static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) |
0673ad47 | 93 | { |
57b29646 SK |
94 | int domain = pci_domain_nr(dev_priv->drm.pdev->bus); |
95 | ||
96 | dev_priv->bridge_dev = | |
97 | pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0)); | |
0673ad47 | 98 | if (!dev_priv->bridge_dev) { |
00376ccf | 99 | drm_err(&dev_priv->drm, "bridge device not found\n"); |
0673ad47 CW |
100 | return -1; |
101 | } | |
102 | return 0; | |
103 | } | |
104 | ||
105 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | |
106 | static int | |
da5f53bf | 107 | intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) |
0673ad47 | 108 | { |
514e1d64 | 109 | int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
0673ad47 CW |
110 | u32 temp_lo, temp_hi = 0; |
111 | u64 mchbar_addr; | |
112 | int ret; | |
113 | ||
514e1d64 | 114 | if (INTEL_GEN(dev_priv) >= 4) |
0673ad47 CW |
115 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); |
116 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | |
117 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | |
118 | ||
119 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | |
120 | #ifdef CONFIG_PNP | |
121 | if (mchbar_addr && | |
122 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) | |
123 | return 0; | |
124 | #endif | |
125 | ||
126 | /* Get some space for it */ | |
127 | dev_priv->mch_res.name = "i915 MCHBAR"; | |
128 | dev_priv->mch_res.flags = IORESOURCE_MEM; | |
129 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | |
130 | &dev_priv->mch_res, | |
131 | MCHBAR_SIZE, MCHBAR_SIZE, | |
132 | PCIBIOS_MIN_MEM, | |
133 | 0, pcibios_align_resource, | |
134 | dev_priv->bridge_dev); | |
135 | if (ret) { | |
00376ccf | 136 | drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret); |
0673ad47 CW |
137 | dev_priv->mch_res.start = 0; |
138 | return ret; | |
139 | } | |
140 | ||
514e1d64 | 141 | if (INTEL_GEN(dev_priv) >= 4) |
0673ad47 CW |
142 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, |
143 | upper_32_bits(dev_priv->mch_res.start)); | |
144 | ||
145 | pci_write_config_dword(dev_priv->bridge_dev, reg, | |
146 | lower_32_bits(dev_priv->mch_res.start)); | |
147 | return 0; | |
148 | } | |
149 | ||
150 | /* Setup MCHBAR if possible, return true if we should disable it again */ | |
151 | static void | |
da5f53bf | 152 | intel_setup_mchbar(struct drm_i915_private *dev_priv) |
0673ad47 | 153 | { |
514e1d64 | 154 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
0673ad47 CW |
155 | u32 temp; |
156 | bool enabled; | |
157 | ||
920a14b2 | 158 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
0673ad47 CW |
159 | return; |
160 | ||
161 | dev_priv->mchbar_need_disable = false; | |
162 | ||
50a0bc90 | 163 | if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { |
0673ad47 CW |
164 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); |
165 | enabled = !!(temp & DEVEN_MCHBAR_EN); | |
166 | } else { | |
167 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
168 | enabled = temp & 1; | |
169 | } | |
170 | ||
171 | /* If it's already enabled, don't have to do anything */ | |
172 | if (enabled) | |
173 | return; | |
174 | ||
da5f53bf | 175 | if (intel_alloc_mchbar_resource(dev_priv)) |
0673ad47 CW |
176 | return; |
177 | ||
178 | dev_priv->mchbar_need_disable = true; | |
179 | ||
180 | /* Space is allocated or reserved, so enable it. */ | |
50a0bc90 | 181 | if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { |
0673ad47 CW |
182 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN, |
183 | temp | DEVEN_MCHBAR_EN); | |
184 | } else { | |
185 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
186 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | |
187 | } | |
188 | } | |
189 | ||
190 | static void | |
da5f53bf | 191 | intel_teardown_mchbar(struct drm_i915_private *dev_priv) |
0673ad47 | 192 | { |
514e1d64 | 193 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
0673ad47 CW |
194 | |
195 | if (dev_priv->mchbar_need_disable) { | |
50a0bc90 | 196 | if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { |
0673ad47 CW |
197 | u32 deven_val; |
198 | ||
199 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN, | |
200 | &deven_val); | |
201 | deven_val &= ~DEVEN_MCHBAR_EN; | |
202 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN, | |
203 | deven_val); | |
204 | } else { | |
205 | u32 mchbar_val; | |
206 | ||
207 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, | |
208 | &mchbar_val); | |
209 | mchbar_val &= ~1; | |
210 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, | |
211 | mchbar_val); | |
212 | } | |
213 | } | |
214 | ||
215 | if (dev_priv->mch_res.start) | |
216 | release_resource(&dev_priv->mch_res); | |
217 | } | |
218 | ||
0673ad47 CW |
219 | static int i915_workqueues_init(struct drm_i915_private *dev_priv) |
220 | { | |
221 | /* | |
222 | * The i915 workqueue is primarily used for batched retirement of | |
223 | * requests (and thus managing bo) once the task has been completed | |
e61e0f51 | 224 | * by the GPU. i915_retire_requests() is called directly when we |
0673ad47 CW |
225 | * need high-priority retirement, such as waiting for an explicit |
226 | * bo. | |
227 | * | |
228 | * It is also used for periodic low-priority events, such as | |
229 | * idle-timers and recording error state. | |
230 | * | |
231 | * All tasks on the workqueue are expected to acquire the dev mutex | |
232 | * so there is no point in running more than one instance of the | |
233 | * workqueue at any time. Use an ordered one. | |
234 | */ | |
235 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); | |
236 | if (dev_priv->wq == NULL) | |
237 | goto out_err; | |
238 | ||
239 | dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); | |
240 | if (dev_priv->hotplug.dp_wq == NULL) | |
241 | goto out_free_wq; | |
242 | ||
0673ad47 CW |
243 | return 0; |
244 | ||
0673ad47 CW |
245 | out_free_wq: |
246 | destroy_workqueue(dev_priv->wq); | |
247 | out_err: | |
00376ccf | 248 | drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n"); |
0673ad47 CW |
249 | |
250 | return -ENOMEM; | |
251 | } | |
252 | ||
253 | static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) | |
254 | { | |
0673ad47 CW |
255 | destroy_workqueue(dev_priv->hotplug.dp_wq); |
256 | destroy_workqueue(dev_priv->wq); | |
257 | } | |
258 | ||
4fc7e845 PZ |
259 | /* |
260 | * We don't keep the workarounds for pre-production hardware, so we expect our | |
261 | * driver to fail on these machines in one way or another. A little warning on | |
262 | * dmesg may help both the user and the bug triagers. | |
6a7a6a98 CW |
263 | * |
264 | * Our policy for removing pre-production workarounds is to keep the | |
265 | * current gen workarounds as a guide to the bring-up of the next gen | |
266 | * (workarounds have a habit of persisting!). Anything older than that | |
267 | * should be removed along with the complications they introduce. | |
4fc7e845 PZ |
268 | */ |
269 | static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) | |
270 | { | |
248a124d CW |
271 | bool pre = false; |
272 | ||
273 | pre |= IS_HSW_EARLY_SDV(dev_priv); | |
274 | pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); | |
0102ba1f | 275 | pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); |
96c5a15f | 276 | pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0); |
834c6bb7 | 277 | pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2); |
248a124d | 278 | |
7c5ff4a2 | 279 | if (pre) { |
00376ccf | 280 | drm_err(&dev_priv->drm, "This is a pre-production stepping. " |
4fc7e845 | 281 | "It may not be fully functional.\n"); |
7c5ff4a2 CW |
282 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); |
283 | } | |
4fc7e845 PZ |
284 | } |
285 | ||
640b50fa CW |
286 | static void sanitize_gpu(struct drm_i915_private *i915) |
287 | { | |
288 | if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) | |
289 | __intel_gt_reset(&i915->gt, ALL_ENGINES); | |
290 | } | |
291 | ||
0673ad47 | 292 | /** |
0b61b8b0 | 293 | * i915_driver_early_probe - setup state not requiring device access |
0673ad47 CW |
294 | * @dev_priv: device private |
295 | * | |
296 | * Initialize everything that is a "SW-only" state, that is state not | |
297 | * requiring accessing the device or exposing the driver via kernel internal | |
298 | * or userspace interfaces. Example steps belonging here: lock initialization, | |
299 | * system memory allocation, setting up device specific attributes and | |
300 | * function hooks not requiring accessing the device. | |
301 | */ | |
0b61b8b0 | 302 | static int i915_driver_early_probe(struct drm_i915_private *dev_priv) |
0673ad47 | 303 | { |
0673ad47 CW |
304 | int ret = 0; |
305 | ||
50d84418 | 306 | if (i915_inject_probe_failure(dev_priv)) |
0673ad47 CW |
307 | return -ENODEV; |
308 | ||
805446c8 TU |
309 | intel_device_info_subplatform_init(dev_priv); |
310 | ||
0a9b2630 | 311 | intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); |
01385758 | 312 | intel_uncore_init_early(&dev_priv->uncore, dev_priv); |
6cbe8830 | 313 | |
0673ad47 CW |
314 | spin_lock_init(&dev_priv->irq_lock); |
315 | spin_lock_init(&dev_priv->gpu_error.lock); | |
316 | mutex_init(&dev_priv->backlight_lock); | |
317eaa95 | 317 | |
0673ad47 | 318 | mutex_init(&dev_priv->sb_lock); |
4d4dda48 | 319 | cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); |
a75d035f | 320 | |
0673ad47 CW |
321 | mutex_init(&dev_priv->av_mutex); |
322 | mutex_init(&dev_priv->wm.wm_mutex); | |
323 | mutex_init(&dev_priv->pps_mutex); | |
9055aac7 | 324 | mutex_init(&dev_priv->hdcp_comp_mutex); |
0673ad47 | 325 | |
0b1de5d5 | 326 | i915_memcpy_init_early(dev_priv); |
69c66355 | 327 | intel_runtime_pm_init_early(&dev_priv->runtime_pm); |
0b1de5d5 | 328 | |
0673ad47 CW |
329 | ret = i915_workqueues_init(dev_priv); |
330 | if (ret < 0) | |
f3bcb0cc | 331 | return ret; |
0673ad47 | 332 | |
fb5f432a | 333 | ret = vlv_suspend_init(dev_priv); |
1bcd8688 DCS |
334 | if (ret < 0) |
335 | goto err_workqueues; | |
336 | ||
6f76098f DCS |
337 | intel_wopcm_init_early(&dev_priv->wopcm); |
338 | ||
724e9564 | 339 | intel_gt_init_early(&dev_priv->gt, dev_priv); |
24635c51 | 340 | |
a3f356b2 | 341 | i915_gem_init_early(dev_priv); |
a0de908d | 342 | |
0673ad47 | 343 | /* This must be called before any calls to HAS_PCH_* */ |
da5f53bf | 344 | intel_detect_pch(dev_priv); |
0673ad47 | 345 | |
192aa181 | 346 | intel_pm_setup(dev_priv); |
f28ec6f4 ID |
347 | ret = intel_power_domains_init(dev_priv); |
348 | if (ret < 0) | |
6f76098f | 349 | goto err_gem; |
0673ad47 CW |
350 | intel_irq_init(dev_priv); |
351 | intel_init_display_hooks(dev_priv); | |
352 | intel_init_clock_gating_hooks(dev_priv); | |
353 | intel_init_audio_hooks(dev_priv); | |
0673ad47 | 354 | |
4fc7e845 | 355 | intel_detect_preproduction_hw(dev_priv); |
0673ad47 CW |
356 | |
357 | return 0; | |
358 | ||
6f76098f | 359 | err_gem: |
f28ec6f4 | 360 | i915_gem_cleanup_early(dev_priv); |
6cf72db6 | 361 | intel_gt_driver_late_release(&dev_priv->gt); |
fb5f432a | 362 | vlv_suspend_cleanup(dev_priv); |
1bcd8688 | 363 | err_workqueues: |
0673ad47 CW |
364 | i915_workqueues_cleanup(dev_priv); |
365 | return ret; | |
366 | } | |
367 | ||
368 | /** | |
3b58a945 | 369 | * i915_driver_late_release - cleanup the setup done in |
0b61b8b0 | 370 | * i915_driver_early_probe() |
0673ad47 CW |
371 | * @dev_priv: device private |
372 | */ | |
3b58a945 | 373 | static void i915_driver_late_release(struct drm_i915_private *dev_priv) |
0673ad47 | 374 | { |
cefcff8f | 375 | intel_irq_fini(dev_priv); |
f28ec6f4 | 376 | intel_power_domains_cleanup(dev_priv); |
a0de908d | 377 | i915_gem_cleanup_early(dev_priv); |
6cf72db6 | 378 | intel_gt_driver_late_release(&dev_priv->gt); |
fb5f432a | 379 | vlv_suspend_cleanup(dev_priv); |
0673ad47 | 380 | i915_workqueues_cleanup(dev_priv); |
a75d035f | 381 | |
4d4dda48 | 382 | cpu_latency_qos_remove_request(&dev_priv->sb_qos); |
a75d035f | 383 | mutex_destroy(&dev_priv->sb_lock); |
8a25c4be JN |
384 | |
385 | i915_params_free(&dev_priv->params); | |
0673ad47 CW |
386 | } |
387 | ||
0673ad47 | 388 | /** |
0b61b8b0 | 389 | * i915_driver_mmio_probe - setup device MMIO |
0673ad47 CW |
390 | * @dev_priv: device private |
391 | * | |
392 | * Setup minimal device state necessary for MMIO accesses later in the | |
393 | * initialization sequence. The setup here should avoid any other device-wide | |
394 | * side effects or exposing the driver via kernel internal or user space | |
395 | * interfaces. | |
396 | */ | |
0b61b8b0 | 397 | static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) |
0673ad47 | 398 | { |
0673ad47 CW |
399 | int ret; |
400 | ||
50d84418 | 401 | if (i915_inject_probe_failure(dev_priv)) |
0673ad47 CW |
402 | return -ENODEV; |
403 | ||
da5f53bf | 404 | if (i915_get_bridge_dev(dev_priv)) |
0673ad47 CW |
405 | return -EIO; |
406 | ||
3de6f852 | 407 | ret = intel_uncore_init_mmio(&dev_priv->uncore); |
0673ad47 | 408 | if (ret < 0) |
63ffbcda | 409 | goto err_bridge; |
0673ad47 | 410 | |
25286aac DCS |
411 | /* Try to make sure MCHBAR is enabled before poking at it */ |
412 | intel_setup_mchbar(dev_priv); | |
c864e9ab | 413 | intel_device_info_runtime_init(dev_priv); |
63ffbcda | 414 | |
d0eb6866 | 415 | ret = intel_gt_init_mmio(&dev_priv->gt); |
63ffbcda JL |
416 | if (ret) |
417 | goto err_uncore; | |
418 | ||
640b50fa CW |
419 | /* As early as possible, scrub existing GPU state before clobbering */ |
420 | sanitize_gpu(dev_priv); | |
421 | ||
0673ad47 CW |
422 | return 0; |
423 | ||
63ffbcda | 424 | err_uncore: |
25286aac | 425 | intel_teardown_mchbar(dev_priv); |
3de6f852 | 426 | intel_uncore_fini_mmio(&dev_priv->uncore); |
63ffbcda | 427 | err_bridge: |
0673ad47 CW |
428 | pci_dev_put(dev_priv->bridge_dev); |
429 | ||
430 | return ret; | |
431 | } | |
432 | ||
433 | /** | |
0b61b8b0 | 434 | * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe() |
0673ad47 CW |
435 | * @dev_priv: device private |
436 | */ | |
3b58a945 | 437 | static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) |
0673ad47 | 438 | { |
25286aac | 439 | intel_teardown_mchbar(dev_priv); |
3de6f852 | 440 | intel_uncore_fini_mmio(&dev_priv->uncore); |
0673ad47 CW |
441 | pci_dev_put(dev_priv->bridge_dev); |
442 | } | |
443 | ||
94b4f3ba CW |
444 | static void intel_sanitize_options(struct drm_i915_private *dev_priv) |
445 | { | |
67b7f33e | 446 | intel_gvt_sanitize_options(dev_priv); |
94b4f3ba CW |
447 | } |
448 | ||
31a02eb7 MR |
449 | /** |
450 | * i915_set_dma_info - set all relevant PCI dma info as configured for the | |
451 | * platform | |
452 | * @i915: valid i915 instance | |
453 | * | |
454 | * Set the dma max segment size, device and coherent masks. The dma mask set | |
455 | * needs to occur before i915_ggtt_probe_hw. | |
456 | * | |
457 | * A couple of platforms have special needs. Address them as well. | |
458 | * | |
459 | */ | |
460 | static int i915_set_dma_info(struct drm_i915_private *i915) | |
461 | { | |
462 | struct pci_dev *pdev = i915->drm.pdev; | |
463 | unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size; | |
464 | int ret; | |
465 | ||
466 | GEM_BUG_ON(!mask_size); | |
467 | ||
468 | /* | |
469 | * We don't have a max segment size, so set it to the max so sg's | |
470 | * debugging layer doesn't complain | |
471 | */ | |
472 | dma_set_max_seg_size(&pdev->dev, UINT_MAX); | |
473 | ||
474 | ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); | |
475 | if (ret) | |
476 | goto mask_err; | |
477 | ||
478 | /* overlay on gen2 is broken and can't address above 1G */ | |
479 | if (IS_GEN(i915, 2)) | |
480 | mask_size = 30; | |
481 | ||
482 | /* | |
483 | * 965GM sometimes incorrectly writes to hardware status page (HWS) | |
484 | * using 32bit addressing, overwriting memory if HWS is located | |
485 | * above 4GB. | |
486 | * | |
487 | * The documentation also mentions an issue with undefined | |
488 | * behaviour if any general state is accessed within a page above 4GB, | |
489 | * which also needs to be handled carefully. | |
490 | */ | |
491 | if (IS_I965G(i915) || IS_I965GM(i915)) | |
492 | mask_size = 32; | |
493 | ||
494 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); | |
495 | if (ret) | |
496 | goto mask_err; | |
497 | ||
498 | return 0; | |
499 | ||
500 | mask_err: | |
501 | drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret); | |
502 | return ret; | |
503 | } | |
504 | ||
0673ad47 | 505 | /** |
0b61b8b0 | 506 | * i915_driver_hw_probe - setup state requiring device access |
0673ad47 CW |
507 | * @dev_priv: device private |
508 | * | |
509 | * Setup state that requires accessing the device, but doesn't require | |
510 | * exposing the driver via kernel internal or userspace interfaces. | |
511 | */ | |
0b61b8b0 | 512 | static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) |
0673ad47 | 513 | { |
52a05c30 | 514 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 CW |
515 | int ret; |
516 | ||
50d84418 | 517 | if (i915_inject_probe_failure(dev_priv)) |
0673ad47 CW |
518 | return -ENODEV; |
519 | ||
4bdafb9d CW |
520 | if (HAS_PPGTT(dev_priv)) { |
521 | if (intel_vgpu_active(dev_priv) && | |
ca6ac684 | 522 | !intel_vgpu_has_full_ppgtt(dev_priv)) { |
4bdafb9d CW |
523 | i915_report_error(dev_priv, |
524 | "incompatible vGPU found, support for isolated ppGTT required\n"); | |
525 | return -ENXIO; | |
526 | } | |
527 | } | |
528 | ||
46592892 CW |
529 | if (HAS_EXECLISTS(dev_priv)) { |
530 | /* | |
531 | * Older GVT emulation depends upon intercepting CSB mmio, | |
532 | * which we no longer use, preferring to use the HWSP cache | |
533 | * instead. | |
534 | */ | |
535 | if (intel_vgpu_active(dev_priv) && | |
536 | !intel_vgpu_has_hwsp_emulation(dev_priv)) { | |
537 | i915_report_error(dev_priv, | |
538 | "old vGPU host found, support for HWSP emulation required\n"); | |
539 | return -ENXIO; | |
540 | } | |
541 | } | |
542 | ||
94b4f3ba | 543 | intel_sanitize_options(dev_priv); |
0673ad47 | 544 | |
f6ac993f | 545 | /* needs to be done before ggtt probe */ |
d28ae3b2 | 546 | intel_dram_edram_detect(dev_priv); |
f6ac993f | 547 | |
31a02eb7 MR |
548 | ret = i915_set_dma_info(dev_priv); |
549 | if (ret) | |
550 | return ret; | |
551 | ||
9f9b2792 LL |
552 | i915_perf_init(dev_priv); |
553 | ||
97d6d7ab | 554 | ret = i915_ggtt_probe_hw(dev_priv); |
0673ad47 | 555 | if (ret) |
9f172f6f | 556 | goto err_perf; |
0673ad47 | 557 | |
f2521f77 GH |
558 | ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb"); |
559 | if (ret) | |
9f172f6f | 560 | goto err_ggtt; |
0673ad47 | 561 | |
97d6d7ab | 562 | ret = i915_ggtt_init_hw(dev_priv); |
0088e522 | 563 | if (ret) |
9f172f6f | 564 | goto err_ggtt; |
0088e522 | 565 | |
3fc794f2 CW |
566 | ret = intel_memory_regions_hw_probe(dev_priv); |
567 | if (ret) | |
568 | goto err_ggtt; | |
569 | ||
797a6153 | 570 | intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt); |
d8a44248 | 571 | |
97d6d7ab | 572 | ret = i915_ggtt_enable_hw(dev_priv); |
0088e522 | 573 | if (ret) { |
00376ccf | 574 | drm_err(&dev_priv->drm, "failed to enable GGTT\n"); |
3fc794f2 | 575 | goto err_mem_regions; |
0088e522 CW |
576 | } |
577 | ||
52a05c30 | 578 | pci_set_master(pdev); |
0673ad47 | 579 | |
25d140fa | 580 | intel_gt_init_workarounds(dev_priv); |
0673ad47 CW |
581 | |
582 | /* On the 945G/GM, the chipset reports the MSI capability on the | |
583 | * integrated graphics even though the support isn't actually there | |
584 | * according to the published specs. It doesn't appear to function | |
585 | * correctly in testing on 945G. | |
586 | * This may be a side effect of MSI having been made available for PEG | |
587 | * and the registers being closely associated. | |
588 | * | |
589 | * According to chipset errata, on the 965GM, MSI interrupts may | |
e38c2da0 VS |
590 | * be lost or delayed, and was defeatured. MSI interrupts seem to |
591 | * get lost on g4x as well, and interrupt delivery seems to stay | |
592 | * properly dead afterwards. So we'll just disable them for all | |
593 | * pre-gen5 chipsets. | |
8a29c778 LDM |
594 | * |
595 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy | |
596 | * interrupts even when in MSI mode. This results in spurious | |
597 | * interrupt warnings if the legacy irq no. is shared with another | |
598 | * device. The kernel then disables that interrupt source and so | |
599 | * prevents the other device from working properly. | |
0673ad47 | 600 | */ |
e38c2da0 | 601 | if (INTEL_GEN(dev_priv) >= 5) { |
52a05c30 | 602 | if (pci_enable_msi(pdev) < 0) |
00376ccf | 603 | drm_dbg(&dev_priv->drm, "can't enable MSI"); |
0673ad47 CW |
604 | } |
605 | ||
26f837e8 ZW |
606 | ret = intel_gvt_init(dev_priv); |
607 | if (ret) | |
7ab87ede CW |
608 | goto err_msi; |
609 | ||
610 | intel_opregion_setup(dev_priv); | |
cbfa59d4 MK |
611 | /* |
612 | * Fill the dram structure to get the system raw bandwidth and | |
613 | * dram info. This will be used for memory latency calculation. | |
614 | */ | |
d28ae3b2 | 615 | intel_dram_detect(dev_priv); |
cbfa59d4 | 616 | |
f9c730ed MR |
617 | intel_pcode_init(dev_priv); |
618 | ||
c457d9cf | 619 | intel_bw_init_hw(dev_priv); |
26f837e8 | 620 | |
0673ad47 CW |
621 | return 0; |
622 | ||
7ab87ede CW |
623 | err_msi: |
624 | if (pdev->msi_enabled) | |
625 | pci_disable_msi(pdev); | |
3fc794f2 CW |
626 | err_mem_regions: |
627 | intel_memory_regions_driver_release(dev_priv); | |
9f172f6f | 628 | err_ggtt: |
3b58a945 | 629 | i915_ggtt_driver_release(dev_priv); |
9f172f6f CW |
630 | err_perf: |
631 | i915_perf_fini(dev_priv); | |
0673ad47 CW |
632 | return ret; |
633 | } | |
634 | ||
635 | /** | |
78dae1ac | 636 | * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe() |
0673ad47 CW |
637 | * @dev_priv: device private |
638 | */ | |
78dae1ac | 639 | static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) |
0673ad47 | 640 | { |
52a05c30 | 641 | struct pci_dev *pdev = dev_priv->drm.pdev; |
0673ad47 | 642 | |
9f9b2792 LL |
643 | i915_perf_fini(dev_priv); |
644 | ||
52a05c30 DW |
645 | if (pdev->msi_enabled) |
646 | pci_disable_msi(pdev); | |
0673ad47 CW |
647 | } |
648 | ||
649 | /** | |
650 | * i915_driver_register - register the driver with the rest of the system | |
651 | * @dev_priv: device private | |
652 | * | |
653 | * Perform any steps necessary to make the driver available via kernel | |
654 | * internal or userspace interfaces. | |
655 | */ | |
656 | static void i915_driver_register(struct drm_i915_private *dev_priv) | |
657 | { | |
91c8a326 | 658 | struct drm_device *dev = &dev_priv->drm; |
0673ad47 | 659 | |
c29579d2 | 660 | i915_gem_driver_register(dev_priv); |
b46a33e2 | 661 | i915_pmu_register(dev_priv); |
0673ad47 | 662 | |
9e859eb9 | 663 | intel_vgpu_register(dev_priv); |
0673ad47 CW |
664 | |
665 | /* Reveal our presence to userspace */ | |
666 | if (drm_dev_register(dev, 0) == 0) { | |
667 | i915_debugfs_register(dev_priv); | |
949ab9d2 LDM |
668 | if (HAS_DISPLAY(dev_priv)) |
669 | intel_display_debugfs_register(dev_priv); | |
694c2828 | 670 | i915_setup_sysfs(dev_priv); |
442b8c06 RB |
671 | |
672 | /* Depends on sysfs having been initialized */ | |
673 | i915_perf_register(dev_priv); | |
0673ad47 | 674 | } else |
00376ccf WK |
675 | drm_err(&dev_priv->drm, |
676 | "Failed to register driver for userspace access!\n"); | |
0673ad47 | 677 | |
da27bd41 | 678 | if (HAS_DISPLAY(dev_priv)) { |
0673ad47 CW |
679 | /* Must be done after probing outputs */ |
680 | intel_opregion_register(dev_priv); | |
681 | acpi_video_register(); | |
682 | } | |
683 | ||
42014f69 | 684 | intel_gt_driver_register(&dev_priv->gt); |
0673ad47 | 685 | |
eef57324 | 686 | intel_audio_init(dev_priv); |
0673ad47 CW |
687 | |
688 | /* | |
689 | * Some ports require correctly set-up hpd registers for detection to | |
690 | * work properly (leading to ghost connected connector status), e.g. VGA | |
691 | * on gm45. Hence we can only set up the initial fbdev config after hpd | |
692 | * irqs are fully enabled. We do it last so that the async config | |
693 | * cannot run before the connectors are registered. | |
694 | */ | |
695 | intel_fbdev_initial_config_async(dev); | |
448aa911 CW |
696 | |
697 | /* | |
698 | * We need to coordinate the hotplugs with the asynchronous fbdev | |
699 | * configuration, for which we use the fbdev->async_cookie. | |
700 | */ | |
da27bd41 | 701 | if (HAS_DISPLAY(dev_priv)) |
448aa911 | 702 | drm_kms_helper_poll_init(dev); |
07d80572 | 703 | |
2cd9a689 | 704 | intel_power_domains_enable(dev_priv); |
69c66355 | 705 | intel_runtime_pm_enable(&dev_priv->runtime_pm); |
46edcdbd JN |
706 | |
707 | intel_register_dsm_handler(); | |
708 | ||
709 | if (i915_switcheroo_register(dev_priv)) | |
710 | drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n"); | |
0673ad47 CW |
711 | } |
712 | ||
713 | /** | |
714 | * i915_driver_unregister - cleanup the registration done in i915_driver_regiser() | |
715 | * @dev_priv: device private | |
716 | */ | |
717 | static void i915_driver_unregister(struct drm_i915_private *dev_priv) | |
718 | { | |
46edcdbd JN |
719 | i915_switcheroo_unregister(dev_priv); |
720 | ||
721 | intel_unregister_dsm_handler(); | |
722 | ||
69c66355 | 723 | intel_runtime_pm_disable(&dev_priv->runtime_pm); |
2cd9a689 | 724 | intel_power_domains_disable(dev_priv); |
07d80572 | 725 | |
4f256d82 | 726 | intel_fbdev_unregister(dev_priv); |
eef57324 | 727 | intel_audio_deinit(dev_priv); |
0673ad47 | 728 | |
448aa911 CW |
729 | /* |
730 | * After flushing the fbdev (incl. a late async config which will | |
731 | * have delayed queuing of a hotplug event), then flush the hotplug | |
732 | * events. | |
733 | */ | |
734 | drm_kms_helper_poll_fini(&dev_priv->drm); | |
9d8fddf8 | 735 | drm_atomic_helper_shutdown(&dev_priv->drm); |
448aa911 | 736 | |
42014f69 | 737 | intel_gt_driver_unregister(&dev_priv->gt); |
0673ad47 CW |
738 | acpi_video_unregister(); |
739 | intel_opregion_unregister(dev_priv); | |
740 | ||
442b8c06 | 741 | i915_perf_unregister(dev_priv); |
b46a33e2 | 742 | i915_pmu_unregister(dev_priv); |
442b8c06 | 743 | |
694c2828 | 744 | i915_teardown_sysfs(dev_priv); |
d69990e0 | 745 | drm_dev_unplug(&dev_priv->drm); |
0673ad47 | 746 | |
c29579d2 | 747 | i915_gem_driver_unregister(dev_priv); |
0673ad47 CW |
748 | } |
749 | ||
27d558a1 MW |
750 | static void i915_welcome_messages(struct drm_i915_private *dev_priv) |
751 | { | |
bdbf43d7 | 752 | if (drm_debug_enabled(DRM_UT_DRIVER)) { |
27d558a1 MW |
753 | struct drm_printer p = drm_debug_printer("i915 device info:"); |
754 | ||
805446c8 | 755 | drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", |
1787a984 JN |
756 | INTEL_DEVID(dev_priv), |
757 | INTEL_REVID(dev_priv), | |
758 | intel_platform_name(INTEL_INFO(dev_priv)->platform), | |
805446c8 TU |
759 | intel_subplatform(RUNTIME_INFO(dev_priv), |
760 | INTEL_INFO(dev_priv)->platform), | |
1787a984 JN |
761 | INTEL_GEN(dev_priv)); |
762 | ||
72404978 CW |
763 | intel_device_info_print_static(INTEL_INFO(dev_priv), &p); |
764 | intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); | |
792592e7 | 765 | intel_gt_info_print(&dev_priv->gt.info, &p); |
27d558a1 MW |
766 | } |
767 | ||
768 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) | |
00376ccf | 769 | drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n"); |
27d558a1 | 770 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
00376ccf | 771 | drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n"); |
6dfc4a8f | 772 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) |
00376ccf WK |
773 | drm_info(&dev_priv->drm, |
774 | "DRM_I915_DEBUG_RUNTIME_PM enabled\n"); | |
27d558a1 MW |
775 | } |
776 | ||
55ac5a16 CW |
777 | static struct drm_i915_private * |
778 | i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) | |
779 | { | |
780 | const struct intel_device_info *match_info = | |
781 | (struct intel_device_info *)ent->driver_data; | |
782 | struct intel_device_info *device_info; | |
783 | struct drm_i915_private *i915; | |
784 | ||
274ed9e9 DV |
785 | i915 = devm_drm_dev_alloc(&pdev->dev, &driver, |
786 | struct drm_i915_private, drm); | |
787 | if (IS_ERR(i915)) | |
788 | return i915; | |
55ac5a16 | 789 | |
361f9dc2 CW |
790 | i915->drm.pdev = pdev; |
791 | pci_set_drvdata(pdev, i915); | |
55ac5a16 | 792 | |
8a25c4be JN |
793 | /* Device parameters start as a copy of module parameters. */ |
794 | i915_params_copy(&i915->params, &i915_modparams); | |
795 | ||
55ac5a16 CW |
796 | /* Setup the write-once "constant" device info */ |
797 | device_info = mkwrite_device_info(i915); | |
798 | memcpy(device_info, match_info, sizeof(*device_info)); | |
0258404f | 799 | RUNTIME_INFO(i915)->device_id = pdev->device; |
55ac5a16 | 800 | |
74f6e183 | 801 | BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); |
55ac5a16 CW |
802 | |
803 | return i915; | |
804 | } | |
805 | ||
0673ad47 | 806 | /** |
b01558e5 | 807 | * i915_driver_probe - setup chip and create an initial config |
d2ad3ae4 JL |
808 | * @pdev: PCI device |
809 | * @ent: matching PCI ID entry | |
0673ad47 | 810 | * |
b01558e5 | 811 | * The driver probe routine has to do several things: |
0673ad47 CW |
812 | * - drive output discovery via intel_modeset_init() |
813 | * - initialize the memory manager | |
814 | * - allocate initial config memory | |
815 | * - setup the DRM framebuffer with the allocated memory | |
816 | */ | |
b01558e5 | 817 | int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
0673ad47 | 818 | { |
8d2b47dd ML |
819 | const struct intel_device_info *match_info = |
820 | (struct intel_device_info *)ent->driver_data; | |
8eecfb39 | 821 | struct drm_i915_private *i915; |
0673ad47 | 822 | int ret; |
7d87a7f7 | 823 | |
8eecfb39 JN |
824 | i915 = i915_driver_create(pdev, ent); |
825 | if (IS_ERR(i915)) | |
826 | return PTR_ERR(i915); | |
719388e1 | 827 | |
1feb64c4 | 828 | /* Disable nuclear pageflip by default on pre-ILK */ |
8a25c4be | 829 | if (!i915->params.nuclear_pageflip && match_info->gen < 5) |
8eecfb39 | 830 | i915->drm.driver_features &= ~DRIVER_ATOMIC; |
1feb64c4 | 831 | |
16292243 MA |
832 | /* |
833 | * Check if we support fake LMEM -- for now we only unleash this for | |
834 | * the live selftests(test-and-exit). | |
835 | */ | |
292a27b0 | 836 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
16292243 | 837 | if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) { |
8eecfb39 | 838 | if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 && |
8a25c4be | 839 | i915->params.fake_lmem_start) { |
8eecfb39 | 840 | mkwrite_device_info(i915)->memory_regions = |
16292243 | 841 | REGION_SMEM | REGION_LMEM | REGION_STOLEN; |
8eecfb39 | 842 | GEM_BUG_ON(!HAS_LMEM(i915)); |
16292243 MA |
843 | } |
844 | } | |
292a27b0 | 845 | #endif |
16292243 | 846 | |
0673ad47 CW |
847 | ret = pci_enable_device(pdev); |
848 | if (ret) | |
cad3688f | 849 | goto out_fini; |
1347f5b4 | 850 | |
8eecfb39 | 851 | ret = i915_driver_early_probe(i915); |
0673ad47 CW |
852 | if (ret < 0) |
853 | goto out_pci_disable; | |
ef11bdb3 | 854 | |
8eecfb39 | 855 | disable_rpm_wakeref_asserts(&i915->runtime_pm); |
1da177e4 | 856 | |
9e859eb9 | 857 | intel_vgpu_detect(i915); |
9e138ea1 | 858 | |
8eecfb39 | 859 | ret = i915_driver_mmio_probe(i915); |
0673ad47 CW |
860 | if (ret < 0) |
861 | goto out_runtime_pm_put; | |
79e53945 | 862 | |
8eecfb39 | 863 | ret = i915_driver_hw_probe(i915); |
0673ad47 CW |
864 | if (ret < 0) |
865 | goto out_cleanup_mmio; | |
30c964a6 | 866 | |
d6843dda | 867 | ret = intel_modeset_init_noirq(i915); |
0673ad47 | 868 | if (ret < 0) |
baf54385 | 869 | goto out_cleanup_hw; |
0673ad47 | 870 | |
b664259f JN |
871 | ret = intel_irq_install(i915); |
872 | if (ret) | |
873 | goto out_cleanup_modeset; | |
874 | ||
d6843dda JN |
875 | ret = intel_modeset_init_nogem(i915); |
876 | if (ret) | |
b664259f JN |
877 | goto out_cleanup_irq; |
878 | ||
d6843dda JN |
879 | ret = i915_gem_init(i915); |
880 | if (ret) | |
881 | goto out_cleanup_modeset2; | |
882 | ||
883 | ret = intel_modeset_init(i915); | |
884 | if (ret) | |
885 | goto out_cleanup_gem; | |
886 | ||
8eecfb39 | 887 | i915_driver_register(i915); |
0673ad47 | 888 | |
8eecfb39 | 889 | enable_rpm_wakeref_asserts(&i915->runtime_pm); |
0673ad47 | 890 | |
8eecfb39 | 891 | i915_welcome_messages(i915); |
27d558a1 | 892 | |
7fb81e9d DV |
893 | i915->do_release = true; |
894 | ||
0673ad47 CW |
895 | return 0; |
896 | ||
d6843dda JN |
897 | out_cleanup_gem: |
898 | i915_gem_suspend(i915); | |
899 | i915_gem_driver_remove(i915); | |
900 | i915_gem_driver_release(i915); | |
901 | out_cleanup_modeset2: | |
902 | /* FIXME clean up the error path */ | |
903 | intel_modeset_driver_remove(i915); | |
904 | intel_irq_uninstall(i915); | |
905 | intel_modeset_driver_remove_noirq(i915); | |
906 | goto out_cleanup_modeset; | |
b664259f JN |
907 | out_cleanup_irq: |
908 | intel_irq_uninstall(i915); | |
909 | out_cleanup_modeset: | |
eb4612d8 | 910 | intel_modeset_driver_remove_nogem(i915); |
0673ad47 | 911 | out_cleanup_hw: |
8eecfb39 JN |
912 | i915_driver_hw_remove(i915); |
913 | intel_memory_regions_driver_release(i915); | |
914 | i915_ggtt_driver_release(i915); | |
0673ad47 | 915 | out_cleanup_mmio: |
8eecfb39 | 916 | i915_driver_mmio_release(i915); |
0673ad47 | 917 | out_runtime_pm_put: |
8eecfb39 JN |
918 | enable_rpm_wakeref_asserts(&i915->runtime_pm); |
919 | i915_driver_late_release(i915); | |
0673ad47 CW |
920 | out_pci_disable: |
921 | pci_disable_device(pdev); | |
cad3688f | 922 | out_fini: |
8eecfb39 | 923 | i915_probe_error(i915, "Device initialization failed (%d)\n", ret); |
30c964a6 RB |
924 | return ret; |
925 | } | |
926 | ||
361f9dc2 | 927 | void i915_driver_remove(struct drm_i915_private *i915) |
3bad0781 | 928 | { |
361f9dc2 | 929 | disable_rpm_wakeref_asserts(&i915->runtime_pm); |
07d80572 | 930 | |
361f9dc2 | 931 | i915_driver_unregister(i915); |
99c539be | 932 | |
4a8ab5ea CW |
933 | /* Flush any external code that still may be under the RCU lock */ |
934 | synchronize_rcu(); | |
935 | ||
361f9dc2 | 936 | i915_gem_suspend(i915); |
ce1bb329 | 937 | |
361f9dc2 | 938 | intel_gvt_driver_remove(i915); |
26f837e8 | 939 | |
eb4612d8 | 940 | intel_modeset_driver_remove(i915); |
bcdb72ac | 941 | |
f20a60fb JN |
942 | intel_irq_uninstall(i915); |
943 | ||
c0ff9e5e | 944 | intel_modeset_driver_remove_noirq(i915); |
f20a60fb | 945 | |
361f9dc2 | 946 | i915_reset_error_state(i915); |
361f9dc2 | 947 | i915_gem_driver_remove(i915); |
0673ad47 | 948 | |
eb4612d8 | 949 | intel_modeset_driver_remove_nogem(i915); |
0673ad47 | 950 | |
361f9dc2 | 951 | i915_driver_hw_remove(i915); |
0673ad47 | 952 | |
361f9dc2 | 953 | enable_rpm_wakeref_asserts(&i915->runtime_pm); |
cad3688f CW |
954 | } |
955 | ||
956 | static void i915_driver_release(struct drm_device *dev) | |
957 | { | |
958 | struct drm_i915_private *dev_priv = to_i915(dev); | |
69c66355 | 959 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
0673ad47 | 960 | |
7fb81e9d DV |
961 | if (!dev_priv->do_release) |
962 | return; | |
963 | ||
69c66355 | 964 | disable_rpm_wakeref_asserts(rpm); |
47bc28d7 | 965 | |
3b58a945 | 966 | i915_gem_driver_release(dev_priv); |
47bc28d7 | 967 | |
3fc794f2 | 968 | intel_memory_regions_driver_release(dev_priv); |
3b58a945 | 969 | i915_ggtt_driver_release(dev_priv); |
89351925 | 970 | i915_gem_drain_freed_objects(dev_priv); |
19e0a8d4 | 971 | |
3b58a945 | 972 | i915_driver_mmio_release(dev_priv); |
47bc28d7 | 973 | |
69c66355 | 974 | enable_rpm_wakeref_asserts(rpm); |
3b58a945 | 975 | intel_runtime_pm_driver_release(rpm); |
47bc28d7 | 976 | |
3b58a945 | 977 | i915_driver_late_release(dev_priv); |
3bad0781 ZW |
978 | } |
979 | ||
0673ad47 | 980 | static int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
2911a35b | 981 | { |
829a0af2 | 982 | struct drm_i915_private *i915 = to_i915(dev); |
0673ad47 | 983 | int ret; |
2911a35b | 984 | |
829a0af2 | 985 | ret = i915_gem_open(i915, file); |
0673ad47 CW |
986 | if (ret) |
987 | return ret; | |
2911a35b | 988 | |
0673ad47 CW |
989 | return 0; |
990 | } | |
71386ef9 | 991 | |
0673ad47 CW |
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
2911a35b | 1009 | |
7d2ec881 | 1010 | static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
0673ad47 | 1011 | { |
7d2ec881 DV |
1012 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1013 | ||
829a0af2 | 1014 | i915_gem_context_close(file); |
0673ad47 | 1015 | |
77715906 | 1016 | kfree_rcu(file_priv, rcu); |
515b8b7e CW |
1017 | |
1018 | /* Catch up with all the deferred frees from "this" client */ | |
1019 | i915_gem_flush_free_objects(to_i915(dev)); | |
2911a35b BW |
1020 | } |
1021 | ||
07f9cd0b ID |
1022 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) |
1023 | { | |
91c8a326 | 1024 | struct drm_device *dev = &dev_priv->drm; |
19c8054c | 1025 | struct intel_encoder *encoder; |
07f9cd0b ID |
1026 | |
1027 | drm_modeset_lock_all(dev); | |
19c8054c JN |
1028 | for_each_intel_encoder(dev, encoder) |
1029 | if (encoder->suspend) | |
1030 | encoder->suspend(encoder); | |
07f9cd0b ID |
1031 | drm_modeset_unlock_all(dev); |
1032 | } | |
1033 | ||
100fe4c0 VS |
1034 | static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) |
1035 | { | |
1036 | struct drm_device *dev = &dev_priv->drm; | |
1037 | struct intel_encoder *encoder; | |
1038 | ||
1039 | drm_modeset_lock_all(dev); | |
1040 | for_each_intel_encoder(dev, encoder) | |
1041 | if (encoder->shutdown) | |
1042 | encoder->shutdown(encoder); | |
1043 | drm_modeset_unlock_all(dev); | |
1044 | } | |
1045 | ||
fe0f1e3b VS |
/*
 * PCI ->shutdown() path: quiesce the GPU and display for reboot/poweroff.
 * Unlike suspend, nothing here needs to be resumable; the goal is only a
 * clean hardware state before the system goes down.
 */
void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_gem_suspend(i915);

	drm_kms_helper_poll_disable(&i915->drm);

	/* Turn off all displays and release the atomic state. */
	drm_atomic_helper_shutdown(&i915->drm);

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
1066 | ||
bc87229f ID |
1067 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
1068 | { | |
1069 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) | |
1070 | if (acpi_target_system_state() < ACPI_STATE_S3) | |
1071 | return true; | |
1072 | #endif | |
1073 | return false; | |
1074 | } | |
ebc32824 | 1075 | |
73b66f87 CW |
/* PM ->prepare() step: park the GPU before the suspend sequence proper. */
static int i915_drm_prepare(struct drm_device *dev)
{
	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(to_i915(dev));

	return 0;
}
1090 | ||
5e365c39 | 1091 | static int i915_drm_suspend(struct drm_device *dev) |
ba8bbcf6 | 1092 | { |
fac5e23e | 1093 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1094 | struct pci_dev *pdev = dev_priv->drm.pdev; |
e5747e3a | 1095 | pci_power_t opregion_target_state; |
61caf87c | 1096 | |
9102650f | 1097 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1098 | |
c67a470b PZ |
1099 | /* We do a lot of poking in a lot of registers, make sure they work |
1100 | * properly. */ | |
2cd9a689 | 1101 | intel_power_domains_disable(dev_priv); |
cb10799c | 1102 | |
5bcf719b DA |
1103 | drm_kms_helper_poll_disable(dev); |
1104 | ||
52a05c30 | 1105 | pci_save_state(pdev); |
ba8bbcf6 | 1106 | |
6b72d486 | 1107 | intel_display_suspend(dev); |
2eb5252e | 1108 | |
1a4313d1 | 1109 | intel_dp_mst_suspend(dev_priv); |
7d708ee4 | 1110 | |
d5818938 DV |
1111 | intel_runtime_pm_disable_interrupts(dev_priv); |
1112 | intel_hpd_cancel_work(dev_priv); | |
09b64267 | 1113 | |
d5818938 | 1114 | intel_suspend_encoders(dev_priv); |
0e32b39c | 1115 | |
712bf364 | 1116 | intel_suspend_hw(dev_priv); |
5669fcac | 1117 | |
e986209c | 1118 | i915_ggtt_suspend(&dev_priv->ggtt); |
828c7908 | 1119 | |
0f8d2a2b | 1120 | i915_save_display(dev_priv); |
9e06dd39 | 1121 | |
bc87229f | 1122 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
a950adc6 | 1123 | intel_opregion_suspend(dev_priv, opregion_target_state); |
8ee1c3db | 1124 | |
82e3b8c1 | 1125 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
3fa016a0 | 1126 | |
62d5d69b MK |
1127 | dev_priv->suspend_count++; |
1128 | ||
f74ed08d | 1129 | intel_csr_ucode_suspend(dev_priv); |
f514c2d8 | 1130 | |
9102650f | 1131 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1132 | |
73b66f87 | 1133 | return 0; |
84b79f8d RW |
1134 | } |
1135 | ||
2cd9a689 ID |
1136 | static enum i915_drm_suspend_mode |
1137 | get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) | |
1138 | { | |
1139 | if (hibernate) | |
1140 | return I915_DRM_SUSPEND_HIBERNATE; | |
1141 | ||
1142 | if (suspend_to_idle(dev_priv)) | |
1143 | return I915_DRM_SUSPEND_IDLE; | |
1144 | ||
1145 | return I915_DRM_SUSPEND_MEM; | |
1146 | } | |
1147 | ||
c49d13ee | 1148 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
c3c09c95 | 1149 | { |
c49d13ee | 1150 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1151 | struct pci_dev *pdev = dev_priv->drm.pdev; |
69c66355 | 1152 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
fb5f432a | 1153 | int ret; |
c3c09c95 | 1154 | |
69c66355 | 1155 | disable_rpm_wakeref_asserts(rpm); |
1f814dac | 1156 | |
ec92ad00 CW |
1157 | i915_gem_suspend_late(dev_priv); |
1158 | ||
f7de5027 | 1159 | intel_uncore_suspend(&dev_priv->uncore); |
4c494a57 | 1160 | |
2cd9a689 ID |
1161 | intel_power_domains_suspend(dev_priv, |
1162 | get_suspend_mode(dev_priv, hibernation)); | |
73dfc227 | 1163 | |
071b68cc RV |
1164 | intel_display_power_suspend_late(dev_priv); |
1165 | ||
fb5f432a | 1166 | ret = vlv_suspend_complete(dev_priv); |
c3c09c95 | 1167 | if (ret) { |
00376ccf | 1168 | drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); |
2cd9a689 | 1169 | intel_power_domains_resume(dev_priv); |
c3c09c95 | 1170 | |
1f814dac | 1171 | goto out; |
c3c09c95 ID |
1172 | } |
1173 | ||
52a05c30 | 1174 | pci_disable_device(pdev); |
ab3be73f | 1175 | /* |
54875571 | 1176 | * During hibernation on some platforms the BIOS may try to access |
ab3be73f ID |
1177 | * the device even though it's already in D3 and hang the machine. So |
1178 | * leave the device in D0 on those platforms and hope the BIOS will | |
54875571 ID |
1179 | * power down the device properly. The issue was seen on multiple old |
1180 | * GENs with different BIOS vendors, so having an explicit blacklist | |
1181 | * is inpractical; apply the workaround on everything pre GEN6. The | |
1182 | * platforms where the issue was seen: | |
1183 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 | |
1184 | * Fujitsu FSC S7110 | |
1185 | * Acer Aspire 1830T | |
ab3be73f | 1186 | */ |
514e1d64 | 1187 | if (!(hibernation && INTEL_GEN(dev_priv) < 6)) |
52a05c30 | 1188 | pci_set_power_state(pdev, PCI_D3hot); |
c3c09c95 | 1189 | |
1f814dac | 1190 | out: |
69c66355 | 1191 | enable_rpm_wakeref_asserts(rpm); |
0a9b2630 | 1192 | if (!dev_priv->uncore.user_forcewake_count) |
3b58a945 | 1193 | intel_runtime_pm_driver_release(rpm); |
1f814dac ID |
1194 | |
1195 | return ret; | |
c3c09c95 ID |
1196 | } |
1197 | ||
63bf8301 | 1198 | int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) |
84b79f8d RW |
1199 | { |
1200 | int error; | |
1201 | ||
48a1b8d4 PB |
1202 | if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND && |
1203 | state.event != PM_EVENT_FREEZE)) | |
0b14cbd2 | 1204 | return -EINVAL; |
5bcf719b | 1205 | |
361f9dc2 | 1206 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
5bcf719b | 1207 | return 0; |
6eecba33 | 1208 | |
361f9dc2 | 1209 | error = i915_drm_suspend(&i915->drm); |
84b79f8d RW |
1210 | if (error) |
1211 | return error; | |
1212 | ||
361f9dc2 | 1213 | return i915_drm_suspend_late(&i915->drm, false); |
ba8bbcf6 JB |
1214 | } |
1215 | ||
5e365c39 | 1216 | static int i915_drm_resume(struct drm_device *dev) |
76c4b250 | 1217 | { |
fac5e23e | 1218 | struct drm_i915_private *dev_priv = to_i915(dev); |
ac840ae5 | 1219 | int ret; |
9d49c0ef | 1220 | |
9102650f | 1221 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1222 | |
640b50fa CW |
1223 | sanitize_gpu(dev_priv); |
1224 | ||
97d6d7ab | 1225 | ret = i915_ggtt_enable_hw(dev_priv); |
ac840ae5 | 1226 | if (ret) |
00376ccf | 1227 | drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); |
ac840ae5 | 1228 | |
e986209c | 1229 | i915_ggtt_resume(&dev_priv->ggtt); |
cec5ca08 | 1230 | |
f74ed08d ID |
1231 | intel_csr_ucode_resume(dev_priv); |
1232 | ||
0f8d2a2b | 1233 | i915_restore_display(dev_priv); |
8090ba8c | 1234 | intel_pps_unlock_regs_wa(dev_priv); |
61caf87c | 1235 | |
c39055b0 | 1236 | intel_init_pch_refclk(dev_priv); |
1833b134 | 1237 | |
364aece0 PA |
1238 | /* |
1239 | * Interrupts have to be enabled before any batches are run. If not the | |
1240 | * GPU will hang. i915_gem_init_hw() will initiate batches to | |
1241 | * update/restore the context. | |
1242 | * | |
908764f6 ID |
1243 | * drm_mode_config_reset() needs AUX interrupts. |
1244 | * | |
364aece0 PA |
1245 | * Modeset enabling in intel_modeset_init_hw() also needs working |
1246 | * interrupts. | |
1247 | */ | |
1248 | intel_runtime_pm_enable_interrupts(dev_priv); | |
1249 | ||
908764f6 ID |
1250 | drm_mode_config_reset(dev); |
1251 | ||
37cd3300 | 1252 | i915_gem_resume(dev_priv); |
226485e9 | 1253 | |
6cd02e77 | 1254 | intel_modeset_init_hw(dev_priv); |
675f7ff3 | 1255 | intel_init_clock_gating(dev_priv); |
4c8d4651 | 1256 | intel_hpd_init(dev_priv); |
24576d23 | 1257 | |
4c8d4651 | 1258 | /* MST sideband requires HPD interrupts enabled */ |
1a4313d1 | 1259 | intel_dp_mst_resume(dev_priv); |
a16b7658 L |
1260 | intel_display_resume(dev); |
1261 | ||
4c8d4651 | 1262 | intel_hpd_poll_disable(dev_priv); |
e0b70061 L |
1263 | drm_kms_helper_poll_enable(dev); |
1264 | ||
a950adc6 | 1265 | intel_opregion_resume(dev_priv); |
44834a67 | 1266 | |
82e3b8c1 | 1267 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
073f34d9 | 1268 | |
2cd9a689 ID |
1269 | intel_power_domains_enable(dev_priv); |
1270 | ||
385fc38c CX |
1271 | intel_gvt_resume(dev_priv); |
1272 | ||
9102650f | 1273 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1274 | |
074c6ada | 1275 | return 0; |
84b79f8d RW |
1276 | } |
1277 | ||
5e365c39 | 1278 | static int i915_drm_resume_early(struct drm_device *dev) |
84b79f8d | 1279 | { |
fac5e23e | 1280 | struct drm_i915_private *dev_priv = to_i915(dev); |
52a05c30 | 1281 | struct pci_dev *pdev = dev_priv->drm.pdev; |
44410cd0 | 1282 | int ret; |
36d61e67 | 1283 | |
76c4b250 ID |
1284 | /* |
1285 | * We have a resume ordering issue with the snd-hda driver also | |
1286 | * requiring our device to be power up. Due to the lack of a | |
1287 | * parent/child relationship we currently solve this with an early | |
1288 | * resume hook. | |
1289 | * | |
1290 | * FIXME: This should be solved with a special hdmi sink device or | |
1291 | * similar so that power domains can be employed. | |
1292 | */ | |
44410cd0 ID |
1293 | |
1294 | /* | |
1295 | * Note that we need to set the power state explicitly, since we | |
1296 | * powered off the device during freeze and the PCI core won't power | |
1297 | * it back up for us during thaw. Powering off the device during | |
1298 | * freeze is not a hard requirement though, and during the | |
1299 | * suspend/resume phases the PCI core makes sure we get here with the | |
1300 | * device powered on. So in case we change our freeze logic and keep | |
1301 | * the device powered we can also remove the following set power state | |
1302 | * call. | |
1303 | */ | |
52a05c30 | 1304 | ret = pci_set_power_state(pdev, PCI_D0); |
44410cd0 | 1305 | if (ret) { |
00376ccf WK |
1306 | drm_err(&dev_priv->drm, |
1307 | "failed to set PCI D0 power state (%d)\n", ret); | |
2cd9a689 | 1308 | return ret; |
44410cd0 ID |
1309 | } |
1310 | ||
1311 | /* | |
1312 | * Note that pci_enable_device() first enables any parent bridge | |
1313 | * device and only then sets the power state for this device. The | |
1314 | * bridge enabling is a nop though, since bridge devices are resumed | |
1315 | * first. The order of enabling power and enabling the device is | |
1316 | * imposed by the PCI core as described above, so here we preserve the | |
1317 | * same order for the freeze/thaw phases. | |
1318 | * | |
1319 | * TODO: eventually we should remove pci_disable_device() / | |
1320 | * pci_enable_enable_device() from suspend/resume. Due to how they | |
1321 | * depend on the device enable refcount we can't anyway depend on them | |
1322 | * disabling/enabling the device. | |
1323 | */ | |
2cd9a689 ID |
1324 | if (pci_enable_device(pdev)) |
1325 | return -EIO; | |
84b79f8d | 1326 | |
52a05c30 | 1327 | pci_set_master(pdev); |
84b79f8d | 1328 | |
9102650f | 1329 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
1f814dac | 1330 | |
fb5f432a | 1331 | ret = vlv_resume_prepare(dev_priv, false); |
36d61e67 | 1332 | if (ret) |
00376ccf | 1333 | drm_err(&dev_priv->drm, |
fb5f432a | 1334 | "Resume prepare failed: %d, continuing anyway\n", ret); |
36d61e67 | 1335 | |
f7de5027 DCS |
1336 | intel_uncore_resume_early(&dev_priv->uncore); |
1337 | ||
eaf522f6 | 1338 | intel_gt_check_and_clear_faults(&dev_priv->gt); |
efee833a | 1339 | |
071b68cc | 1340 | intel_display_power_resume_early(dev_priv); |
efee833a | 1341 | |
2cd9a689 | 1342 | intel_power_domains_resume(dev_priv); |
bc87229f | 1343 | |
9102650f | 1344 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
6e35e8ab | 1345 | |
36d61e67 | 1346 | return ret; |
76c4b250 ID |
1347 | } |
1348 | ||
63bf8301 | 1349 | int i915_resume_switcheroo(struct drm_i915_private *i915) |
76c4b250 | 1350 | { |
50a0072f | 1351 | int ret; |
76c4b250 | 1352 | |
361f9dc2 | 1353 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1354 | return 0; |
1355 | ||
361f9dc2 | 1356 | ret = i915_drm_resume_early(&i915->drm); |
50a0072f ID |
1357 | if (ret) |
1358 | return ret; | |
1359 | ||
361f9dc2 | 1360 | return i915_drm_resume(&i915->drm); |
5a17514e ID |
1361 | } |
1362 | ||
73b66f87 CW |
1363 | static int i915_pm_prepare(struct device *kdev) |
1364 | { | |
361f9dc2 | 1365 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
73b66f87 | 1366 | |
361f9dc2 | 1367 | if (!i915) { |
73b66f87 CW |
1368 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); |
1369 | return -ENODEV; | |
1370 | } | |
1371 | ||
361f9dc2 | 1372 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
73b66f87 CW |
1373 | return 0; |
1374 | ||
361f9dc2 | 1375 | return i915_drm_prepare(&i915->drm); |
73b66f87 CW |
1376 | } |
1377 | ||
c49d13ee | 1378 | static int i915_pm_suspend(struct device *kdev) |
112b715e | 1379 | { |
361f9dc2 | 1380 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
112b715e | 1381 | |
361f9dc2 | 1382 | if (!i915) { |
c49d13ee | 1383 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); |
84b79f8d RW |
1384 | return -ENODEV; |
1385 | } | |
112b715e | 1386 | |
361f9dc2 | 1387 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
5bcf719b DA |
1388 | return 0; |
1389 | ||
361f9dc2 | 1390 | return i915_drm_suspend(&i915->drm); |
76c4b250 ID |
1391 | } |
1392 | ||
c49d13ee | 1393 | static int i915_pm_suspend_late(struct device *kdev) |
76c4b250 | 1394 | { |
361f9dc2 | 1395 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
76c4b250 ID |
1396 | |
1397 | /* | |
c965d995 | 1398 | * We have a suspend ordering issue with the snd-hda driver also |
76c4b250 ID |
1399 | * requiring our device to be power up. Due to the lack of a |
1400 | * parent/child relationship we currently solve this with an late | |
1401 | * suspend hook. | |
1402 | * | |
1403 | * FIXME: This should be solved with a special hdmi sink device or | |
1404 | * similar so that power domains can be employed. | |
1405 | */ | |
361f9dc2 | 1406 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
76c4b250 | 1407 | return 0; |
112b715e | 1408 | |
361f9dc2 | 1409 | return i915_drm_suspend_late(&i915->drm, false); |
ab3be73f ID |
1410 | } |
1411 | ||
c49d13ee | 1412 | static int i915_pm_poweroff_late(struct device *kdev) |
ab3be73f | 1413 | { |
361f9dc2 | 1414 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
ab3be73f | 1415 | |
361f9dc2 | 1416 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
ab3be73f ID |
1417 | return 0; |
1418 | ||
361f9dc2 | 1419 | return i915_drm_suspend_late(&i915->drm, true); |
cbda12d7 ZW |
1420 | } |
1421 | ||
c49d13ee | 1422 | static int i915_pm_resume_early(struct device *kdev) |
76c4b250 | 1423 | { |
361f9dc2 | 1424 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
76c4b250 | 1425 | |
361f9dc2 | 1426 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1427 | return 0; |
1428 | ||
361f9dc2 | 1429 | return i915_drm_resume_early(&i915->drm); |
76c4b250 ID |
1430 | } |
1431 | ||
c49d13ee | 1432 | static int i915_pm_resume(struct device *kdev) |
cbda12d7 | 1433 | { |
361f9dc2 | 1434 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
84b79f8d | 1435 | |
361f9dc2 | 1436 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
097dd837 ID |
1437 | return 0; |
1438 | ||
361f9dc2 | 1439 | return i915_drm_resume(&i915->drm); |
cbda12d7 ZW |
1440 | } |
1441 | ||
1f19ac2a | 1442 | /* freeze: before creating the hibernation_image */ |
c49d13ee | 1443 | static int i915_pm_freeze(struct device *kdev) |
1f19ac2a | 1444 | { |
361f9dc2 | 1445 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
6a800eab CW |
1446 | int ret; |
1447 | ||
361f9dc2 CW |
1448 | if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { |
1449 | ret = i915_drm_suspend(&i915->drm); | |
dd9f31c7 ID |
1450 | if (ret) |
1451 | return ret; | |
1452 | } | |
6a800eab | 1453 | |
361f9dc2 | 1454 | ret = i915_gem_freeze(i915); |
6a800eab CW |
1455 | if (ret) |
1456 | return ret; | |
1457 | ||
1458 | return 0; | |
1f19ac2a CW |
1459 | } |
1460 | ||
c49d13ee | 1461 | static int i915_pm_freeze_late(struct device *kdev) |
1f19ac2a | 1462 | { |
361f9dc2 | 1463 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
461fb99c CW |
1464 | int ret; |
1465 | ||
361f9dc2 CW |
1466 | if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { |
1467 | ret = i915_drm_suspend_late(&i915->drm, true); | |
dd9f31c7 ID |
1468 | if (ret) |
1469 | return ret; | |
1470 | } | |
461fb99c | 1471 | |
361f9dc2 | 1472 | ret = i915_gem_freeze_late(i915); |
461fb99c CW |
1473 | if (ret) |
1474 | return ret; | |
1475 | ||
1476 | return 0; | |
1f19ac2a CW |
1477 | } |
1478 | ||
/* thaw: called after creating the hibernation image, but before turning off. */
/* Thaw early is identical to a normal early resume. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}
1484 | ||
c49d13ee | 1485 | static int i915_pm_thaw(struct device *kdev) |
1f19ac2a | 1486 | { |
c49d13ee | 1487 | return i915_pm_resume(kdev); |
1f19ac2a CW |
1488 | } |
1489 | ||
/* restore: called after loading the hibernation image. */
/* Restore early is identical to a normal early resume. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}
1495 | ||
c49d13ee | 1496 | static int i915_pm_restore(struct device *kdev) |
1f19ac2a | 1497 | { |
c49d13ee | 1498 | return i915_pm_resume(kdev); |
1f19ac2a CW |
1499 | } |
1500 | ||
c49d13ee | 1501 | static int intel_runtime_suspend(struct device *kdev) |
8a187455 | 1502 | { |
361f9dc2 | 1503 | struct drm_i915_private *dev_priv = kdev_to_i915(kdev); |
1bf676cc | 1504 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
fb5f432a | 1505 | int ret; |
8a187455 | 1506 | |
48a1b8d4 | 1507 | if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) |
604effb7 ID |
1508 | return -ENODEV; |
1509 | ||
00376ccf | 1510 | drm_dbg_kms(&dev_priv->drm, "Suspending device\n"); |
8a187455 | 1511 | |
9102650f | 1512 | disable_rpm_wakeref_asserts(rpm); |
1f814dac | 1513 | |
d6102977 ID |
1514 | /* |
1515 | * We are safe here against re-faults, since the fault handler takes | |
1516 | * an RPM reference. | |
1517 | */ | |
7c108fd8 | 1518 | i915_gem_runtime_suspend(dev_priv); |
d6102977 | 1519 | |
9dfe3459 | 1520 | intel_gt_runtime_suspend(&dev_priv->gt); |
a1c41994 | 1521 | |
2eb5252e | 1522 | intel_runtime_pm_disable_interrupts(dev_priv); |
b5478bcd | 1523 | |
f7de5027 | 1524 | intel_uncore_suspend(&dev_priv->uncore); |
01c799c9 | 1525 | |
071b68cc RV |
1526 | intel_display_power_suspend(dev_priv); |
1527 | ||
fb5f432a | 1528 | ret = vlv_suspend_complete(dev_priv); |
0ab9cfeb | 1529 | if (ret) { |
00376ccf WK |
1530 | drm_err(&dev_priv->drm, |
1531 | "Runtime suspend failed, disabling it (%d)\n", ret); | |
f7de5027 | 1532 | intel_uncore_runtime_resume(&dev_priv->uncore); |
01c799c9 | 1533 | |
b963291c | 1534 | intel_runtime_pm_enable_interrupts(dev_priv); |
0ab9cfeb | 1535 | |
9dfe3459 | 1536 | intel_gt_runtime_resume(&dev_priv->gt); |
1ed21cb4 | 1537 | |
9102650f | 1538 | enable_rpm_wakeref_asserts(rpm); |
1f814dac | 1539 | |
0ab9cfeb ID |
1540 | return ret; |
1541 | } | |
a8a8bd54 | 1542 | |
9102650f | 1543 | enable_rpm_wakeref_asserts(rpm); |
3b58a945 | 1544 | intel_runtime_pm_driver_release(rpm); |
55ec45c2 | 1545 | |
2cf7bf6f | 1546 | if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) |
00376ccf WK |
1547 | drm_err(&dev_priv->drm, |
1548 | "Unclaimed access detected prior to suspending\n"); | |
55ec45c2 | 1549 | |
9102650f | 1550 | rpm->suspended = true; |
1fb2362b KCA |
1551 | |
1552 | /* | |
c8a0bd42 PZ |
1553 | * FIXME: We really should find a document that references the arguments |
1554 | * used below! | |
1fb2362b | 1555 | */ |
6f9f4b7a | 1556 | if (IS_BROADWELL(dev_priv)) { |
d37ae19a PZ |
1557 | /* |
1558 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop | |
1559 | * being detected, and the call we do at intel_runtime_resume() | |
1560 | * won't be able to restore them. Since PCI_D3hot matches the | |
1561 | * actual specification and appears to be working, use it. | |
1562 | */ | |
6f9f4b7a | 1563 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
d37ae19a | 1564 | } else { |
c8a0bd42 PZ |
1565 | /* |
1566 | * current versions of firmware which depend on this opregion | |
1567 | * notification have repurposed the D1 definition to mean | |
1568 | * "runtime suspended" vs. what you would normally expect (D3) | |
1569 | * to distinguish it from notifications that might be sent via | |
1570 | * the suspend path. | |
1571 | */ | |
6f9f4b7a | 1572 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
c8a0bd42 | 1573 | } |
8a187455 | 1574 | |
f568eeee | 1575 | assert_forcewakes_inactive(&dev_priv->uncore); |
dc9fb09c | 1576 | |
21d6e0bd | 1577 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
4c8d4651 | 1578 | intel_hpd_poll_enable(dev_priv); |
19625e85 | 1579 | |
00376ccf | 1580 | drm_dbg_kms(&dev_priv->drm, "Device suspended\n"); |
8a187455 PZ |
1581 | return 0; |
1582 | } | |
1583 | ||
/*
 * intel_runtime_resume - runtime-PM (S0ix) resume handler
 * @kdev: base device of the i915 PCI device
 *
 * Counterpart of intel_runtime_suspend(); wired up via i915_pm_ops as the
 * .runtime_resume callback.  Brings the device back up in roughly the
 * reverse order of the suspend path: opregion/PCI state, display power,
 * platform (VLV) state, uncore MMIO, interrupts, GT and finally hotplug.
 *
 * Returns 0 on success, -ENODEV if runtime PM is not supported, or the
 * error from vlv_resume_prepare() — note that on error we still complete
 * the remaining resume steps (see comment below) before reporting it.
 */
static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	/* No wakerefs may be held while the device is runtime suspended. */
	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	/* Tell ACPI/firmware we are back at D0 before touching hardware. */
	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}
1640 | ||
/*
 * Power-management callback table registered with the PCI/PM core.
 * Groups the handlers by the power state transition they service.
 */
const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,	/* @poweroff reuses the S3 suspend handler */
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
1680 | ||
/*
 * File operations for the DRM device nodes exposed by this driver.
 * Mostly generic DRM helpers; the i915-specific entries are noted below.
 */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,			/* i915 GEM mmap, not the generic DRM one */
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,	/* 32-bit userspace on 64-bit kernel */
	.llseek = noop_llseek,
};
1692 | ||
/*
 * Stub handler for the removed GEM pin/unpin ioctls: unconditionally
 * refuse so legacy userspace gets a clean "not supported" error.
 */
static int i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	return -ENODEV;
}
1699 | ||
/*
 * i915 device ioctl dispatch table.
 *
 * Legacy (pre-GEM/DRI1) entry points are wired to drm_noop, or to
 * i915_gem_reject_pin_ioctl for the pin/unpin calls, so obsolete
 * userspace fails gracefully.  DRM_RENDER_ALLOW entries are also
 * reachable through render nodes.
 */
static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
1760 | ||
/*
 * Top-level DRM driver description for i915: feature flags, core file
 * and object callbacks, the ioctl table above, and identification
 * strings/versions (DRIVER_* macros defined elsewhere in the driver).
 */
static const struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	/* PRIME (dma-buf) export uses the generic helpers; import is i915-specific. */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	/* Dumb buffers for unaccelerated scanout (e.g. fbdev-style clients). */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};