drm/i915: Check for driver readiness before handling an underrun interrupt
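The patch subject above refers to a guard in the FIFO underrun interrupt path; that handler lives outside this file, so the snippet below is only a minimal sketch, under the assumption of a hypothetical handler name and readiness flag (neither is the real i915 API), of the kind of early-out such a check provides:

/*
 * Hypothetical sketch only: i915_handle_underrun_irq() and
 * dev_priv->display_ready are illustrative names, not symbols from the
 * i915 driver. The point is simply to return from the underrun
 * interrupt before touching display state the driver has not finished
 * initializing.
 */
static void i915_handle_underrun_irq(struct drm_i915_private *dev_priv)
{
	if (!dev_priv || !dev_priv->display_ready)
		return; /* driver not ready yet, ignore the event */

	/* ... normal underrun reporting would continue here ... */
}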
[linux-2.6-block.git] drivers/gpu/drm/i915/i915_drv.c
1da177e4
LT
1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
0d6aa60b 3/*
bc54fd1a 4 *
1da177e4
LT
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
bc54fd1a
DA
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
0d6aa60b 28 */
1da177e4 29
5669fcac 30#include <linux/device.h>
e5747e3a 31#include <linux/acpi.h>
760285e7
DH
32#include <drm/drmP.h>
33#include <drm/i915_drm.h>
1da177e4 34#include "i915_drv.h"
990bbdad 35#include "i915_trace.h"
f49f0586 36#include "intel_drv.h"
1da177e4 37
79e53945 38#include <linux/console.h>
e0cd3608 39#include <linux/module.h>
d6102977 40#include <linux/pm_runtime.h>
760285e7 41#include <drm/drm_crtc_helper.h>
79e53945 42
112b715e
KH
43static struct drm_driver driver;
44
a57c774a
AK
45#define GEN_DEFAULT_PIPEOFFSETS \
46 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
47 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
48 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
49 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
a57c774a
AK
50 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51
84fd4f4e
RB
52#define GEN_CHV_PIPEOFFSETS \
53 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
54 CHV_PIPE_C_OFFSET }, \
55 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
56 CHV_TRANSCODER_C_OFFSET, }, \
84fd4f4e
RB
57 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
58 CHV_PALETTE_C_OFFSET }
a57c774a 59
5efb3e28
VS
60#define CURSOR_OFFSETS \
61 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
62
63#define IVB_CURSOR_OFFSETS \
64 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
65
9a7e8492 66static const struct intel_device_info intel_i830_info = {
7eb552ae 67 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
31578148 68 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 69 .ring_mask = RENDER_RING,
a57c774a 70 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 71 CURSOR_OFFSETS,
cfdf1fa2
KH
72};
73
9a7e8492 74static const struct intel_device_info intel_845g_info = {
7eb552ae 75 .gen = 2, .num_pipes = 1,
31578148 76 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 77 .ring_mask = RENDER_RING,
a57c774a 78 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 79 CURSOR_OFFSETS,
cfdf1fa2
KH
80};
81
9a7e8492 82static const struct intel_device_info intel_i85x_info = {
7eb552ae 83 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
5ce8ba7c 84 .cursor_needs_physical = 1,
31578148 85 .has_overlay = 1, .overlay_needs_physical = 1,
fd70d52a 86 .has_fbc = 1,
73ae478c 87 .ring_mask = RENDER_RING,
a57c774a 88 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 89 CURSOR_OFFSETS,
cfdf1fa2
KH
90};
91
9a7e8492 92static const struct intel_device_info intel_i865g_info = {
7eb552ae 93 .gen = 2, .num_pipes = 1,
31578148 94 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 95 .ring_mask = RENDER_RING,
a57c774a 96 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 97 CURSOR_OFFSETS,
cfdf1fa2
KH
98};
99
9a7e8492 100static const struct intel_device_info intel_i915g_info = {
7eb552ae 101 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
31578148 102 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 103 .ring_mask = RENDER_RING,
a57c774a 104 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 105 CURSOR_OFFSETS,
cfdf1fa2 106};
9a7e8492 107static const struct intel_device_info intel_i915gm_info = {
7eb552ae 108 .gen = 3, .is_mobile = 1, .num_pipes = 2,
b295d1b6 109 .cursor_needs_physical = 1,
31578148 110 .has_overlay = 1, .overlay_needs_physical = 1,
a6c45cf0 111 .supports_tv = 1,
fd70d52a 112 .has_fbc = 1,
73ae478c 113 .ring_mask = RENDER_RING,
a57c774a 114 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 115 CURSOR_OFFSETS,
cfdf1fa2 116};
9a7e8492 117static const struct intel_device_info intel_i945g_info = {
7eb552ae 118 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
31578148 119 .has_overlay = 1, .overlay_needs_physical = 1,
73ae478c 120 .ring_mask = RENDER_RING,
a57c774a 121 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 122 CURSOR_OFFSETS,
cfdf1fa2 123};
9a7e8492 124static const struct intel_device_info intel_i945gm_info = {
7eb552ae 125 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
b295d1b6 126 .has_hotplug = 1, .cursor_needs_physical = 1,
31578148 127 .has_overlay = 1, .overlay_needs_physical = 1,
a6c45cf0 128 .supports_tv = 1,
fd70d52a 129 .has_fbc = 1,
73ae478c 130 .ring_mask = RENDER_RING,
a57c774a 131 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 132 CURSOR_OFFSETS,
cfdf1fa2
KH
133};
134
9a7e8492 135static const struct intel_device_info intel_i965g_info = {
7eb552ae 136 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
c96c3a8c 137 .has_hotplug = 1,
31578148 138 .has_overlay = 1,
73ae478c 139 .ring_mask = RENDER_RING,
a57c774a 140 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 141 CURSOR_OFFSETS,
cfdf1fa2
KH
142};
143
9a7e8492 144static const struct intel_device_info intel_i965gm_info = {
7eb552ae 145 .gen = 4, .is_crestline = 1, .num_pipes = 2,
e3c4e5dd 146 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
31578148 147 .has_overlay = 1,
a6c45cf0 148 .supports_tv = 1,
73ae478c 149 .ring_mask = RENDER_RING,
a57c774a 150 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 151 CURSOR_OFFSETS,
cfdf1fa2
KH
152};
153
9a7e8492 154static const struct intel_device_info intel_g33_info = {
7eb552ae 155 .gen = 3, .is_g33 = 1, .num_pipes = 2,
c96c3a8c 156 .need_gfx_hws = 1, .has_hotplug = 1,
31578148 157 .has_overlay = 1,
73ae478c 158 .ring_mask = RENDER_RING,
a57c774a 159 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 160 CURSOR_OFFSETS,
cfdf1fa2
KH
161};
162
9a7e8492 163static const struct intel_device_info intel_g45_info = {
7eb552ae 164 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
c96c3a8c 165 .has_pipe_cxsr = 1, .has_hotplug = 1,
73ae478c 166 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 167 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 168 CURSOR_OFFSETS,
cfdf1fa2
KH
169};
170
9a7e8492 171static const struct intel_device_info intel_gm45_info = {
7eb552ae 172 .gen = 4, .is_g4x = 1, .num_pipes = 2,
e3c4e5dd 173 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
c96c3a8c 174 .has_pipe_cxsr = 1, .has_hotplug = 1,
a6c45cf0 175 .supports_tv = 1,
73ae478c 176 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 177 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 178 CURSOR_OFFSETS,
cfdf1fa2
KH
179};
180
9a7e8492 181static const struct intel_device_info intel_pineview_info = {
7eb552ae 182 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
c96c3a8c 183 .need_gfx_hws = 1, .has_hotplug = 1,
31578148 184 .has_overlay = 1,
a57c774a 185 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 186 CURSOR_OFFSETS,
cfdf1fa2
KH
187};
188
9a7e8492 189static const struct intel_device_info intel_ironlake_d_info = {
7eb552ae 190 .gen = 5, .num_pipes = 2,
5a117db7 191 .need_gfx_hws = 1, .has_hotplug = 1,
73ae478c 192 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 193 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 194 CURSOR_OFFSETS,
cfdf1fa2
KH
195};
196
9a7e8492 197static const struct intel_device_info intel_ironlake_m_info = {
7eb552ae 198 .gen = 5, .is_mobile = 1, .num_pipes = 2,
e3c4e5dd 199 .need_gfx_hws = 1, .has_hotplug = 1,
c1a9f047 200 .has_fbc = 1,
73ae478c 201 .ring_mask = RENDER_RING | BSD_RING,
a57c774a 202 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 203 CURSOR_OFFSETS,
cfdf1fa2
KH
204};
205
9a7e8492 206static const struct intel_device_info intel_sandybridge_d_info = {
7eb552ae 207 .gen = 6, .num_pipes = 2,
c96c3a8c 208 .need_gfx_hws = 1, .has_hotplug = 1,
cbaef0f1 209 .has_fbc = 1,
73ae478c 210 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3d29b842 211 .has_llc = 1,
a57c774a 212 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 213 CURSOR_OFFSETS,
f6e450a6
EA
214};
215
9a7e8492 216static const struct intel_device_info intel_sandybridge_m_info = {
7eb552ae 217 .gen = 6, .is_mobile = 1, .num_pipes = 2,
c96c3a8c 218 .need_gfx_hws = 1, .has_hotplug = 1,
9c04f015 219 .has_fbc = 1,
73ae478c 220 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3d29b842 221 .has_llc = 1,
a57c774a 222 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 223 CURSOR_OFFSETS,
a13e4093
EA
224};
225
219f4fdb
BW
226#define GEN7_FEATURES \
227 .gen = 7, .num_pipes = 3, \
228 .need_gfx_hws = 1, .has_hotplug = 1, \
cbaef0f1 229 .has_fbc = 1, \
73ae478c 230 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
ab484f8f 231 .has_llc = 1
219f4fdb 232
c76b615c 233static const struct intel_device_info intel_ivybridge_d_info = {
219f4fdb
BW
234 GEN7_FEATURES,
235 .is_ivybridge = 1,
a57c774a 236 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 237 IVB_CURSOR_OFFSETS,
c76b615c
JB
238};
239
240static const struct intel_device_info intel_ivybridge_m_info = {
219f4fdb
BW
241 GEN7_FEATURES,
242 .is_ivybridge = 1,
243 .is_mobile = 1,
a57c774a 244 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 245 IVB_CURSOR_OFFSETS,
c76b615c
JB
246};
247
999bcdea
BW
248static const struct intel_device_info intel_ivybridge_q_info = {
249 GEN7_FEATURES,
250 .is_ivybridge = 1,
251 .num_pipes = 0, /* legal, last one wins */
a57c774a 252 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 253 IVB_CURSOR_OFFSETS,
999bcdea
BW
254};
255
70a3eb7a 256static const struct intel_device_info intel_valleyview_m_info = {
219f4fdb
BW
257 GEN7_FEATURES,
258 .is_mobile = 1,
259 .num_pipes = 2,
70a3eb7a 260 .is_valleyview = 1,
fba5d532 261 .display_mmio_offset = VLV_DISPLAY_BASE,
cbaef0f1 262 .has_fbc = 0, /* legal, last one wins */
30ccd964 263 .has_llc = 0, /* legal, last one wins */
a57c774a 264 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 265 CURSOR_OFFSETS,
70a3eb7a
JB
266};
267
268static const struct intel_device_info intel_valleyview_d_info = {
219f4fdb
BW
269 GEN7_FEATURES,
270 .num_pipes = 2,
70a3eb7a 271 .is_valleyview = 1,
fba5d532 272 .display_mmio_offset = VLV_DISPLAY_BASE,
cbaef0f1 273 .has_fbc = 0, /* legal, last one wins */
30ccd964 274 .has_llc = 0, /* legal, last one wins */
a57c774a 275 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 276 CURSOR_OFFSETS,
70a3eb7a
JB
277};
278
4cae9ae0 279static const struct intel_device_info intel_haswell_d_info = {
219f4fdb
BW
280 GEN7_FEATURES,
281 .is_haswell = 1,
dd93be58 282 .has_ddi = 1,
30568c45 283 .has_fpga_dbg = 1,
73ae478c 284 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
a57c774a 285 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 286 IVB_CURSOR_OFFSETS,
4cae9ae0
ED
287};
288
289static const struct intel_device_info intel_haswell_m_info = {
219f4fdb
BW
290 GEN7_FEATURES,
291 .is_haswell = 1,
292 .is_mobile = 1,
dd93be58 293 .has_ddi = 1,
30568c45 294 .has_fpga_dbg = 1,
73ae478c 295 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
a57c774a 296 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 297 IVB_CURSOR_OFFSETS,
c76b615c
JB
298};
299
4d4dead6 300static const struct intel_device_info intel_broadwell_d_info = {
4b30553d 301 .gen = 8, .num_pipes = 3,
4d4dead6
BW
302 .need_gfx_hws = 1, .has_hotplug = 1,
303 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
304 .has_llc = 1,
305 .has_ddi = 1,
66bc2cab 306 .has_fpga_dbg = 1,
8f94d24b 307 .has_fbc = 1,
a57c774a 308 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 309 IVB_CURSOR_OFFSETS,
4d4dead6
BW
310};
311
312static const struct intel_device_info intel_broadwell_m_info = {
4b30553d 313 .gen = 8, .is_mobile = 1, .num_pipes = 3,
4d4dead6
BW
314 .need_gfx_hws = 1, .has_hotplug = 1,
315 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
316 .has_llc = 1,
317 .has_ddi = 1,
66bc2cab 318 .has_fpga_dbg = 1,
8f94d24b 319 .has_fbc = 1,
a57c774a 320 GEN_DEFAULT_PIPEOFFSETS,
15d24aa5 321 IVB_CURSOR_OFFSETS,
4d4dead6
BW
322};
323
fd3c269f
ZY
324static const struct intel_device_info intel_broadwell_gt3d_info = {
325 .gen = 8, .num_pipes = 3,
326 .need_gfx_hws = 1, .has_hotplug = 1,
845f74a7 327 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
fd3c269f
ZY
328 .has_llc = 1,
329 .has_ddi = 1,
66bc2cab 330 .has_fpga_dbg = 1,
fd3c269f
ZY
331 .has_fbc = 1,
332 GEN_DEFAULT_PIPEOFFSETS,
15d24aa5 333 IVB_CURSOR_OFFSETS,
fd3c269f
ZY
334};
335
336static const struct intel_device_info intel_broadwell_gt3m_info = {
337 .gen = 8, .is_mobile = 1, .num_pipes = 3,
338 .need_gfx_hws = 1, .has_hotplug = 1,
845f74a7 339 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
fd3c269f
ZY
340 .has_llc = 1,
341 .has_ddi = 1,
66bc2cab 342 .has_fpga_dbg = 1,
fd3c269f
ZY
343 .has_fbc = 1,
344 GEN_DEFAULT_PIPEOFFSETS,
5efb3e28 345 IVB_CURSOR_OFFSETS,
fd3c269f
ZY
346};
347
7d87a7f7
VS
348static const struct intel_device_info intel_cherryview_info = {
349 .is_preliminary = 1,
07fddb14 350 .gen = 8, .num_pipes = 3,
7d87a7f7
VS
351 .need_gfx_hws = 1, .has_hotplug = 1,
352 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
353 .is_valleyview = 1,
354 .display_mmio_offset = VLV_DISPLAY_BASE,
84fd4f4e 355 GEN_CHV_PIPEOFFSETS,
5efb3e28 356 CURSOR_OFFSETS,
7d87a7f7
VS
357};
358
72bbf0af
DL
359static const struct intel_device_info intel_skylake_info = {
360 .is_preliminary = 1,
7201c0b3 361 .is_skylake = 1,
72bbf0af
DL
362 .gen = 9, .num_pipes = 3,
363 .need_gfx_hws = 1, .has_hotplug = 1,
364 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
365 .has_llc = 1,
366 .has_ddi = 1,
043efb11 367 .has_fbc = 1,
72bbf0af
DL
368 GEN_DEFAULT_PIPEOFFSETS,
369 IVB_CURSOR_OFFSETS,
370};
371
a0a18075
JB
372/*
373 * Make sure any device matches here are from most specific to most
374 * general. For example, since the Quanta match is based on the subsystem
375 * and subvendor IDs, we need it to come before the more general IVB
376 * PCI ID matches, otherwise we'll use the wrong info struct above.
377 */
378#define INTEL_PCI_IDS \
379 INTEL_I830_IDS(&intel_i830_info), \
380 INTEL_I845G_IDS(&intel_845g_info), \
381 INTEL_I85X_IDS(&intel_i85x_info), \
382 INTEL_I865G_IDS(&intel_i865g_info), \
383 INTEL_I915G_IDS(&intel_i915g_info), \
384 INTEL_I915GM_IDS(&intel_i915gm_info), \
385 INTEL_I945G_IDS(&intel_i945g_info), \
386 INTEL_I945GM_IDS(&intel_i945gm_info), \
387 INTEL_I965G_IDS(&intel_i965g_info), \
388 INTEL_G33_IDS(&intel_g33_info), \
389 INTEL_I965GM_IDS(&intel_i965gm_info), \
390 INTEL_GM45_IDS(&intel_gm45_info), \
391 INTEL_G45_IDS(&intel_g45_info), \
392 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
393 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
394 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
395 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
396 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
397 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
398 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
399 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
400 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
401 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
402 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
4d4dead6 403 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
fd3c269f
ZY
404 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
405 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
406 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
7d87a7f7 407 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
72bbf0af
DL
408 INTEL_CHV_IDS(&intel_cherryview_info), \
409 INTEL_SKL_IDS(&intel_skylake_info)
a0a18075 410
6103da0d 411static const struct pci_device_id pciidlist[] = { /* aka */
a0a18075 412 INTEL_PCI_IDS,
49ae35f2 413 {0, 0, 0}
1da177e4
LT
414};
415
79e53945
JB
416#if defined(CONFIG_DRM_I915_KMS)
417MODULE_DEVICE_TABLE(pci, pciidlist);
418#endif
419
0206e353 420void intel_detect_pch(struct drm_device *dev)
3bad0781
ZW
421{
422 struct drm_i915_private *dev_priv = dev->dev_private;
bcdb72ac 423 struct pci_dev *pch = NULL;
3bad0781 424
ce1bb329
BW
425 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
426 * (which really amounts to a PCH but no South Display).
427 */
428 if (INTEL_INFO(dev)->num_pipes == 0) {
429 dev_priv->pch_type = PCH_NOP;
ce1bb329
BW
430 return;
431 }
432
3bad0781
ZW
433 /*
434 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
435 * make graphics device passthrough work easy for the VMM, which only
436 * needs to expose the ISA bridge to let the driver know the real hardware
437 * underneath. This is a requirement from the virtualization team.
6a9c4b35
RG
438 *
439 * In some virtualized environments (e.g. XEN), there is an irrelevant
440 * ISA bridge in the system. To work reliably, we should scan through
441 * all the ISA bridge devices and check for the first match, instead
442 * of only checking the first one.
3bad0781 443 */
bcdb72ac 444 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
3bad0781 445 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
bcdb72ac 446 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
17a303ec 447 dev_priv->pch_id = id;
3bad0781 448
90711d50
JB
449 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
450 dev_priv->pch_type = PCH_IBX;
451 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
7fcb83cd 452 WARN_ON(!IS_GEN5(dev));
90711d50 453 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
3bad0781
ZW
454 dev_priv->pch_type = PCH_CPT;
455 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
7fcb83cd 456 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
c792513b
JB
457 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
458 /* PantherPoint is CPT compatible */
459 dev_priv->pch_type = PCH_CPT;
492ab669 460 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
7fcb83cd 461 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
eb877ebf
ED
462 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
463 dev_priv->pch_type = PCH_LPT;
464 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
a35cc9d0
RV
465 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
466 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
e76e0634
BW
467 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
468 dev_priv->pch_type = PCH_LPT;
469 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
a35cc9d0
RV
470 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
471 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
e7e7ea20
S
472 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
473 dev_priv->pch_type = PCH_SPT;
474 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
475 WARN_ON(!IS_SKYLAKE(dev));
e7e7ea20
S
476 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
477 dev_priv->pch_type = PCH_SPT;
478 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
479 WARN_ON(!IS_SKYLAKE(dev));
bcdb72ac
ID
480 } else
481 continue;
482
6a9c4b35 483 break;
3bad0781 484 }
3bad0781 485 }
6a9c4b35 486 if (!pch)
bcdb72ac
ID
487 DRM_DEBUG_KMS("No PCH found.\n");
488
489 pci_dev_put(pch);
3bad0781
ZW
490}
491
2911a35b
BW
492bool i915_semaphore_is_enabled(struct drm_device *dev)
493{
494 if (INTEL_INFO(dev)->gen < 6)
a08acaf2 495 return false;
2911a35b 496
d330a953
JN
497 if (i915.semaphores >= 0)
498 return i915.semaphores;
2911a35b 499
71386ef9
OM
500 /* TODO: make semaphores and Execlists play nicely together */
501 if (i915.enable_execlists)
502 return false;
503
be71eabe
RV
504 /* Until we get further testing... */
505 if (IS_GEN8(dev))
506 return false;
507
59de3295 508#ifdef CONFIG_INTEL_IOMMU
2911a35b 509 /* Enable semaphores on SNB when IO remapping is off */
59de3295
DV
510 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
511 return false;
512#endif
2911a35b 513
a08acaf2 514 return true;
2911a35b
BW
515}
516
1d0d343a
ID
517void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
518{
519 spin_lock_irq(&dev_priv->irq_lock);
520
521 dev_priv->long_hpd_port_mask = 0;
522 dev_priv->short_hpd_port_mask = 0;
523 dev_priv->hpd_event_bits = 0;
524
525 spin_unlock_irq(&dev_priv->irq_lock);
526
527 cancel_work_sync(&dev_priv->dig_port_work);
528 cancel_work_sync(&dev_priv->hotplug_work);
529 cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
530}
531
07f9cd0b
ID
532static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
533{
534 struct drm_device *dev = dev_priv->dev;
535 struct drm_encoder *encoder;
536
537 drm_modeset_lock_all(dev);
538 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
539 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
540
541 if (intel_encoder->suspend)
542 intel_encoder->suspend(intel_encoder);
543 }
544 drm_modeset_unlock_all(dev);
545}
546
ebc32824 547static int intel_suspend_complete(struct drm_i915_private *dev_priv);
1a5df187
PZ
548static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
549 bool rpm_resume);
ebc32824 550
5e365c39 551static int i915_drm_suspend(struct drm_device *dev)
ba8bbcf6 552{
61caf87c 553 struct drm_i915_private *dev_priv = dev->dev_private;
24576d23 554 struct drm_crtc *crtc;
e5747e3a 555 pci_power_t opregion_target_state;
61caf87c 556
b8efb17b
ZR
557 /* ignore lid events during suspend */
558 mutex_lock(&dev_priv->modeset_restore_lock);
559 dev_priv->modeset_restore = MODESET_SUSPENDED;
560 mutex_unlock(&dev_priv->modeset_restore_lock);
561
c67a470b
PZ
562 /* We do a lot of poking in a lot of registers, make sure they work
563 * properly. */
da7e29bd 564 intel_display_set_init_power(dev_priv, true);
cb10799c 565
5bcf719b
DA
566 drm_kms_helper_poll_disable(dev);
567
ba8bbcf6 568 pci_save_state(dev->pdev);
ba8bbcf6 569
5669fcac 570 /* If KMS is active, we do the leavevt stuff here */
226485e9 571 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
db1b76ca
DV
572 int error;
573
45c5f202 574 error = i915_gem_suspend(dev);
84b79f8d 575 if (error) {
226485e9 576 dev_err(&dev->pdev->dev,
84b79f8d
RW
577 "GEM idle failed, resume might fail\n");
578 return error;
579 }
a261b246 580
2eb5252e
ID
581 intel_suspend_gt_powersave(dev);
582
24576d23
JB
583 /*
584 * Disable CRTCs directly since we want to preserve sw state
b04c5bd6 585 * for _thaw. Also, power gate the CRTC power wells.
24576d23 586 */
6e9f798d 587 drm_modeset_lock_all(dev);
b04c5bd6
BF
588 for_each_crtc(dev, crtc)
589 intel_crtc_control(crtc, false);
6e9f798d 590 drm_modeset_unlock_all(dev);
7d708ee4 591
0e32b39c 592 intel_dp_mst_suspend(dev);
09b64267 593
b963291c 594 intel_runtime_pm_disable_interrupts(dev_priv);
1d0d343a 595 intel_hpd_cancel_work(dev_priv);
0e32b39c 596
07f9cd0b
ID
597 intel_suspend_encoders(dev_priv);
598
970104fa 599 intel_suspend_hw(dev);
5669fcac
JB
600 }
601
828c7908
BW
602 i915_gem_suspend_gtt_mappings(dev);
603
9e06dd39
JB
604 i915_save_state(dev);
605
95fa2eee
ID
606 opregion_target_state = PCI_D3cold;
607#if IS_ENABLED(CONFIG_ACPI_SLEEP)
608 if (acpi_target_system_state() < ACPI_STATE_S3)
e5747e3a 609 opregion_target_state = PCI_D1;
95fa2eee 610#endif
e5747e3a
JB
611 intel_opregion_notify_adapter(dev, opregion_target_state);
612
156c7ca0 613 intel_uncore_forcewake_reset(dev, false);
44834a67 614 intel_opregion_fini(dev);
8ee1c3db 615
82e3b8c1 616 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
3fa016a0 617
62d5d69b
MK
618 dev_priv->suspend_count++;
619
85e90679
KCA
620 intel_display_set_init_power(dev_priv, false);
621
61caf87c 622 return 0;
84b79f8d
RW
623}
624
c3c09c95
ID
625static int i915_drm_suspend_late(struct drm_device *drm_dev)
626{
627 struct drm_i915_private *dev_priv = drm_dev->dev_private;
628 int ret;
629
630 ret = intel_suspend_complete(dev_priv);
631
632 if (ret) {
633 DRM_ERROR("Suspend complete failed: %d\n", ret);
634
635 return ret;
636 }
637
638 pci_disable_device(drm_dev->pdev);
639 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
640
641 return 0;
642}
643
fc49b3da 644int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
84b79f8d
RW
645{
646 int error;
647
648 if (!dev || !dev->dev_private) {
649 DRM_ERROR("dev: %p\n", dev);
650 DRM_ERROR("DRM not initialized, aborting suspend.\n");
651 return -ENODEV;
652 }
653
0b14cbd2
ID
654 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
655 state.event != PM_EVENT_FREEZE))
656 return -EINVAL;
5bcf719b
DA
657
658 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
659 return 0;
6eecba33 660
5e365c39 661 error = i915_drm_suspend(dev);
84b79f8d
RW
662 if (error)
663 return error;
664
5a17514e 665 return i915_drm_suspend_late(dev);
ba8bbcf6
JB
666}
667
5e365c39 668static int i915_drm_resume(struct drm_device *dev)
76c4b250
ID
669{
670 struct drm_i915_private *dev_priv = dev->dev_private;
9d49c0ef 671
f4a12ead 672 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
9d49c0ef
PZ
673 mutex_lock(&dev->struct_mutex);
674 i915_gem_restore_gtt_mappings(dev);
675 mutex_unlock(&dev->struct_mutex);
676 }
677
61caf87c 678 i915_restore_state(dev);
44834a67 679 intel_opregion_setup(dev);
61caf87c 680
5669fcac
JB
681 /* KMS EnterVT equivalent */
682 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dde86e2d 683 intel_init_pch_refclk(dev);
754970ee 684 drm_mode_config_reset(dev);
1833b134 685
5669fcac 686 mutex_lock(&dev->struct_mutex);
074c6ada
CW
687 if (i915_gem_init_hw(dev)) {
688 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
689 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
690 }
5669fcac 691 mutex_unlock(&dev->struct_mutex);
226485e9 692
2363d8c9 693 /* We need working interrupts for modeset enabling ... */
b963291c 694 intel_runtime_pm_enable_interrupts(dev_priv);
15239099 695
1833b134 696 intel_modeset_init_hw(dev);
24576d23 697
5ea13be5
JN
698 spin_lock_irq(&dev_priv->irq_lock);
699 if (dev_priv->display.hpd_irq_setup)
700 dev_priv->display.hpd_irq_setup(dev);
701 spin_unlock_irq(&dev_priv->irq_lock);
0e32b39c 702
24576d23
JB
703 drm_modeset_lock_all(dev);
704 intel_modeset_setup_hw_state(dev, true);
705 drm_modeset_unlock_all(dev);
15239099 706
e7d6f7d7
DA
707 intel_dp_mst_resume(dev);
708
15239099
DV
709 /*
710 * ... but also need to make sure that hotplug processing
711 * doesn't cause havoc. Like in the driver load code we don't
712 * bother with the tiny race here where we might lose hotplug
713 * notifications.
714 */
b963291c 715 intel_hpd_init(dev_priv);
bb60b969 716 /* Config may have changed between suspend and resume */
1ff74cf1 717 drm_helper_hpd_irq_event(dev);
d5bb081b 718 }
1daed3fb 719
44834a67
CW
720 intel_opregion_init(dev);
721
82e3b8c1 722 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
073f34d9 723
b8efb17b
ZR
724 mutex_lock(&dev_priv->modeset_restore_lock);
725 dev_priv->modeset_restore = MODESET_DONE;
726 mutex_unlock(&dev_priv->modeset_restore_lock);
8a187455 727
e5747e3a
JB
728 intel_opregion_notify_adapter(dev, PCI_D0);
729
ee6f280e
ID
730 drm_kms_helper_poll_enable(dev);
731
074c6ada 732 return 0;
84b79f8d
RW
733}
734
5e365c39 735static int i915_drm_resume_early(struct drm_device *dev)
84b79f8d 736{
36d61e67 737 struct drm_i915_private *dev_priv = dev->dev_private;
1a5df187 738 int ret = 0;
36d61e67 739
76c4b250
ID
740 /*
741 * We have a resume ordering issue with the snd-hda driver also
742 * requiring our device to be powered up. Due to the lack of a
743 * parent/child relationship we currently solve this with an early
744 * resume hook.
745 *
746 * FIXME: This should be solved with a special hdmi sink device or
747 * similar so that power domains can be employed.
748 */
84b79f8d
RW
749 if (pci_enable_device(dev->pdev))
750 return -EIO;
751
752 pci_set_master(dev->pdev);
753
efee833a 754 if (IS_VALLEYVIEW(dev_priv))
1a5df187 755 ret = vlv_resume_prepare(dev_priv, false);
36d61e67
ID
756 if (ret)
757 DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);
758
759 intel_uncore_early_sanitize(dev, true);
efee833a
PZ
760
761 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
762 hsw_disable_pc8(dev_priv);
763
36d61e67
ID
764 intel_uncore_sanitize(dev);
765 intel_power_domains_init_hw(dev_priv);
766
767 return ret;
76c4b250
ID
768}
769
fc49b3da 770int i915_resume_legacy(struct drm_device *dev)
76c4b250 771{
50a0072f 772 int ret;
76c4b250 773
097dd837
ID
774 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
775 return 0;
776
5e365c39 777 ret = i915_drm_resume_early(dev);
50a0072f
ID
778 if (ret)
779 return ret;
780
5a17514e
ID
781 return i915_drm_resume(dev);
782}
783
11ed50ec 784/**
f3953dcb 785 * i915_reset - reset chip after a hang
11ed50ec 786 * @dev: drm device to reset
11ed50ec
BG
787 *
788 * Reset the chip. Useful if a hang is detected. Returns zero on successful
789 * reset or otherwise an error code.
790 *
791 * Procedure is fairly simple:
792 * - reset the chip using the reset reg
793 * - re-init context state
794 * - re-init hardware status page
795 * - re-init ring buffer
796 * - re-init interrupt state
797 * - re-init display
798 */
d4b8bb2a 799int i915_reset(struct drm_device *dev)
11ed50ec 800{
50227e1c 801 struct drm_i915_private *dev_priv = dev->dev_private;
2e7c8ee7 802 bool simulated;
0573ed4a 803 int ret;
11ed50ec 804
d330a953 805 if (!i915.reset)
d78cb50b
CW
806 return 0;
807
dbea3cea
ID
808 intel_reset_gt_powersave(dev);
809
d54a02c0 810 mutex_lock(&dev->struct_mutex);
11ed50ec 811
069efc1d 812 i915_gem_reset(dev);
77f01230 813
2e7c8ee7
CW
814 simulated = dev_priv->gpu_error.stop_rings != 0;
815
be62acb4
MK
816 ret = intel_gpu_reset(dev);
817
818 /* Also reset the gpu hangman. */
819 if (simulated) {
820 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
821 dev_priv->gpu_error.stop_rings = 0;
822 if (ret == -ENODEV) {
f2d91a2c
DV
823 DRM_INFO("Reset not implemented, but ignoring "
824 "error for simulated gpu hangs\n");
be62acb4
MK
825 ret = 0;
826 }
2e7c8ee7 827 }
be62acb4 828
d8f2716a
DV
829 if (i915_stop_ring_allow_warn(dev_priv))
830 pr_notice("drm/i915: Resetting chip after gpu hang\n");
831
0573ed4a 832 if (ret) {
f2d91a2c 833 DRM_ERROR("Failed to reset chip: %i\n", ret);
f953c935 834 mutex_unlock(&dev->struct_mutex);
f803aa55 835 return ret;
11ed50ec
BG
836 }
837
1362b776
VS
838 intel_overlay_reset(dev_priv);
839
11ed50ec
BG
840 /* Ok, now get things going again... */
841
842 /*
843 * Everything depends on having the GTT running, so we need to start
844 * there. Fortunately we don't need to do this unless we reset the
845 * chip at a PCI level.
846 *
847 * Next we need to restore the context, but we don't use those
848 * yet either...
849 *
850 * Ring buffer needs to be re-initialized in the KMS case, or if X
851 * was running at the time of the reset (i.e. we weren't VT
852 * switched away).
853 */
87255483 854 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
6689c167
MA
855 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
856 dev_priv->gpu_error.reload_in_reset = true;
857
3d57e5bd 858 ret = i915_gem_init_hw(dev);
6689c167
MA
859
860 dev_priv->gpu_error.reload_in_reset = false;
861
8e88a2bd 862 mutex_unlock(&dev->struct_mutex);
3d57e5bd
BW
863 if (ret) {
864 DRM_ERROR("Failed hw init on reset %d\n", ret);
865 return ret;
866 }
f817586c 867
e090c53b 868 /*
78ad455f
DV
869 * FIXME: This races pretty badly against concurrent holders of
870 * ring interrupts. This is possible since we've started to drop
871 * dev->struct_mutex in select places when waiting for the gpu.
e090c53b 872 */
dd0a1aa1 873
78ad455f
DV
874 /*
875 * rps/rc6 re-init is necessary to restore state lost after the
876 * reset and the re-install of gt irqs. Skip for ironlake per
dd0a1aa1 877 * previous concerns that it doesn't respond well to some forms
78ad455f
DV
878 * of re-init after reset.
879 */
dc1d0136 880 if (INTEL_INFO(dev)->gen > 5)
dbea3cea 881 intel_enable_gt_powersave(dev);
bcbc324a
DV
882 } else {
883 mutex_unlock(&dev->struct_mutex);
11ed50ec
BG
884 }
885
11ed50ec
BG
886 return 0;
887}
888
56550d94 889static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
112b715e 890{
01a06850
DV
891 struct intel_device_info *intel_info =
892 (struct intel_device_info *) ent->driver_data;
893
d330a953 894 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
b833d685
BW
895 DRM_INFO("This hardware requires preliminary hardware support.\n"
896 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
897 return -ENODEV;
898 }
899
5fe49d86
CW
900 /* Only bind to function 0 of the device. Early generations
901 * used function 1 as a placeholder for multi-head. This causes
902 * us confusion instead, especially on the systems where both
903 * functions have the same PCI-ID!
904 */
905 if (PCI_FUNC(pdev->devfn))
906 return -ENODEV;
907
24986ee0 908 driver.driver_features &= ~(DRIVER_USE_AGP);
01a06850 909
dcdb1674 910 return drm_get_pci_dev(pdev, ent, &driver);
112b715e
KH
911}
912
913static void
914i915_pci_remove(struct pci_dev *pdev)
915{
916 struct drm_device *dev = pci_get_drvdata(pdev);
917
918 drm_put_dev(dev);
919}
920
84b79f8d 921static int i915_pm_suspend(struct device *dev)
112b715e 922{
84b79f8d
RW
923 struct pci_dev *pdev = to_pci_dev(dev);
924 struct drm_device *drm_dev = pci_get_drvdata(pdev);
112b715e 925
84b79f8d
RW
926 if (!drm_dev || !drm_dev->dev_private) {
927 dev_err(dev, "DRM not initialized, aborting suspend.\n");
928 return -ENODEV;
929 }
112b715e 930
5bcf719b
DA
931 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
932 return 0;
933
5e365c39 934 return i915_drm_suspend(drm_dev);
76c4b250
ID
935}
936
937static int i915_pm_suspend_late(struct device *dev)
938{
888d0d42 939 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
76c4b250
ID
940
941 /*
942 * We have a suspend ordering issue with the snd-hda driver also
943 * requiring our device to be powered up. Due to the lack of a
944 * parent/child relationship we currently solve this with a late
945 * suspend hook.
946 *
947 * FIXME: This should be solved with a special hdmi sink device or
948 * similar so that power domains can be employed.
949 */
950 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
951 return 0;
112b715e 952
c3c09c95 953 return i915_drm_suspend_late(drm_dev);
cbda12d7
ZW
954}
955
76c4b250
ID
956static int i915_pm_resume_early(struct device *dev)
957{
888d0d42 958 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
76c4b250 959
097dd837
ID
960 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
961 return 0;
962
5e365c39 963 return i915_drm_resume_early(drm_dev);
76c4b250
ID
964}
965
84b79f8d 966static int i915_pm_resume(struct device *dev)
cbda12d7 967{
888d0d42 968 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
84b79f8d 969
097dd837
ID
970 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
971 return 0;
972
5a17514e 973 return i915_drm_resume(drm_dev);
cbda12d7
ZW
974}
975
ebc32824 976static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
97bea207 977{
414de7a0 978 hsw_enable_pc8(dev_priv);
0ab9cfeb
ID
979
980 return 0;
97bea207
PZ
981}
982
ddeea5b0
ID
983/*
984 * Save all Gunit registers that may be lost after a D3 and a subsequent
985 * S0i[R123] transition. The list of registers needing a save/restore is
986 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
987 * registers in the following way:
988 * - Driver: saved/restored by the driver
989 * - Punit : saved/restored by the Punit firmware
990 * - No, w/o marking: no need to save/restore, since the register is R/O or
991 * used internally by the HW in a way that doesn't depend on
992 * keeping the content across a suspend/resume.
993 * - Debug : used for debugging
994 *
995 * We save/restore all registers marked with 'Driver', with the following
996 * exceptions:
997 * - Registers out of use, including also registers marked with 'Debug'.
998 * These have no effect on the driver's operation, so we don't save/restore
999 * them to reduce the overhead.
1000 * - Registers that are fully setup by an initialization function called from
1001 * the resume path. For example many clock gating and RPS/RC6 registers.
1002 * - Registers that provide the right functionality with their reset defaults.
1003 *
1004 * TODO: Except for registers that, based on the above 3 criteria, can be safely
1005 * ignored, we save/restore all others, practically treating the HW context as
1006 * a black-box for the driver. Further investigation is needed to reduce the
1007 * saved/restored registers even further, by following the same 3 criteria.
1008 */
1009static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1010{
1011 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1012 int i;
1013
1014 /* GAM 0x4000-0x4770 */
1015 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1016 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1017 s->arb_mode = I915_READ(ARB_MODE);
1018 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1019 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1020
1021 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1022 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
1023
1024 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1025 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
1026
1027 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1028 s->ecochk = I915_READ(GAM_ECOCHK);
1029 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1030 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1031
1032 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1033
1034 /* MBC 0x9024-0x91D0, 0x8500 */
1035 s->g3dctl = I915_READ(VLV_G3DCTL);
1036 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1037 s->mbctl = I915_READ(GEN6_MBCTL);
1038
1039 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1040 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1041 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1042 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1043 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1044 s->rstctl = I915_READ(GEN6_RSTCTL);
1045 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1046
1047 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1048 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1049 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1050 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1051 s->ecobus = I915_READ(ECOBUS);
1052 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1053 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1054 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1055 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1056 s->rcedata = I915_READ(VLV_RCEDATA);
1057 s->spare2gh = I915_READ(VLV_SPAREG2H);
1058
1059 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1060 s->gt_imr = I915_READ(GTIMR);
1061 s->gt_ier = I915_READ(GTIER);
1062 s->pm_imr = I915_READ(GEN6_PMIMR);
1063 s->pm_ier = I915_READ(GEN6_PMIER);
1064
1065 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1066 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1067
1068 /* GT SA CZ domain, 0x100000-0x138124 */
1069 s->tilectl = I915_READ(TILECTL);
1070 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1071 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1072 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1073 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1074
1075 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1076 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1077 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1078 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1079
1080 /*
1081 * Not saving any of:
1082 * DFT, 0x9800-0x9EC0
1083 * SARB, 0xB000-0xB1FC
1084 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1085 * PCI CFG
1086 */
1087}
1088
1089static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1090{
1091 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1092 u32 val;
1093 int i;
1094
1095 /* GAM 0x4000-0x4770 */
1096 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1097 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1098 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1099 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1100 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1101
1102 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1103 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1104
1105 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1106 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1107
1108 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1109 I915_WRITE(GAM_ECOCHK, s->ecochk);
1110 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1111 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1112
1113 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1114
1115 /* MBC 0x9024-0x91D0, 0x8500 */
1116 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1117 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1118 I915_WRITE(GEN6_MBCTL, s->mbctl);
1119
1120 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1121 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1122 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1123 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1124 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1125 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1126 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1127
1128 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1129 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1130 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1131 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1132 I915_WRITE(ECOBUS, s->ecobus);
1133 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1134 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
1135 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1136 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1137 I915_WRITE(VLV_RCEDATA, s->rcedata);
1138 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1139
1140 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1141 I915_WRITE(GTIMR, s->gt_imr);
1142 I915_WRITE(GTIER, s->gt_ier);
1143 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1144 I915_WRITE(GEN6_PMIER, s->pm_ier);
1145
1146 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1147 I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1148
1149 /* GT SA CZ domain, 0x100000-0x138124 */
1150 I915_WRITE(TILECTL, s->tilectl);
1151 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1152 /*
1153 * Preserve the GT allow wake and GFX force clock bits; they are not
1154 * restored here, as they are used to control the s0ix suspend/resume
1155 * sequence by the caller.
1156 */
1157 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1158 val &= VLV_GTLC_ALLOWWAKEREQ;
1159 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1160 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1161
1162 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1163 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1164 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1165 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1166
1167 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1168
1169 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1170 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1171 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1172 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1173}
1174
650ad970
ID
1175int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1176{
1177 u32 val;
1178 int err;
1179
1180 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1181 WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
1182
1183#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1184 /* Wait for a previous force-off to settle */
1185 if (force_on) {
8d4eee9c 1186 err = wait_for(!COND, 20);
650ad970
ID
1187 if (err) {
1188 DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
1189 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1190 return err;
1191 }
1192 }
1193
1194 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1195 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1196 if (force_on)
1197 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1198 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1199
1200 if (!force_on)
1201 return 0;
1202
8d4eee9c 1203 err = wait_for(COND, 20);
650ad970
ID
1204 if (err)
1205 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1206 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1207
1208 return err;
1209#undef COND
1210}
1211
ddeea5b0
ID
1212static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1213{
1214 u32 val;
1215 int err = 0;
1216
1217 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1218 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1219 if (allow)
1220 val |= VLV_GTLC_ALLOWWAKEREQ;
1221 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1222 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1223
1224#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1225 allow)
1226 err = wait_for(COND, 1);
1227 if (err)
1228 DRM_ERROR("timeout disabling GT waking\n");
1229 return err;
1230#undef COND
1231}
1232
1233static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1234 bool wait_for_on)
1235{
1236 u32 mask;
1237 u32 val;
1238 int err;
1239
1240 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1241 val = wait_for_on ? mask : 0;
1242#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1243 if (COND)
1244 return 0;
1245
1246 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1247 wait_for_on ? "on" : "off",
1248 I915_READ(VLV_GTLC_PW_STATUS));
1249
1250 /*
1251 * RC6 transitioning can be delayed up to 2 msec (see
1252 * valleyview_enable_rps), use 3 msec for safety.
1253 */
1254 err = wait_for(COND, 3);
1255 if (err)
1256 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1257 wait_for_on ? "on" : "off");
1258
1259 return err;
1260#undef COND
1261}
1262
1263static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1264{
1265 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1266 return;
1267
1268 DRM_ERROR("GT register access while GT waking disabled\n");
1269 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1270}
1271
ebc32824 1272static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
ddeea5b0
ID
1273{
1274 u32 mask;
1275 int err;
1276
1277 /*
1278 * Bspec defines the following GT well-on flags as debug only, so
1279 * don't treat them as hard failures.
1280 */
1281 (void)vlv_wait_for_gt_wells(dev_priv, false);
1282
1283 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1284 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1285
1286 vlv_check_no_gt_access(dev_priv);
1287
1288 err = vlv_force_gfx_clock(dev_priv, true);
1289 if (err)
1290 goto err1;
1291
1292 err = vlv_allow_gt_wake(dev_priv, false);
1293 if (err)
1294 goto err2;
98711167
D
1295
1296 if (!IS_CHERRYVIEW(dev_priv->dev))
1297 vlv_save_gunit_s0ix_state(dev_priv);
ddeea5b0
ID
1298
1299 err = vlv_force_gfx_clock(dev_priv, false);
1300 if (err)
1301 goto err2;
1302
1303 return 0;
1304
1305err2:
1306 /* For safety always re-enable waking and disable gfx clock forcing */
1307 vlv_allow_gt_wake(dev_priv, true);
1308err1:
1309 vlv_force_gfx_clock(dev_priv, false);
1310
1311 return err;
1312}
1313
016970be
SK
1314static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1315 bool rpm_resume)
ddeea5b0
ID
1316{
1317 struct drm_device *dev = dev_priv->dev;
1318 int err;
1319 int ret;
1320
1321 /*
1322 * If any of the steps fail just try to continue, that's the best we
1323 * can do at this point. Return the first error code (which will also
1324 * leave RPM permanently disabled).
1325 */
1326 ret = vlv_force_gfx_clock(dev_priv, true);
1327
98711167
D
1328 if (!IS_CHERRYVIEW(dev_priv->dev))
1329 vlv_restore_gunit_s0ix_state(dev_priv);
ddeea5b0
ID
1330
1331 err = vlv_allow_gt_wake(dev_priv, true);
1332 if (!ret)
1333 ret = err;
1334
1335 err = vlv_force_gfx_clock(dev_priv, false);
1336 if (!ret)
1337 ret = err;
1338
1339 vlv_check_no_gt_access(dev_priv);
1340
016970be
SK
1341 if (rpm_resume) {
1342 intel_init_clock_gating(dev);
1343 i915_gem_restore_fences(dev);
1344 }
ddeea5b0
ID
1345
1346 return ret;
1347}
1348
97bea207 1349static int intel_runtime_suspend(struct device *device)
8a187455
PZ
1350{
1351 struct pci_dev *pdev = to_pci_dev(device);
1352 struct drm_device *dev = pci_get_drvdata(pdev);
1353 struct drm_i915_private *dev_priv = dev->dev_private;
0ab9cfeb 1354 int ret;
8a187455 1355
aeab0b5a 1356 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
c6df39b5
ID
1357 return -ENODEV;
1358
604effb7
ID
1359 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1360 return -ENODEV;
1361
8a187455
PZ
1362 DRM_DEBUG_KMS("Suspending device\n");
1363
d6102977
ID
1364 /*
1365 * We could deadlock here in case another thread holding struct_mutex
1366 * calls RPM suspend concurrently, since the RPM suspend will wait
1367 * first for this RPM suspend to finish. In this case the concurrent
1368 * RPM resume will be followed by its RPM suspend counterpart. Still
1369 * for consistency return -EAGAIN, which will reschedule this suspend.
1370 */
1371 if (!mutex_trylock(&dev->struct_mutex)) {
1372 DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
1373 /*
1374 * Bump the expiration timestamp, otherwise the suspend won't
1375 * be rescheduled.
1376 */
1377 pm_runtime_mark_last_busy(device);
1378
1379 return -EAGAIN;
1380 }
1381 /*
1382 * We are safe here against re-faults, since the fault handler takes
1383 * an RPM reference.
1384 */
1385 i915_gem_release_all_mmaps(dev_priv);
1386 mutex_unlock(&dev->struct_mutex);
1387
fac6adb0 1388 intel_suspend_gt_powersave(dev);
2eb5252e 1389 intel_runtime_pm_disable_interrupts(dev_priv);
b5478bcd 1390
ebc32824 1391 ret = intel_suspend_complete(dev_priv);
0ab9cfeb
ID
1392 if (ret) {
1393 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
b963291c 1394 intel_runtime_pm_enable_interrupts(dev_priv);
0ab9cfeb
ID
1395
1396 return ret;
1397 }
a8a8bd54 1398
737b1506 1399 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
dc9fb09c 1400 intel_uncore_forcewake_reset(dev, false);
8a187455 1401 dev_priv->pm.suspended = true;
1fb2362b
KCA
1402
1403 /*
c8a0bd42
PZ
1404 * FIXME: We really should find a document that references the arguments
1405 * used below!
1fb2362b 1406 */
c8a0bd42
PZ
1407 if (IS_HASWELL(dev)) {
1408 /*
1409 * current versions of firmware which depend on this opregion
1410 * notification have repurposed the D1 definition to mean
1411 * "runtime suspended" vs. what you would normally expect (D3)
1412 * to distinguish it from notifications that might be sent via
1413 * the suspend path.
1414 */
1415 intel_opregion_notify_adapter(dev, PCI_D1);
1416 } else {
1417 /*
1418 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1419 * being detected, and the call we do at intel_runtime_resume()
1420 * won't be able to restore them. Since PCI_D3hot matches the
1421 * actual specification and appears to be working, use it. Let's
1422 * assume the other non-Haswell platforms will stay the same as
1423 * Broadwell.
1424 */
1425 intel_opregion_notify_adapter(dev, PCI_D3hot);
1426 }
8a187455 1427
59bad947 1428 assert_forcewakes_inactive(dev_priv);
dc9fb09c 1429
a8a8bd54 1430 DRM_DEBUG_KMS("Device suspended\n");
8a187455
PZ
1431 return 0;
1432}
1433
97bea207 1434static int intel_runtime_resume(struct device *device)
8a187455
PZ
1435{
1436 struct pci_dev *pdev = to_pci_dev(device);
1437 struct drm_device *dev = pci_get_drvdata(pdev);
1438 struct drm_i915_private *dev_priv = dev->dev_private;
1a5df187 1439 int ret = 0;
8a187455 1440
604effb7
ID
1441 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1442 return -ENODEV;
8a187455
PZ
1443
1444 DRM_DEBUG_KMS("Resuming device\n");
1445
cd2e9e90 1446 intel_opregion_notify_adapter(dev, PCI_D0);
8a187455
PZ
1447 dev_priv->pm.suspended = false;
1448
1a5df187
PZ
1449 if (IS_GEN6(dev_priv))
1450 intel_init_pch_refclk(dev);
1451 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1452 hsw_disable_pc8(dev_priv);
1453 else if (IS_VALLEYVIEW(dev_priv))
1454 ret = vlv_resume_prepare(dev_priv, true);
1455
0ab9cfeb
ID
1456 /*
1457 * No point of rolling back things in case of an error, as the best
1458 * we can do is to hope that things will still work (and disable RPM).
1459 */
92b806d3
ID
1460 i915_gem_init_swizzling(dev);
1461 gen6_update_ring_freq(dev);
1462
b963291c 1463 intel_runtime_pm_enable_interrupts(dev_priv);
fac6adb0 1464 intel_enable_gt_powersave(dev);
b5478bcd 1465
0ab9cfeb
ID
1466 if (ret)
1467 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1468 else
1469 DRM_DEBUG_KMS("Device resumed\n");
1470
1471 return ret;
8a187455
PZ
1472}
1473
016970be
SK
1474/*
1475 * This function implements the common functionality of the runtime and
1476 * system suspend sequences.
1477 */
ebc32824
SK
1478static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1479{
1480 struct drm_device *dev = dev_priv->dev;
1481 int ret;
1482
604effb7 1483 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ebc32824 1484 ret = hsw_suspend_complete(dev_priv);
604effb7 1485 else if (IS_VALLEYVIEW(dev))
ebc32824 1486 ret = vlv_suspend_complete(dev_priv);
604effb7
ID
1487 else
1488 ret = 0;
ebc32824
SK
1489
1490 return ret;
1491}
1492
b4b78d12 1493static const struct dev_pm_ops i915_pm_ops = {
5545dbbf
ID
1494 /*
1495 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1496 * PMSG_RESUME]
1497 */
0206e353 1498 .suspend = i915_pm_suspend,
76c4b250
ID
1499 .suspend_late = i915_pm_suspend_late,
1500 .resume_early = i915_pm_resume_early,
0206e353 1501 .resume = i915_pm_resume,
5545dbbf
ID
1502
1503 /*
1504 * S4 event handlers
1505 * @freeze, @freeze_late : called (1) before creating the
1506 * hibernation image [PMSG_FREEZE] and
1507 * (2) after rebooting, before restoring
1508 * the image [PMSG_QUIESCE]
1509 * @thaw, @thaw_early : called (1) after creating the hibernation
1510 * image, before writing it [PMSG_THAW]
1511 * and (2) after failing to create or
1512 * restore the image [PMSG_RECOVER]
1513 * @poweroff, @poweroff_late: called after writing the hibernation
1514 * image, before rebooting [PMSG_HIBERNATE]
1515 * @restore, @restore_early : called after rebooting and restoring the
1516 * hibernation image [PMSG_RESTORE]
1517 */
36d61e67
ID
1518 .freeze = i915_pm_suspend,
1519 .freeze_late = i915_pm_suspend_late,
1520 .thaw_early = i915_pm_resume_early,
1521 .thaw = i915_pm_resume,
1522 .poweroff = i915_pm_suspend,
da2bc1b9 1523 .poweroff_late = i915_pm_suspend_late,
76c4b250 1524 .restore_early = i915_pm_resume_early,
0206e353 1525 .restore = i915_pm_resume,
5545dbbf
ID
1526
1527 /* S0ix (via runtime suspend) event handlers */
97bea207
PZ
1528 .runtime_suspend = intel_runtime_suspend,
1529 .runtime_resume = intel_runtime_resume,
cbda12d7
ZW
1530};
1531
78b68556 1532static const struct vm_operations_struct i915_gem_vm_ops = {
de151cf6 1533 .fault = i915_gem_fault,
ab00b3e5
JB
1534 .open = drm_gem_vm_open,
1535 .close = drm_gem_vm_close,
de151cf6
JB
1536};
1537
e08e96de
AV
1538static const struct file_operations i915_driver_fops = {
1539 .owner = THIS_MODULE,
1540 .open = drm_open,
1541 .release = drm_release,
1542 .unlocked_ioctl = drm_ioctl,
1543 .mmap = drm_gem_mmap,
1544 .poll = drm_poll,
e08e96de
AV
1545 .read = drm_read,
1546#ifdef CONFIG_COMPAT
1547 .compat_ioctl = i915_compat_ioctl,
1548#endif
1549 .llseek = noop_llseek,
1550};
1551
1da177e4 1552static struct drm_driver driver = {
0c54781b
MW
1553 /* Don't use MTRRs here; the Xserver or userspace app should
1554 * deal with them for Intel hardware.
792d2b9a 1555 */
673a394b 1556 .driver_features =
24986ee0 1557 DRIVER_USE_AGP |
10ba5012
KH
1558 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1559 DRIVER_RENDER,
22eae947 1560 .load = i915_driver_load,
ba8bbcf6 1561 .unload = i915_driver_unload,
673a394b 1562 .open = i915_driver_open,
22eae947
DA
1563 .lastclose = i915_driver_lastclose,
1564 .preclose = i915_driver_preclose,
673a394b 1565 .postclose = i915_driver_postclose,
915b4d11 1566 .set_busid = drm_pci_set_busid,
d8e29209
RW
1567
1568 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
fc49b3da 1569 .suspend = i915_suspend_legacy,
76c4b250 1570 .resume = i915_resume_legacy,
d8e29209 1571
cda17380 1572 .device_is_agp = i915_driver_device_is_agp,
955b12de 1573#if defined(CONFIG_DEBUG_FS)
27c202ad
BG
1574 .debugfs_init = i915_debugfs_init,
1575 .debugfs_cleanup = i915_debugfs_cleanup,
955b12de 1576#endif
673a394b 1577 .gem_free_object = i915_gem_free_object,
de151cf6 1578 .gem_vm_ops = &i915_gem_vm_ops,
1286ff73
DV
1579
1580 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1581 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1582 .gem_prime_export = i915_gem_prime_export,
1583 .gem_prime_import = i915_gem_prime_import,
1584
ff72145b 1585 .dumb_create = i915_gem_dumb_create,
da6b51d0 1586 .dumb_map_offset = i915_gem_mmap_gtt,
43387b37 1587 .dumb_destroy = drm_gem_dumb_destroy,
1da177e4 1588 .ioctls = i915_ioctls,
e08e96de 1589 .fops = &i915_driver_fops,
22eae947
DA
1590 .name = DRIVER_NAME,
1591 .desc = DRIVER_DESC,
1592 .date = DRIVER_DATE,
1593 .major = DRIVER_MAJOR,
1594 .minor = DRIVER_MINOR,
1595 .patchlevel = DRIVER_PATCHLEVEL,
1da177e4
LT
1596};
1597
8410ea3b
DA
1598static struct pci_driver i915_pci_driver = {
1599 .name = DRIVER_NAME,
1600 .id_table = pciidlist,
1601 .probe = i915_pci_probe,
1602 .remove = i915_pci_remove,
1603 .driver.pm = &i915_pm_ops,
1604};
1605
1da177e4
LT
1606static int __init i915_init(void)
1607{
1608 driver.num_ioctls = i915_max_ioctl;
79e53945
JB
1609
1610 /*
1611 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
1612 * explicitly disabled with the module parameter.
1613 *
1614 * Otherwise, just follow the parameter (defaulting to off).
1615 *
1616 * Allow optional vga_text_mode_force boot option to override
1617 * the default behavior.
1618 */
1619#if defined(CONFIG_DRM_I915_KMS)
d330a953 1620 if (i915.modeset != 0)
79e53945
JB
1621 driver.driver_features |= DRIVER_MODESET;
1622#endif
d330a953 1623 if (i915.modeset == 1)
79e53945
JB
1624 driver.driver_features |= DRIVER_MODESET;
1625
1626#ifdef CONFIG_VGA_CONSOLE
d330a953 1627 if (vgacon_text_force() && i915.modeset == -1)
79e53945
JB
1628 driver.driver_features &= ~DRIVER_MODESET;
1629#endif
1630
b30324ad 1631 if (!(driver.driver_features & DRIVER_MODESET)) {
3885c6bb 1632 driver.get_vblank_timestamp = NULL;
b30324ad
DV
1633#ifndef CONFIG_DRM_I915_UMS
1634 /* Silently fail loading to not upset userspace. */
c9cd7b65 1635 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
b30324ad
DV
1636 return 0;
1637#endif
1638 }
3885c6bb 1639
b2e7723b
MR
1640 /*
1641 * FIXME: Note that we're lying to the DRM core here so that we can get access
1642 * to the atomic ioctl and the atomic properties. Only plane operations on
1643 * a single CRTC will actually work.
1644 */
1645 if (i915.nuclear_pageflip)
1646 driver.driver_features |= DRIVER_ATOMIC;
1647
8410ea3b 1648 return drm_pci_init(&driver, &i915_pci_driver);
1da177e4
LT
1649}
1650
1651static void __exit i915_exit(void)
1652{
b33ecdd1
DV
1653#ifndef CONFIG_DRM_I915_UMS
1654 if (!(driver.driver_features & DRIVER_MODESET))
1655 return; /* Never loaded a driver. */
1656#endif
1657
8410ea3b 1658 drm_pci_exit(&driver, &i915_pci_driver);
1da177e4
LT
1659}
1660
1661module_init(i915_init);
1662module_exit(i915_exit);
1663
0a6d1631 1664MODULE_AUTHOR("Tungsten Graphics, Inc.");
1eab9234 1665MODULE_AUTHOR("Intel Corporation");
0a6d1631 1666
b5e89ed5 1667MODULE_DESCRIPTION(DRIVER_DESC);
1da177e4 1668MODULE_LICENSE("GPL and additional rights");