/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

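/*
 * The __raw_* helpers below access the MMIO BAR directly through
 * dev_priv->regs. They take no locks and do no forcewake or FIFO handling,
 * so they are only suitable for registers that are always powered, or for
 * paths (such as the forcewake code itself) that manage the device power
 * state by hand.
 */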
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)


static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

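/*
 * The forcewake handshake used below: wait for any previous ack to clear,
 * request the wake by writing FORCEWAKE, wait for the hardware to ack the
 * request, and finally wait for the GT thread to wake up (the
 * WaRsForcewakeWaitTC0 workaround) before trusting register reads.
 */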
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
		 "MMIO read or write has been dropped %x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

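/*
 * The GT exposes a small FIFO of pending MMIO accesses via
 * GT_FIFO_FREE_ENTRIES. Before issuing a write that needs forcewake we make
 * sure more than GT_FIFO_NUM_RESERVED_ENTRIES slots are free, polling in
 * 10us steps for up to 500 iterations; dropped accesses show up in GTFIFODBG
 * (see gen6_gt_check_fifodbg() above).
 */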
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

static void gen6_force_wake_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_val;

	intel_uncore_forcewake_reset(dev);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);

	/* Turn off power gating, needed especially on BIOS-less systems */
	if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence. (A short usage sketch
 * follows gen6_gt_force_wake_put() below.)
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->uncore.force_wake_work,
				 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
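
/*
 * Usage sketch (illustrative only, not a caller in this file): a sequence
 * that must keep the GT awake across several register accesses would look
 * roughly like this, given a valid dev_priv:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... a run of I915_READ()/I915_WRITE() on GT registers ...
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * The pair is reference counted, so callers may nest; the final release is
 * deferred slightly via force_wake_work rather than dropped immediately.
 */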

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
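/*
 * The assumption encoded above: on platforms with forcewake, only registers
 * below 0x40000 (the GT range) sit behind the power well, and FORCEWAKE
 * itself must always take the fast path.
 */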

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (dev_priv->info->gen == 5) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}

__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
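/*
 * The instantiations above emit i915_read8/16/32/64(), which the driver's
 * I915_READ*() macros are presumed to wrap (an assumption here; they live
 * outside this file). Each read takes uncore.lock, issues the gen5 dummy
 * wake-up write, and brackets the access with a forcewake get/put only when
 * no one already holds a forcewake reference.
 */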

#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
	unsigned long irqflags; \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (dev_priv->info->gen == 5) \
		ilk_dummy_write(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
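/*
 * Example: the entry above uses gen_bitmask 0xF0, i.e.
 * (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7), so the register is readable on
 * gen4 through gen7 devices.
 */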

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

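/*
 * Userspace usage sketch (illustrative, not part of this file): reading the
 * whitelisted render ring timestamp through the ioctl, assuming libdrm's
 * drmIoctl() and an already-open i915 DRM fd; error handling omitted.
 *
 *	#include <drm/i915_drm.h>
 *	#include <xf86drm.h>
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358,	// RING_TIMESTAMP(RENDER_RING_BASE)
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("render timestamp: 0x%llx\n", (unsigned long long)rr.val);
 */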
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX needs spinlock around caller's grouping */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}