drm/i915: Don't do posting reads on getting forcewake
drivers/gpu/drm/i915/intel_uncore.c [linux-2.6-block.git]
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
                  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        WARN_ON(d->reg_set == 0);
        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
        /* something from same cacheline, but not from the set register */
        if (d->reg_post)
                __raw_posting_read(d->i915, d->reg_post);
}
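
/*
 * Note: per the commit this listing belongs to, posting reads are no longer
 * done when getting forcewake; fw_domain_posting_read() is only used on the
 * put/reset paths below. Reading back a register in the same cacheline
 * flushes the preceding release write out of the PCI write buffers, so the
 * hardware observes it promptly.
 */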

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
                fw_domain_wait_ack(d);
        }
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        /* No need to do this for all domains, the first one found is enough */
        for_each_fw_domain(d, dev_priv, id) {
                fw_domain_posting_read(d);
                break;
        }
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        WARN_ON(dev_priv->uncore.fw_domains == 0);

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
                fw_domain_reset(d);

        fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(dev_priv, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        fw_domains_put(dev_priv, fw_domains);
        gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW, so we need to
         * re-read FREE_ENTRIES every time. */
        if (IS_VALLEYVIEW(dev_priv->dev))
                dev_priv->uncore.fifo_count =
                        __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                                GT_FIFO_FREE_ENTRIES_MASK;

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

static void intel_uncore_fw_release_timer(unsigned long arg)
{
        struct intel_uncore_forcewake_domain *domain = (void *)arg;
        unsigned long irqflags;

        assert_device_not_suspended(domain->i915);

        spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;

        if (--domain->wake_count == 0)
                domain->i915->uncore.funcs.force_wake_put(domain->i915,
                                                          1 << domain->id);

        spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domain_id id;
        enum forcewake_domains fw = 0, active_domains;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                active_domains = 0;

                for_each_fw_domain(domain, dev_priv, id) {
                        if (del_timer_sync(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer((unsigned long)domain);
                }

                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

                for_each_fw_domain(domain, dev_priv, id) {
                        if (timer_pending(&domain->timer))
                                active_domains |= (1 << id);
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
                cond_resched();
        }

        WARN_ON(active_domains);

        for_each_fw_domain(domain, dev_priv, id)
                if (domain->wake_count)
                        fw |= 1 << id;

        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

        fw_domains_reset(dev_priv, FORCEWAKE_ALL);

        if (restore) { /* If reset with a user forcewake, try to restore */
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev) || IS_GEN7(dev))
                        dev_priv->uncore.fifo_count =
                                __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                GT_FIFO_FREE_ENTRIES_MASK;
        }

        if (!restore)
                assert_forcewakes_inactive(dev_priv);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
                /* The docs do not explain exactly how the calculation can be
                 * made. It is somewhat guessable, but for now, it's always
                 * 128MB.
                 * NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
                                          bool restore_forcewake)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
        __intel_uncore_early_sanitize(dev, restore_forcewake);
        i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->pm.suspended);

        fw_domains &= dev_priv->uncore.fw_domains;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count++)
                        fw_domains &= ~(1 << id);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
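
/*
 * Usage sketch (illustrative only, not a call site in this file): a caller
 * that must keep the render well awake across several accesses brackets
 * them with a get/put pair:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... a sequence of I915_READ()/I915_WRITE() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */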

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewake references for the
 * specified domains, as obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        fw_domains &= dev_priv->uncore.fw_domains;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;

                if (--domain->wake_count)
                        continue;

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
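
/*
 * Note that the put above is deferred: dropping the last reference re-arms
 * a one-jiffy timer via fw_domain_arm_timer(), and
 * intel_uncore_fw_release_timer() performs the actual hardware release, so
 * rapid get/put cycles do not bang the forcewake registers.
 */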

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        for_each_fw_domain(domain, dev_priv, id)
                WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && (reg) != FORCEWAKE)
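
/* Worked example (illustrative): RING_TAIL(RENDER_RING_BASE) is 0x2030,
 * which is below 0x40000 and not the FORCEWAKE register itself, so it takes
 * the forcewake path; registers at 0x40000 and above (e.g. display) do not.
 */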

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5000, 0x8000) || \
         REG_RANGE((reg), 0xB000, 0x12000) || \
         REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x22000, 0x24000) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0xB000, 0xB480) || \
         REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8800, 0x8900) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1C000) || \
         REG_RANGE((reg), 0x1E800, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x4000, 0x5000) || \
         REG_RANGE((reg), 0x8000, 0x8300) || \
         REG_RANGE((reg), 0x8500, 0x8600) || \
         REG_RANGE((reg), 0x9000, 0xB000) || \
         REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x2700) || \
         REG_RANGE((reg), 0x3000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
         REG_RANGE((reg), 0x8140, 0x8160) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0x8C00, 0x8D00) || \
         REG_RANGE((reg), 0xB000, 0xB480) || \
         REG_RANGE((reg), 0xE000, 0xE900) || \
         REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8130, 0x8140) || \
         REG_RANGE((reg), 0x8800, 0x8A00) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
        ((reg) < 0x40000 && \
         !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
                        bool before)
{
        const char *op = read ? "reading" : "writing to";
        const char *when = before ? "before" : "after";

        if (!i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
                     when, op, reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
        if (i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

#define GEN2_READ_HEADER(x) \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
                                    enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (WARN_ON(!fw_domains))
                return;

        /* Ideally GCC would be able to constant-fold and eliminate this loop */
        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count) {
                        fw_domains &= ~(1 << id);
                        continue;
                }

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        val = __raw_i915_read##x(dev_priv, reg); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, \
                                 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER; \
        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_MEDIA; \
        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
        FORCEWAKE_MT,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        /* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
                if (reg == gen8_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN6_WRITE_HEADER; \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        __raw_i915_write##x(dev_priv, reg, val); \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        bool shadowed = is_gen8_shadowed(dev_priv, reg); \
        GEN6_WRITE_HEADER; \
        if (!shadowed) { \
                if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
                else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
                else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        FORCEWAKE_BLITTER_GEN9,
        FORCEWAKE_RENDER_GEN9,
        FORCEWAKE_MEDIA_GEN9,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        /* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
                if (reg == gen9_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
                bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
            is_gen9_shadowed(dev_priv, reg)) \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER; \
        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_MEDIA; \
        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
        dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

static void fw_domain_init(struct drm_i915_private *dev_priv,
                           enum forcewake_domain_id domain_id,
                           u32 reg_set, u32 reg_ack)
{
        struct intel_uncore_forcewake_domain *d;

        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
                return;

        d = &dev_priv->uncore.fw_domain[domain_id];

        WARN_ON(d->wake_count);

        d->wake_count = 0;
        d->reg_set = reg_set;
        d->reg_ack = reg_ack;

        if (IS_GEN6(dev_priv)) {
                d->val_reset = 0;
                d->val_set = FORCEWAKE_KERNEL;
                d->val_clear = 0;
        } else {
                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
        }

        if (IS_VALLEYVIEW(dev_priv))
                d->reg_post = FORCEWAKE_ACK_VLV;
        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
                d->reg_post = ECOBUS;
        else
                d->reg_post = 0;

        d->i915 = dev_priv;
        d->id = domain_id;

        setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

        dev_priv->uncore.fw_domains |= (1 << domain_id);

        fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_GEN9(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                if (!IS_CHERRYVIEW(dev))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT access is
                 * not working.
                 */
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);

                mutex_lock(&dev->struct_mutex);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }
}

void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_uncore_ellc_detect(dev);
        intel_uncore_fw_domains_init(dev);
        __intel_uncore_early_sanitize(dev, false);

        switch (INTEL_INFO(dev)->gen) {
        default:
                MISSING_CASE(INTEL_INFO(dev)->gen);
                return;
        case 9:
                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
                ASSIGN_READ_MMIO_VFUNCS(gen9);
                break;
        case 8:
                if (IS_CHERRYVIEW(dev)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(chv);
                        ASSIGN_READ_MMIO_VFUNCS(chv);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(hsw);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen6);
                }

                if (IS_VALLEYVIEW(dev)) {
                        ASSIGN_READ_MMIO_VFUNCS(vlv);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
                ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
                ASSIGN_READ_MMIO_VFUNCS(gen2);
                break;
        }

        i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
        intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
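
/*
 * Worked example: GEN_RANGE(4, 9) expands to GENMASK(9, 4) == 0x3f0, so the
 * RING_TIMESTAMP entry above matches gens 4-9 via the
 * "(1 << gen) & gen_bitmask" test in i915_reg_read_ioctl() below.
 */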

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        intel_runtime_pm_get(dev_priv);

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}
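
/*
 * Userspace sketch (illustrative, not part of this file): the whitelist
 * entry above is reachable through the I915_REG_READ ioctl, roughly:
 *
 *	struct drm_i915_reg_read rr = { .offset = 0x2358 };  (RING_TIMESTAMP)
 *	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
 *	rr.val then holds the 64-bit render ring timestamp.
 */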

int i915_get_reset_stats_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        struct intel_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }
        hs = &ctx->hang_stats;

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = hs->batch_active;
        args->batch_pending = hs->batch_pending;

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
        u8 gdrst;

        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
        /* assert reset for at least 20 usec */
        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        udelay(20);
        pci_write_config_byte(dev->pdev, I915_GDRST, 0);

        return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
        u8 gdrst;

        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        ret = wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I915_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

        return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev, true);

        return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 6)
                return gen6_do_reset(dev);
        else if (IS_GEN5(dev))
                return ironlake_do_reset(dev);
        else if (IS_G4X(dev))
                return g4x_do_reset(dev);
        else if (IS_G33(dev))
                return g33_do_reset(dev);
        else if (INTEL_INFO(dev)->gen >= 3)
                return i915_do_reset(dev);
        else
                return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}