2b4256afc40e7c89c57dd70be6b4544279208c12
[linux-block.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42
/* Selector values stashed in the debugfs node's info_ent->data to pick
 * which object list i915_gem_object_list_info()/i915_gem_gtt_info() dump. */
enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
48
49 /* As the drm_debugfs_init() routines are called before dev->dev_private is
50  * allocated we need to hook into the minor for release. */
51 static int
52 drm_add_fake_info_node(struct drm_minor *minor,
53                        struct dentry *ent,
54                        const void *key)
55 {
56         struct drm_info_node *node;
57
58         node = kmalloc(sizeof(*node), GFP_KERNEL);
59         if (node == NULL) {
60                 debugfs_remove(ent);
61                 return -ENOMEM;
62         }
63
64         node->minor = minor;
65         node->dent = ent;
66         node->info_ent = (void *) key;
67
68         mutex_lock(&minor->debugfs_lock);
69         list_add(&node->list, &minor->debugfs_list);
70         mutex_unlock(&minor->debugfs_lock);
71
72         return 0;
73 }
74
/* Dump the device generation, PCH type and every device-info feature flag
 * for the GPU behind this debugfs node. */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
	/* One "flag: yes/no" line per feature flag; SEP_SEMICOLON turns the
	 * X-macro expansion into a sequence of statements. */
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
91
92 static const char get_active_flag(struct drm_i915_gem_object *obj)
93 {
94         return obj->active ? '*' : ' ';
95 }
96
97 static const char get_pin_flag(struct drm_i915_gem_object *obj)
98 {
99         return obj->pin_display ? 'p' : ' ';
100 }
101
102 static const char get_tiling_flag(struct drm_i915_gem_object *obj)
103 {
104         switch (obj->tiling_mode) {
105         default:
106         case I915_TILING_NONE: return ' ';
107         case I915_TILING_X: return 'X';
108         case I915_TILING_Y: return 'Y';
109         }
110 }
111
/* Status flag for describe_obj(): 'g' when the object has a GGTT vma.
 * const dropped from the by-value return type (it was ignored anyway). */
static inline char get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}
116
117 static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
118 {
119         return obj->mapping ? 'M' : ' ';
120 }
121
122 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
123 {
124         u64 size = 0;
125         struct i915_vma *vma;
126
127         list_for_each_entry(vma, &obj->vma_list, obj_link) {
128                 if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
129                         size += vma->node.size;
130         }
131
132         return size;
133 }
134
/*
 * Print a one-line description of @obj: status flags, size, read/write
 * domains, per-engine last-read seqnos, last-write/last-fence seqnos,
 * cache level, followed by optional annotations (name, pin count, fence,
 * per-vma offsets, stolen offset, mappable state, last-write engine,
 * frontbuffer bits).  Caller must hold struct_mutex.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;
	enum intel_engine_id id;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* flag chars: active, pinned-for-display, tiling, in-GGTT, mapped */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	/* one last-read seqno per engine, in engine-id order */
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "%x ",
				i915_gem_request_get_seqno(obj->last_read_req[id]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* count vmas with an elevated pin count */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	/* one "(..gtt offset, size)" note per vma; "g" marks GGTT vmas */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   vma->is_ggtt ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (vma->is_ggtt)
			seq_printf(m, ", type: %u", vma->ggtt_view.type);
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		/* build "p", "f" or "pf" for the mappable annotation */
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_engine(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
201
202 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
203 {
204         seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
205         seq_putc(m, ctx->remap_slice ? 'R' : 'r');
206         seq_putc(m, ' ');
207 }
208
209 static int i915_gem_object_list_info(struct seq_file *m, void *data)
210 {
211         struct drm_info_node *node = m->private;
212         uintptr_t list = (uintptr_t) node->info_ent->data;
213         struct list_head *head;
214         struct drm_device *dev = node->minor->dev;
215         struct drm_i915_private *dev_priv = to_i915(dev);
216         struct i915_ggtt *ggtt = &dev_priv->ggtt;
217         struct i915_vma *vma;
218         u64 total_obj_size, total_gtt_size;
219         int count, ret;
220
221         ret = mutex_lock_interruptible(&dev->struct_mutex);
222         if (ret)
223                 return ret;
224
225         /* FIXME: the user of this interface might want more than just GGTT */
226         switch (list) {
227         case ACTIVE_LIST:
228                 seq_puts(m, "Active:\n");
229                 head = &ggtt->base.active_list;
230                 break;
231         case INACTIVE_LIST:
232                 seq_puts(m, "Inactive:\n");
233                 head = &ggtt->base.inactive_list;
234                 break;
235         default:
236                 mutex_unlock(&dev->struct_mutex);
237                 return -EINVAL;
238         }
239
240         total_obj_size = total_gtt_size = count = 0;
241         list_for_each_entry(vma, head, vm_link) {
242                 seq_printf(m, "   ");
243                 describe_obj(m, vma->obj);
244                 seq_printf(m, "\n");
245                 total_obj_size += vma->obj->base.size;
246                 total_gtt_size += vma->node.size;
247                 count++;
248         }
249         mutex_unlock(&dev->struct_mutex);
250
251         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
252                    count, total_obj_size, total_gtt_size);
253         return 0;
254 }
255
256 static int obj_rank_by_stolen(void *priv,
257                               struct list_head *A, struct list_head *B)
258 {
259         struct drm_i915_gem_object *a =
260                 container_of(A, struct drm_i915_gem_object, obj_exec_link);
261         struct drm_i915_gem_object *b =
262                 container_of(B, struct drm_i915_gem_object, obj_exec_link);
263
264         if (a->stolen->start < b->stolen->start)
265                 return -1;
266         if (a->stolen->start > b->stolen->start)
267                 return 1;
268         return 0;
269 }
270
/*
 * Dump every object backed by stolen memory, sorted by stolen offset.
 *
 * Objects from both the bound and unbound lists are collected onto a
 * temporary local list (borrowing their obj_exec_link), sorted with
 * list_sort(), printed, then unlinked again.  Note that unbound objects
 * add to total_obj_size only — they have no GGTT footprint to count.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		/* return obj_exec_link to its empty state */
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
320
/*
 * Walk @list (objects linked by @member), adding each object's total GGTT
 * size into the caller's "size"/"count" accumulators, and additionally its
 * GGTT size into "mappable_size"/"mappable_count" when map_and_fenceable.
 * Relies on obj, size, count, mappable_size and mappable_count being in
 * scope at the call site.
 */
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_total_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
331
/* Per-client accounting filled in by per_file_stats(); sizes in bytes. */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* client whose ppgtt vmas count */
	unsigned long count;	/* objects visited */
	u64 total, unbound;	/* all objects; objects bound into no VM */
	u64 global, shared;	/* GGTT-bound; flink-named or dma-buf exported */
	u64 active, inactive;	/* split of bound size by obj->active */
};
339
/*
 * idr_for_each() callback: fold one object's sizes into the struct
 * file_stats passed via @data.  @id is unused.  Always returns 0 so the
 * iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	/* flink-named or dma-buf exported objects count as shared */
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (vma->is_ggtt) {
				stats->global += obj->base.size;
				continue;
			}

			/* only attribute vmas in this client's own ppgtt */
			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			/* counted as bound; skip the unbound accounting below */
			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	/* not attributed to any VM above: count as unbound */
	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}
391
/*
 * Emit one summary line for @stats labelled @name; prints nothing when the
 * client owns no objects.  @stats is an lvalue struct file_stats (passed by
 * name, fields accessed with '.').
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
404
405 static void print_batch_pool_stats(struct seq_file *m,
406                                    struct drm_i915_private *dev_priv)
407 {
408         struct drm_i915_gem_object *obj;
409         struct file_stats stats;
410         struct intel_engine_cs *engine;
411         int j;
412
413         memset(&stats, 0, sizeof(stats));
414
415         for_each_engine(engine, dev_priv) {
416                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
417                         list_for_each_entry(obj,
418                                             &engine->batch_pool.cache_list[j],
419                                             batch_pool_link)
420                                 per_file_stats(0, obj, &stats);
421                 }
422         }
423
424         print_file_stats(m, "[k]batch pool", stats);
425 }
426
/*
 * Like count_objects(), but walks a list of vmas linked by @member and
 * accounts via vma->obj.  Relies on vma, size, count, mappable_size and
 * mappable_count being in scope at the call site.
 */
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_total_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
437
/*
 * Top-level "objects" dump: global object/memory counters, per-list
 * (bound / active / inactive / unbound) totals, purgeable and pin-mapped
 * accounting, GTT totals, kernel batch-pool stats, and finally one summary
 * line per client.
 */
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mappable_count, purgeable_count;
	u64 size, mappable_size, purgeable_size;
	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
	u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	struct i915_vma *vma;	/* used inside the count_vmas() macro */
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	/* everything currently bound into the GGTT */
	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&ggtt->base.active_list, vm_link);
	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&ggtt->base.inactive_list, vm_link);
	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	/* unbound objects, tracking purgeable and pin-mapped ones as we go */
	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
		if (obj->mapping) {
			pin_mapped_count++;
			pin_mapped_size += obj->base.size;
			/* mapped but pages no longer pinned for it */
			if (obj->pages_pin_count == 0) {
				pin_mapped_purgeable_count++;
				pin_mapped_purgeable_size += obj->base.size;
			}
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* accumulators reused: size/count = fault-mappable,
	 * mappable_* = pinned for display */
	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
		if (obj->mapping) {
			pin_mapped_count++;
			pin_mapped_size += obj->base.size;
			if (obj->pages_pin_count == 0) {
				pin_mapped_purgeable_count++;
				pin_mapped_purgeable_size += obj->base.size;
			}
		}
	}
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
		   count, size);
	seq_printf(m,
		   "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
		   pin_mapped_count, pin_mapped_purgeable_count,
		   pin_mapped_size, pin_mapped_purgeable_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
556
557 static int i915_gem_gtt_info(struct seq_file *m, void *data)
558 {
559         struct drm_info_node *node = m->private;
560         struct drm_device *dev = node->minor->dev;
561         uintptr_t list = (uintptr_t) node->info_ent->data;
562         struct drm_i915_private *dev_priv = dev->dev_private;
563         struct drm_i915_gem_object *obj;
564         u64 total_obj_size, total_gtt_size;
565         int count, ret;
566
567         ret = mutex_lock_interruptible(&dev->struct_mutex);
568         if (ret)
569                 return ret;
570
571         total_obj_size = total_gtt_size = count = 0;
572         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
573                 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
574                         continue;
575
576                 seq_puts(m, "   ");
577                 describe_obj(m, obj);
578                 seq_putc(m, '\n');
579                 total_obj_size += obj->base.size;
580                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
581                 count++;
582         }
583
584         mutex_unlock(&dev->struct_mutex);
585
586         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
587                    count, total_obj_size, total_gtt_size);
588
589         return 0;
590 }
591
592 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
593 {
594         struct drm_info_node *node = m->private;
595         struct drm_device *dev = node->minor->dev;
596         struct drm_i915_private *dev_priv = dev->dev_private;
597         struct intel_crtc *crtc;
598         int ret;
599
600         ret = mutex_lock_interruptible(&dev->struct_mutex);
601         if (ret)
602                 return ret;
603
604         for_each_intel_crtc(dev, crtc) {
605                 const char pipe = pipe_name(crtc->pipe);
606                 const char plane = plane_name(crtc->plane);
607                 struct intel_unpin_work *work;
608
609                 spin_lock_irq(&dev->event_lock);
610                 work = crtc->unpin_work;
611                 if (work == NULL) {
612                         seq_printf(m, "No flip due on pipe %c (plane %c)\n",
613                                    pipe, plane);
614                 } else {
615                         u32 addr;
616
617                         if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
618                                 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
619                                            pipe, plane);
620                         } else {
621                                 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
622                                            pipe, plane);
623                         }
624                         if (work->flip_queued_req) {
625                                 struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
626
627                                 seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
628                                            engine->name,
629                                            i915_gem_request_get_seqno(work->flip_queued_req),
630                                            dev_priv->next_seqno,
631                                            engine->get_seqno(engine),
632                                            i915_gem_request_completed(work->flip_queued_req, true));
633                         } else
634                                 seq_printf(m, "Flip not associated with any ring\n");
635                         seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
636                                    work->flip_queued_vblank,
637                                    work->flip_ready_vblank,
638                                    drm_crtc_vblank_count(&crtc->base));
639                         if (work->enable_stall_check)
640                                 seq_puts(m, "Stall check enabled, ");
641                         else
642                                 seq_puts(m, "Stall check waiting for page flip ioctl, ");
643                         seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
644
645                         if (INTEL_INFO(dev)->gen >= 4)
646                                 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
647                         else
648                                 addr = I915_READ(DSPADDR(crtc->plane));
649                         seq_printf(m, "Current scanout address 0x%08x\n", addr);
650
651                         if (work->pending_flip_obj) {
652                                 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
653                                 seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
654                         }
655                 }
656                 spin_unlock_irq(&dev->event_lock);
657         }
658
659         mutex_unlock(&dev->struct_mutex);
660
661         return 0;
662 }
663
664 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
665 {
666         struct drm_info_node *node = m->private;
667         struct drm_device *dev = node->minor->dev;
668         struct drm_i915_private *dev_priv = dev->dev_private;
669         struct drm_i915_gem_object *obj;
670         struct intel_engine_cs *engine;
671         int total = 0;
672         int ret, j;
673
674         ret = mutex_lock_interruptible(&dev->struct_mutex);
675         if (ret)
676                 return ret;
677
678         for_each_engine(engine, dev_priv) {
679                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
680                         int count;
681
682                         count = 0;
683                         list_for_each_entry(obj,
684                                             &engine->batch_pool.cache_list[j],
685                                             batch_pool_link)
686                                 count++;
687                         seq_printf(m, "%s cache[%d]: %d objects\n",
688                                    engine->name, j, count);
689
690                         list_for_each_entry(obj,
691                                             &engine->batch_pool.cache_list[j],
692                                             batch_pool_link) {
693                                 seq_puts(m, "   ");
694                                 describe_obj(m, obj);
695                                 seq_putc(m, '\n');
696                         }
697
698                         total += count;
699                 }
700         }
701
702         seq_printf(m, "total: %d\n", total);
703
704         mutex_unlock(&dev->struct_mutex);
705
706         return 0;
707 }
708
709 static int i915_gem_request_info(struct seq_file *m, void *data)
710 {
711         struct drm_info_node *node = m->private;
712         struct drm_device *dev = node->minor->dev;
713         struct drm_i915_private *dev_priv = dev->dev_private;
714         struct intel_engine_cs *engine;
715         struct drm_i915_gem_request *req;
716         int ret, any;
717
718         ret = mutex_lock_interruptible(&dev->struct_mutex);
719         if (ret)
720                 return ret;
721
722         any = 0;
723         for_each_engine(engine, dev_priv) {
724                 int count;
725
726                 count = 0;
727                 list_for_each_entry(req, &engine->request_list, list)
728                         count++;
729                 if (count == 0)
730                         continue;
731
732                 seq_printf(m, "%s requests: %d\n", engine->name, count);
733                 list_for_each_entry(req, &engine->request_list, list) {
734                         struct task_struct *task;
735
736                         rcu_read_lock();
737                         task = NULL;
738                         if (req->pid)
739                                 task = pid_task(req->pid, PIDTYPE_PID);
740                         seq_printf(m, "    %x @ %d: %s [%d]\n",
741                                    req->seqno,
742                                    (int) (jiffies - req->emitted_jiffies),
743                                    task ? task->comm : "<unknown>",
744                                    task ? task->pid : -1);
745                         rcu_read_unlock();
746                 }
747
748                 any++;
749         }
750         mutex_unlock(&dev->struct_mutex);
751
752         if (any == 0)
753                 seq_puts(m, "No requests\n");
754
755         return 0;
756 }
757
758 static void i915_ring_seqno_info(struct seq_file *m,
759                                  struct intel_engine_cs *engine)
760 {
761         seq_printf(m, "Current sequence (%s): %x\n",
762                    engine->name, engine->get_seqno(engine));
763         seq_printf(m, "Current user interrupts (%s): %x\n",
764                    engine->name, READ_ONCE(engine->user_interrupts));
765 }
766
767 static int i915_gem_seqno_info(struct seq_file *m, void *data)
768 {
769         struct drm_info_node *node = m->private;
770         struct drm_device *dev = node->minor->dev;
771         struct drm_i915_private *dev_priv = dev->dev_private;
772         struct intel_engine_cs *engine;
773         int ret;
774
775         ret = mutex_lock_interruptible(&dev->struct_mutex);
776         if (ret)
777                 return ret;
778         intel_runtime_pm_get(dev_priv);
779
780         for_each_engine(engine, dev_priv)
781                 i915_ring_seqno_info(m, engine);
782
783         intel_runtime_pm_put(dev_priv);
784         mutex_unlock(&dev->struct_mutex);
785
786         return 0;
787 }
788
789
/*
 * debugfs entry: dump the interrupt registers for the current platform.
 *
 * The register layout differs per generation, so each platform family
 * gets its own branch below.  All reads happen under struct_mutex and a
 * runtime-PM reference.  Ends with the per-engine interrupt mask and
 * seqno state common to all platforms.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		/* Cherryview: GEN8-style GT/PCU interrupts combined with a
		 * VLV-style display interrupt block. */
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		/* Four GT interrupt register banks on gen8-style hardware. */
		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		/* Broadwell and later: GT banks plus per-pipe display engine
		 * registers that sit in pipe power wells. */
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Skip (and say so) if the pipe's power well is off;
			 * reading its registers would be invalid. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		/* Valleyview: VLV display block plus gen6/7-style GT and PM
		 * interrupt registers. */
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		/* Pre-PCH-split (gen2-4): a single IER/IIR/IMR set. */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* Ironlake-style PCH split: separate north (CPU) and south
		 * (PCH) display interrupt registers, plus GT. */
		seq_printf(m, "North Display Interrupt enable:          %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:        %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:            %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:          %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:        %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:            %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:               %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:             %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:         %08x\n",
			   I915_READ(GTIMR));
	}
	/* Common tail: per-engine IMR (gen6+ only) and seqno state. */
	for_each_engine(engine, dev_priv) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):       %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
980
981 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
982 {
983         struct drm_info_node *node = m->private;
984         struct drm_device *dev = node->minor->dev;
985         struct drm_i915_private *dev_priv = dev->dev_private;
986         int i, ret;
987
988         ret = mutex_lock_interruptible(&dev->struct_mutex);
989         if (ret)
990                 return ret;
991
992         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
993         for (i = 0; i < dev_priv->num_fence_regs; i++) {
994                 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
995
996                 seq_printf(m, "Fence %d, pin count = %d, object = ",
997                            i, dev_priv->fence_regs[i].pin_count);
998                 if (obj == NULL)
999                         seq_puts(m, "unused");
1000                 else
1001                         describe_obj(m, obj);
1002                 seq_putc(m, '\n');
1003         }
1004
1005         mutex_unlock(&dev->struct_mutex);
1006         return 0;
1007 }
1008
1009 static int i915_hws_info(struct seq_file *m, void *data)
1010 {
1011         struct drm_info_node *node = m->private;
1012         struct drm_device *dev = node->minor->dev;
1013         struct drm_i915_private *dev_priv = dev->dev_private;
1014         struct intel_engine_cs *engine;
1015         const u32 *hws;
1016         int i;
1017
1018         engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
1019         hws = engine->status_page.page_addr;
1020         if (hws == NULL)
1021                 return 0;
1022
1023         for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
1024                 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1025                            i * 4,
1026                            hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
1027         }
1028         return 0;
1029 }
1030
1031 static ssize_t
1032 i915_error_state_write(struct file *filp,
1033                        const char __user *ubuf,
1034                        size_t cnt,
1035                        loff_t *ppos)
1036 {
1037         struct i915_error_state_file_priv *error_priv = filp->private_data;
1038         struct drm_device *dev = error_priv->dev;
1039         int ret;
1040
1041         DRM_DEBUG_DRIVER("Resetting error state\n");
1042
1043         ret = mutex_lock_interruptible(&dev->struct_mutex);
1044         if (ret)
1045                 return ret;
1046
1047         i915_destroy_error_state(dev);
1048         mutex_unlock(&dev->struct_mutex);
1049
1050         return cnt;
1051 }
1052
1053 static int i915_error_state_open(struct inode *inode, struct file *file)
1054 {
1055         struct drm_device *dev = inode->i_private;
1056         struct i915_error_state_file_priv *error_priv;
1057
1058         error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
1059         if (!error_priv)
1060                 return -ENOMEM;
1061
1062         error_priv->dev = dev;
1063
1064         i915_error_state_get(dev, error_priv);
1065
1066         file->private_data = error_priv;
1067
1068         return 0;
1069 }
1070
1071 static int i915_error_state_release(struct inode *inode, struct file *file)
1072 {
1073         struct i915_error_state_file_priv *error_priv = file->private_data;
1074
1075         i915_error_state_put(error_priv);
1076         kfree(error_priv);
1077
1078         return 0;
1079 }
1080
1081 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1082                                      size_t count, loff_t *pos)
1083 {
1084         struct i915_error_state_file_priv *error_priv = file->private_data;
1085         struct drm_i915_error_state_buf error_str;
1086         loff_t tmp_pos = 0;
1087         ssize_t ret_count = 0;
1088         int ret;
1089
1090         ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
1091         if (ret)
1092                 return ret;
1093
1094         ret = i915_error_state_to_str(&error_str, error_priv);
1095         if (ret)
1096                 goto out;
1097
1098         ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1099                                             error_str.buf,
1100                                             error_str.bytes);
1101
1102         if (ret_count < 0)
1103                 ret = ret_count;
1104         else
1105                 *pos = error_str.start + ret_count;
1106 out:
1107         i915_error_state_buf_release(&error_str);
1108         return ret ?: ret_count;
1109 }
1110
/* debugfs file operations for the error-state node: read dumps the
 * captured GPU error state, any write clears it (see the handlers above). */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
1119
1120 static int
1121 i915_next_seqno_get(void *data, u64 *val)
1122 {
1123         struct drm_device *dev = data;
1124         struct drm_i915_private *dev_priv = dev->dev_private;
1125         int ret;
1126
1127         ret = mutex_lock_interruptible(&dev->struct_mutex);
1128         if (ret)
1129                 return ret;
1130
1131         *val = dev_priv->next_seqno;
1132         mutex_unlock(&dev->struct_mutex);
1133
1134         return 0;
1135 }
1136
1137 static int
1138 i915_next_seqno_set(void *data, u64 val)
1139 {
1140         struct drm_device *dev = data;
1141         int ret;
1142
1143         ret = mutex_lock_interruptible(&dev->struct_mutex);
1144         if (ret)
1145                 return ret;
1146
1147         ret = i915_gem_set_seqno(dev, val);
1148         mutex_unlock(&dev->struct_mutex);
1149
1150         return ret;
1151 }
1152
/* debugfs attribute exposing next_seqno as hex; get/set handlers above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
1156
/*
 * debugfs entry: report GPU frequency / P-state information.
 *
 * Holds a runtime-PM reference for the whole function and first flushes
 * any pending RPS resume work so the reported state is current.  The
 * source of the data depends on platform: Ironlake MEMSTAT registers,
 * VLV/CHV punit sideband reads, or the gen6+ RPS register block.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		/* Ironlake: requested/current state from MEMSWCTL/MEMSTAT. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 freq_sts;

		/* VLV/CHV: actual frequency comes from the punit over
		 * sideband, under the RPS hw_lock. */
		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton keeps RP_STATE_CAP / GT_PERF_STATUS elsewhere. */
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* The requested-frequency field's position in RPNSWREQ
		 * shifted across generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		/* The CAGF (current actual frequency) mask also varies. */
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		/* PM interrupts live in GT bank 2 on gen8+. */
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		/* Broxton swaps the RPN/RP0 fields within RP_STATE_CAP. */
		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	/* Single exit: the runtime-PM reference is dropped on all paths. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1358
1359 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1360 {
1361         struct drm_info_node *node = m->private;
1362         struct drm_device *dev = node->minor->dev;
1363         struct drm_i915_private *dev_priv = dev->dev_private;
1364         struct intel_engine_cs *engine;
1365         u64 acthd[I915_NUM_ENGINES];
1366         u32 seqno[I915_NUM_ENGINES];
1367         u32 instdone[I915_NUM_INSTDONE_REG];
1368         enum intel_engine_id id;
1369         int j;
1370
1371         if (!i915.enable_hangcheck) {
1372                 seq_printf(m, "Hangcheck disabled\n");
1373                 return 0;
1374         }
1375
1376         intel_runtime_pm_get(dev_priv);
1377
1378         for_each_engine_id(engine, dev_priv, id) {
1379                 acthd[id] = intel_ring_get_active_head(engine);
1380                 seqno[id] = engine->get_seqno(engine);
1381         }
1382
1383         i915_get_extra_instdone(dev, instdone);
1384
1385         intel_runtime_pm_put(dev_priv);
1386
1387         if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1388                 seq_printf(m, "Hangcheck active, fires in %dms\n",
1389                            jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1390                                             jiffies));
1391         } else
1392                 seq_printf(m, "Hangcheck inactive\n");
1393
1394         for_each_engine_id(engine, dev_priv, id) {
1395                 seq_printf(m, "%s:\n", engine->name);
1396                 seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1397                            engine->hangcheck.seqno,
1398                            seqno[id],
1399                            engine->last_submitted_seqno);
1400                 seq_printf(m, "\tuser interrupts = %x [current %x]\n",
1401                            engine->hangcheck.user_interrupts,
1402                            READ_ONCE(engine->user_interrupts));
1403                 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1404                            (long long)engine->hangcheck.acthd,
1405                            (long long)acthd[id]);
1406                 seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
1407                 seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
1408
1409                 if (engine->id == RCS) {
1410                         seq_puts(m, "\tinstdone read =");
1411
1412                         for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1413                                 seq_printf(m, " 0x%08x", instdone[j]);
1414
1415                         seq_puts(m, "\n\tinstdone accu =");
1416
1417                         for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1418                                 seq_printf(m, " 0x%08x",
1419                                            engine->hangcheck.instdone[j]);
1420
1421                         seq_puts(m, "\n");
1422                 }
1423         }
1424
1425         return 0;
1426 }
1427
1428 static int ironlake_drpc_info(struct seq_file *m)
1429 {
1430         struct drm_info_node *node = m->private;
1431         struct drm_device *dev = node->minor->dev;
1432         struct drm_i915_private *dev_priv = dev->dev_private;
1433         u32 rgvmodectl, rstdbyctl;
1434         u16 crstandvid;
1435         int ret;
1436
1437         ret = mutex_lock_interruptible(&dev->struct_mutex);
1438         if (ret)
1439                 return ret;
1440         intel_runtime_pm_get(dev_priv);
1441
1442         rgvmodectl = I915_READ(MEMMODECTL);
1443         rstdbyctl = I915_READ(RSTDBYCTL);
1444         crstandvid = I915_READ16(CRSTANDVID);
1445
1446         intel_runtime_pm_put(dev_priv);
1447         mutex_unlock(&dev->struct_mutex);
1448
1449         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1450         seq_printf(m, "Boost freq: %d\n",
1451                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1452                    MEMMODE_BOOST_FREQ_SHIFT);
1453         seq_printf(m, "HW control enabled: %s\n",
1454                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1455         seq_printf(m, "SW control enabled: %s\n",
1456                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1457         seq_printf(m, "Gated voltage change: %s\n",
1458                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1459         seq_printf(m, "Starting frequency: P%d\n",
1460                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1461         seq_printf(m, "Max P-state: P%d\n",
1462                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1463         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1464         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1465         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1466         seq_printf(m, "Render standby enabled: %s\n",
1467                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1468         seq_puts(m, "Current RS state: ");
1469         switch (rstdbyctl & RSX_STATUS_MASK) {
1470         case RSX_STATUS_ON:
1471                 seq_puts(m, "on\n");
1472                 break;
1473         case RSX_STATUS_RC1:
1474                 seq_puts(m, "RC1\n");
1475                 break;
1476         case RSX_STATUS_RC1E:
1477                 seq_puts(m, "RC1E\n");
1478                 break;
1479         case RSX_STATUS_RS1:
1480                 seq_puts(m, "RS1\n");
1481                 break;
1482         case RSX_STATUS_RS2:
1483                 seq_puts(m, "RS2 (RC6)\n");
1484                 break;
1485         case RSX_STATUS_RS3:
1486                 seq_puts(m, "RC3 (RC6+)\n");
1487                 break;
1488         default:
1489                 seq_puts(m, "unknown\n");
1490                 break;
1491         }
1492
1493         return 0;
1494 }
1495
1496 static int i915_forcewake_domains(struct seq_file *m, void *data)
1497 {
1498         struct drm_info_node *node = m->private;
1499         struct drm_device *dev = node->minor->dev;
1500         struct drm_i915_private *dev_priv = dev->dev_private;
1501         struct intel_uncore_forcewake_domain *fw_domain;
1502
1503         spin_lock_irq(&dev_priv->uncore.lock);
1504         for_each_fw_domain(fw_domain, dev_priv) {
1505                 seq_printf(m, "%s.wake_count = %u\n",
1506                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1507                            fw_domain->wake_count);
1508         }
1509         spin_unlock_irq(&dev_priv->uncore.lock);
1510
1511         return 0;
1512 }
1513
/*
 * Report VLV/CHV RC6 and power-well state.
 *
 * Snapshots the power-well status and RP/RC control registers under a
 * runtime-PM wakeref, then tails into i915_forcewake_domains() so its
 * return value becomes ours.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	/*
	 * NOTE(review): this tests the same GEN6_RP_ENABLE bit as the
	 * "Turbo enabled" line above -- looks like a copy/paste duplicate;
	 * confirm whether a different bit was intended here.
	 */
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	/*
	 * NOTE(review): these two residency reads happen after
	 * intel_runtime_pm_put() above -- verify the device cannot runtime
	 * suspend before they execute.
	 */
	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}
1553
/*
 * Report gen6+ RC6 state: RP/RC control bits, the current RC state from
 * GT_CORE_STATUS, residency counters, and RC6 voltage IDs from pcode.
 *
 * Lock ordering here is deliberate: struct_mutex + runtime-PM wakeref
 * first, a brief uncore spinlock to sample the forcewake refcount, and
 * finally rps.hw_lock (after dropping struct_mutex) for the pcode read.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	/* Sample the render-domain forcewake refcount under the uncore lock. */
	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		/* Poll (up to 50 x 10us) for the forcewake ack to clear. */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	/* Raw read without forcewake; trace it by hand since _FW skips that. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	/* Pcode mailbox access requires rps.hw_lock, not struct_mutex. */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	/* rc6vids packs three 8-bit voltage IDs: RC6, RC6+, RC6++. */
	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
1652
1653 static int i915_drpc_info(struct seq_file *m, void *unused)
1654 {
1655         struct drm_info_node *node = m->private;
1656         struct drm_device *dev = node->minor->dev;
1657
1658         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1659                 return vlv_drpc_info(m);
1660         else if (INTEL_INFO(dev)->gen >= 6)
1661                 return gen6_drpc_info(m);
1662         else
1663                 return ironlake_drpc_info(m);
1664 }
1665
1666 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1667 {
1668         struct drm_info_node *node = m->private;
1669         struct drm_device *dev = node->minor->dev;
1670         struct drm_i915_private *dev_priv = dev->dev_private;
1671
1672         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1673                    dev_priv->fb_tracking.busy_bits);
1674
1675         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1676                    dev_priv->fb_tracking.flip_bits);
1677
1678         return 0;
1679 }
1680
1681 static int i915_fbc_status(struct seq_file *m, void *unused)
1682 {
1683         struct drm_info_node *node = m->private;
1684         struct drm_device *dev = node->minor->dev;
1685         struct drm_i915_private *dev_priv = dev->dev_private;
1686
1687         if (!HAS_FBC(dev)) {
1688                 seq_puts(m, "FBC unsupported on this chipset\n");
1689                 return 0;
1690         }
1691
1692         intel_runtime_pm_get(dev_priv);
1693         mutex_lock(&dev_priv->fbc.lock);
1694
1695         if (intel_fbc_is_active(dev_priv))
1696                 seq_puts(m, "FBC enabled\n");
1697         else
1698                 seq_printf(m, "FBC disabled: %s\n",
1699                            dev_priv->fbc.no_fbc_reason);
1700
1701         if (INTEL_INFO(dev_priv)->gen >= 7)
1702                 seq_printf(m, "Compressing: %s\n",
1703                            yesno(I915_READ(FBC_STATUS2) &
1704                                  FBC_COMPRESSION_MASK));
1705
1706         mutex_unlock(&dev_priv->fbc.lock);
1707         intel_runtime_pm_put(dev_priv);
1708
1709         return 0;
1710 }
1711
1712 static int i915_fbc_fc_get(void *data, u64 *val)
1713 {
1714         struct drm_device *dev = data;
1715         struct drm_i915_private *dev_priv = dev->dev_private;
1716
1717         if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1718                 return -ENODEV;
1719
1720         *val = dev_priv->fbc.false_color;
1721
1722         return 0;
1723 }
1724
1725 static int i915_fbc_fc_set(void *data, u64 val)
1726 {
1727         struct drm_device *dev = data;
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729         u32 reg;
1730
1731         if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1732                 return -ENODEV;
1733
1734         mutex_lock(&dev_priv->fbc.lock);
1735
1736         reg = I915_READ(ILK_DPFC_CONTROL);
1737         dev_priv->fbc.false_color = val;
1738
1739         I915_WRITE(ILK_DPFC_CONTROL, val ?
1740                    (reg | FBC_CTL_FALSE_COLOR) :
1741                    (reg & ~FBC_CTL_FALSE_COLOR));
1742
1743         mutex_unlock(&dev_priv->fbc.lock);
1744         return 0;
1745 }
1746
/* debugfs file operations for the FBC false-color knob. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");
1750
1751 static int i915_ips_status(struct seq_file *m, void *unused)
1752 {
1753         struct drm_info_node *node = m->private;
1754         struct drm_device *dev = node->minor->dev;
1755         struct drm_i915_private *dev_priv = dev->dev_private;
1756
1757         if (!HAS_IPS(dev)) {
1758                 seq_puts(m, "not supported\n");
1759                 return 0;
1760         }
1761
1762         intel_runtime_pm_get(dev_priv);
1763
1764         seq_printf(m, "Enabled by kernel parameter: %s\n",
1765                    yesno(i915.enable_ips));
1766
1767         if (INTEL_INFO(dev)->gen >= 8) {
1768                 seq_puts(m, "Currently: unknown\n");
1769         } else {
1770                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1771                         seq_puts(m, "Currently: enabled\n");
1772                 else
1773                         seq_puts(m, "Currently: disabled\n");
1774         }
1775
1776         intel_runtime_pm_put(dev_priv);
1777
1778         return 0;
1779 }
1780
/*
 * Report whether panel self-refresh is enabled.  The enable bit lives in
 * a different register on each platform generation, hence the cascade of
 * platform checks below; unmatched platforms report "disabled".
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
1809
1810 static int i915_emon_status(struct seq_file *m, void *unused)
1811 {
1812         struct drm_info_node *node = m->private;
1813         struct drm_device *dev = node->minor->dev;
1814         struct drm_i915_private *dev_priv = dev->dev_private;
1815         unsigned long temp, chipset, gfx;
1816         int ret;
1817
1818         if (!IS_GEN5(dev))
1819                 return -ENODEV;
1820
1821         ret = mutex_lock_interruptible(&dev->struct_mutex);
1822         if (ret)
1823                 return ret;
1824
1825         temp = i915_mch_val(dev_priv);
1826         chipset = i915_chipset_val(dev_priv);
1827         gfx = i915_gfx_val(dev_priv);
1828         mutex_unlock(&dev->struct_mutex);
1829
1830         seq_printf(m, "GMCH temp: %ld\n", temp);
1831         seq_printf(m, "Chipset power: %ld\n", chipset);
1832         seq_printf(m, "GFX power: %ld\n", gfx);
1833         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1834
1835         return 0;
1836 }
1837
1838 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1839 {
1840         struct drm_info_node *node = m->private;
1841         struct drm_device *dev = node->minor->dev;
1842         struct drm_i915_private *dev_priv = dev->dev_private;
1843         int ret = 0;
1844         int gpu_freq, ia_freq;
1845         unsigned int max_gpu_freq, min_gpu_freq;
1846
1847         if (!HAS_CORE_RING_FREQ(dev)) {
1848                 seq_puts(m, "unsupported on this chipset\n");
1849                 return 0;
1850         }
1851
1852         intel_runtime_pm_get(dev_priv);
1853
1854         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1855
1856         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1857         if (ret)
1858                 goto out;
1859
1860         if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1861                 /* Convert GT frequency to 50 HZ units */
1862                 min_gpu_freq =
1863                         dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1864                 max_gpu_freq =
1865                         dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1866         } else {
1867                 min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1868                 max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1869         }
1870
1871         seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1872
1873         for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1874                 ia_freq = gpu_freq;
1875                 sandybridge_pcode_read(dev_priv,
1876                                        GEN6_PCODE_READ_MIN_FREQ_TABLE,
1877                                        &ia_freq);
1878                 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1879                            intel_gpu_freq(dev_priv, (gpu_freq *
1880                                 (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1881                                  GEN9_FREQ_SCALER : 1))),
1882                            ((ia_freq >> 0) & 0xff) * 100,
1883                            ((ia_freq >> 8) & 0xff) * 100);
1884         }
1885
1886         mutex_unlock(&dev_priv->rps.hw_lock);
1887
1888 out:
1889         intel_runtime_pm_put(dev_priv);
1890         return ret;
1891 }
1892
1893 static int i915_opregion(struct seq_file *m, void *unused)
1894 {
1895         struct drm_info_node *node = m->private;
1896         struct drm_device *dev = node->minor->dev;
1897         struct drm_i915_private *dev_priv = dev->dev_private;
1898         struct intel_opregion *opregion = &dev_priv->opregion;
1899         int ret;
1900
1901         ret = mutex_lock_interruptible(&dev->struct_mutex);
1902         if (ret)
1903                 goto out;
1904
1905         if (opregion->header)
1906                 seq_write(m, opregion->header, OPREGION_SIZE);
1907
1908         mutex_unlock(&dev->struct_mutex);
1909
1910 out:
1911         return 0;
1912 }
1913
1914 static int i915_vbt(struct seq_file *m, void *unused)
1915 {
1916         struct drm_info_node *node = m->private;
1917         struct drm_device *dev = node->minor->dev;
1918         struct drm_i915_private *dev_priv = dev->dev_private;
1919         struct intel_opregion *opregion = &dev_priv->opregion;
1920
1921         if (opregion->vbt)
1922                 seq_write(m, opregion->vbt, opregion->vbt_size);
1923
1924         return 0;
1925 }
1926
1927 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1928 {
1929         struct drm_info_node *node = m->private;
1930         struct drm_device *dev = node->minor->dev;
1931         struct intel_framebuffer *fbdev_fb = NULL;
1932         struct drm_framebuffer *drm_fb;
1933         int ret;
1934
1935         ret = mutex_lock_interruptible(&dev->struct_mutex);
1936         if (ret)
1937                 return ret;
1938
1939 #ifdef CONFIG_DRM_FBDEV_EMULATION
1940        if (to_i915(dev)->fbdev) {
1941                fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1942
1943                seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1944                          fbdev_fb->base.width,
1945                          fbdev_fb->base.height,
1946                          fbdev_fb->base.depth,
1947                          fbdev_fb->base.bits_per_pixel,
1948                          fbdev_fb->base.modifier[0],
1949                          atomic_read(&fbdev_fb->base.refcount.refcount));
1950                describe_obj(m, fbdev_fb->obj);
1951                seq_putc(m, '\n');
1952        }
1953 #endif
1954
1955         mutex_lock(&dev->mode_config.fb_lock);
1956         drm_for_each_fb(drm_fb, dev) {
1957                 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1958                 if (fb == fbdev_fb)
1959                         continue;
1960
1961                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1962                            fb->base.width,
1963                            fb->base.height,
1964                            fb->base.depth,
1965                            fb->base.bits_per_pixel,
1966                            fb->base.modifier[0],
1967                            atomic_read(&fb->base.refcount.refcount));
1968                 describe_obj(m, fb->obj);
1969                 seq_putc(m, '\n');
1970         }
1971         mutex_unlock(&dev->mode_config.fb_lock);
1972         mutex_unlock(&dev->struct_mutex);
1973
1974         return 0;
1975 }
1976
/* Append a one-line summary of a ringbuffer's space/head/tail state. */
static void describe_ctx_ringbuf(struct seq_file *m,
				 struct intel_ringbuffer *ringbuf)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ringbuf->space, ringbuf->head, ringbuf->tail,
		   ringbuf->last_retired_head);
}
1984
1985 static int i915_context_status(struct seq_file *m, void *unused)
1986 {
1987         struct drm_info_node *node = m->private;
1988         struct drm_device *dev = node->minor->dev;
1989         struct drm_i915_private *dev_priv = dev->dev_private;
1990         struct intel_engine_cs *engine;
1991         struct intel_context *ctx;
1992         enum intel_engine_id id;
1993         int ret;
1994
1995         ret = mutex_lock_interruptible(&dev->struct_mutex);
1996         if (ret)
1997                 return ret;
1998
1999         list_for_each_entry(ctx, &dev_priv->context_list, link) {
2000                 if (!i915.enable_execlists &&
2001                     ctx->legacy_hw_ctx.rcs_state == NULL)
2002                         continue;
2003
2004                 seq_printf(m, "HW context %u ", ctx->hw_id);
2005                 describe_ctx(m, ctx);
2006                 if (ctx == dev_priv->kernel_context)
2007                         seq_printf(m, "(kernel context) ");
2008
2009                 if (i915.enable_execlists) {
2010                         seq_putc(m, '\n');
2011                         for_each_engine_id(engine, dev_priv, id) {
2012                                 struct drm_i915_gem_object *ctx_obj =
2013                                         ctx->engine[id].state;
2014                                 struct intel_ringbuffer *ringbuf =
2015                                         ctx->engine[id].ringbuf;
2016
2017                                 seq_printf(m, "%s: ", engine->name);
2018                                 if (ctx_obj)
2019                                         describe_obj(m, ctx_obj);
2020                                 if (ringbuf)
2021                                         describe_ctx_ringbuf(m, ringbuf);
2022                                 seq_putc(m, '\n');
2023                         }
2024                 } else {
2025                         describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
2026                 }
2027
2028                 seq_putc(m, '\n');
2029         }
2030
2031         mutex_unlock(&dev->struct_mutex);
2032
2033         return 0;
2034 }
2035
/*
 * Dump the head of an engine's saved logical-ring-context image for
 * @ctx as rows of four dwords, each prefixed with its GGTT address.
 * The register state lives in page LRC_STATE_PN of the context object,
 * hence the ggtt_offset + 4096 in the printed address.
 *
 * NOTE(review): the loop bound divides 0x600 by sizeof(u32) and by 4
 * while j also steps by 4, so only 0x180 bytes are dumped -- confirm
 * whether 0x600 bytes of state were intended.
 *
 * NOTE(review): i915_gem_object_get_pages() has no matching put/unpin
 * here -- confirm whether the pages can be released after the dump.
 */
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct intel_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct page *page;
	uint32_t *reg_state;
	int j;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
	unsigned long ggtt_offset = 0;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (ctx_obj == NULL) {
		seq_puts(m, "\tNot allocated\n");
		return;
	}

	/* An unbound context is still dumped, just without a GGTT address. */
	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
	else
		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);

	if (i915_gem_object_get_pages(ctx_obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n");
		return;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   ggtt_offset + 4096 + (j * 4),
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}
2078
2079 static int i915_dump_lrc(struct seq_file *m, void *unused)
2080 {
2081         struct drm_info_node *node = (struct drm_info_node *) m->private;
2082         struct drm_device *dev = node->minor->dev;
2083         struct drm_i915_private *dev_priv = dev->dev_private;
2084         struct intel_engine_cs *engine;
2085         struct intel_context *ctx;
2086         int ret;
2087
2088         if (!i915.enable_execlists) {
2089                 seq_printf(m, "Logical Ring Contexts are disabled\n");
2090                 return 0;
2091         }
2092
2093         ret = mutex_lock_interruptible(&dev->struct_mutex);
2094         if (ret)
2095                 return ret;
2096
2097         list_for_each_entry(ctx, &dev_priv->context_list, link)
2098                 for_each_engine(engine, dev_priv)
2099                         i915_dump_lrc_obj(m, ctx, engine);
2100
2101         mutex_unlock(&dev->struct_mutex);
2102
2103         return 0;
2104 }
2105
/*
 * Dump per-engine execlists state: the execlist status registers, the
 * context-status-buffer (CSB) read/write pointers and entries, and a
 * summary of the software submission queue.
 */
static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int i, ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;

		seq_printf(m, "%s\n", engine->name);

		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		/* Software tracks the read side; the HW reports the write
		 * side.  Unwrap so write >= read for display purposes. */
		read_pointer = engine->next_context_status_buffer;
		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		/* Snapshot the queue depth and head under execlist_lock. */
		spin_lock_bh(&engine->execlist_lock);
		list_for_each(cursor, &engine->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&engine->execlist_queue,
						    struct drm_i915_gem_request,
						    execlist_link);
		spin_unlock_bh(&engine->execlist_lock);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			seq_printf(m, "\tHead request context: %u\n",
				   head_req->ctx->hw_id);
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2184
2185 static const char *swizzle_string(unsigned swizzle)
2186 {
2187         switch (swizzle) {
2188         case I915_BIT_6_SWIZZLE_NONE:
2189                 return "none";
2190         case I915_BIT_6_SWIZZLE_9:
2191                 return "bit9";
2192         case I915_BIT_6_SWIZZLE_9_10:
2193                 return "bit9/bit10";
2194         case I915_BIT_6_SWIZZLE_9_11:
2195                 return "bit9/bit11";
2196         case I915_BIT_6_SWIZZLE_9_10_11:
2197                 return "bit9/bit10/bit11";
2198         case I915_BIT_6_SWIZZLE_9_17:
2199                 return "bit9/bit17";
2200         case I915_BIT_6_SWIZZLE_9_10_17:
2201                 return "bit9/bit10/bit17";
2202         case I915_BIT_6_SWIZZLE_UNKNOWN:
2203                 return "unknown";
2204         }
2205
2206         return "bug";
2207 }
2208
/*
 * Report the detected bit-6 swizzle modes and the raw registers they
 * were derived from (which differ per generation).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* Gen3/4: DRAM channel/rank configuration registers. */
	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/* Gen6+: per-channel DIMM info plus tiling/arbiter control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_INFO(dev)->gen >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2262
2263 static int per_file_ctx(int id, void *ptr, void *data)
2264 {
2265         struct intel_context *ctx = ptr;
2266         struct seq_file *m = data;
2267         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2268
2269         if (!ppgtt) {
2270                 seq_printf(m, "  no ppgtt for context %d\n",
2271                            ctx->user_handle);
2272                 return 0;
2273         }
2274
2275         if (i915_gem_context_is_default(ctx))
2276                 seq_puts(m, "  default context:\n");
2277         else
2278                 seq_printf(m, "  context %d:\n", ctx->user_handle);
2279         ppgtt->debug_dump(ppgtt, m);
2280
2281         return 0;
2282 }
2283
2284 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2285 {
2286         struct drm_i915_private *dev_priv = dev->dev_private;
2287         struct intel_engine_cs *engine;
2288         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2289         int i;
2290
2291         if (!ppgtt)
2292                 return;
2293
2294         for_each_engine(engine, dev_priv) {
2295                 seq_printf(m, "%s\n", engine->name);
2296                 for (i = 0; i < 4; i++) {
2297                         u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2298                         pdp <<= 32;
2299                         pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2300                         seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2301                 }
2302         }
2303 }
2304
/*
 * Dump per-engine PPGTT state for gen6/gen7, followed by the aliasing
 * PPGTT (if any) and the global ECOCHK register.
 */
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	/* Gen6 has a single, global GFX_MODE register... */
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
		/* ...while gen7 replicates it per ring. */
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2336
2337 static int i915_ppgtt_info(struct seq_file *m, void *data)
2338 {
2339         struct drm_info_node *node = m->private;
2340         struct drm_device *dev = node->minor->dev;
2341         struct drm_i915_private *dev_priv = dev->dev_private;
2342         struct drm_file *file;
2343
2344         int ret = mutex_lock_interruptible(&dev->struct_mutex);
2345         if (ret)
2346                 return ret;
2347         intel_runtime_pm_get(dev_priv);
2348
2349         if (INTEL_INFO(dev)->gen >= 8)
2350                 gen8_ppgtt_info(m, dev);
2351         else if (INTEL_INFO(dev)->gen >= 6)
2352                 gen6_ppgtt_info(m, dev);
2353
2354         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2355                 struct drm_i915_file_private *file_priv = file->driver_priv;
2356                 struct task_struct *task;
2357
2358                 task = get_pid_task(file->pid, PIDTYPE_PID);
2359                 if (!task) {
2360                         ret = -ESRCH;
2361                         goto out_put;
2362                 }
2363                 seq_printf(m, "\nproc: %s\n", task->comm);
2364                 put_task_struct(task);
2365                 idr_for_each(&file_priv->context_idr, per_file_ctx,
2366                              (void *)(unsigned long)m);
2367         }
2368
2369 out_put:
2370         intel_runtime_pm_put(dev_priv);
2371         mutex_unlock(&dev->struct_mutex);
2372
2373         return ret;
2374 }
2375
2376 static int count_irq_waiters(struct drm_i915_private *i915)
2377 {
2378         struct intel_engine_cs *engine;
2379         int count = 0;
2380
2381         for_each_engine(engine, i915)
2382                 count += engine->irq_refcount;
2383
2384         return count;
2385 }
2386
/*
 * Dump RPS (render P-state) status: current/min/max frequencies and the
 * per-client, semaphore, mmio-flip and kernel boost counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	/* client_lock guards the per-file rps bookkeeping read below. */
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/*
		 * pid_task() needs RCU; the task may already have exited,
		 * in which case it returns NULL.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Semaphore boosts: %d%s\n",
		   dev_priv->rps.semaphores.boosts,
		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
	seq_printf(m, "MMIO flip boosts: %d%s\n",
		   dev_priv->rps.mmioflips.boosts,
		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);

	return 0;
}
2428
2429 static int i915_llc(struct seq_file *m, void *data)
2430 {
2431         struct drm_info_node *node = m->private;
2432         struct drm_device *dev = node->minor->dev;
2433         struct drm_i915_private *dev_priv = dev->dev_private;
2434         const bool edram = INTEL_GEN(dev_priv) > 8;
2435
2436         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2437         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2438                    intel_uncore_edram_size(dev_priv)/1024/1024);
2439
2440         return 0;
2441 }
2442
/*
 * Dump GuC firmware fetch/load state, the GUC_STATUS register decoded
 * into its bootrom/uKernel/MIA fields, and the 16 scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	/* Nothing to report on platforms without GuC firmware. */
	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}
2486
2487 static void i915_guc_client_info(struct seq_file *m,
2488                                  struct drm_i915_private *dev_priv,
2489                                  struct i915_guc_client *client)
2490 {
2491         struct intel_engine_cs *engine;
2492         uint64_t tot = 0;
2493
2494         seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
2495                 client->priority, client->ctx_index, client->proc_desc_offset);
2496         seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
2497                 client->doorbell_id, client->doorbell_offset, client->cookie);
2498         seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2499                 client->wq_size, client->wq_offset, client->wq_tail);
2500
2501         seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2502         seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2503         seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2504
2505         for_each_engine(engine, dev_priv) {
2506                 seq_printf(m, "\tSubmissions: %llu %s\n",
2507                                 client->submissions[engine->guc_id],
2508                                 engine->name);
2509                 tot += client->submissions[engine->guc_id];
2510         }
2511         seq_printf(m, "\tTotal: %llu\n", tot);
2512 }
2513
/*
 * Dump GuC submission statistics.  A snapshot of the GuC state is taken
 * under struct_mutex so the (potentially slow) seq_file output happens
 * without holding the lock.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc guc;
	struct i915_guc_client client = {};
	struct intel_engine_cs *engine;
	u64 total = 0;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	/* Interrupted lock acquisition deliberately reports nothing. */
	if (mutex_lock_interruptible(&dev->struct_mutex))
		return 0;

	/* Take a local copy of the GuC data, so we can dump it at leisure */
	guc = dev_priv->guc;
	if (guc.execbuf_client)
		client = *guc.execbuf_client;

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);

	seq_printf(m, "\nGuC submissions:\n");
	for_each_engine(engine, dev_priv) {
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			engine->name, guc.submissions[engine->guc_id],
			guc.last_seqno[engine->guc_id]);
		total += guc.submissions[engine->guc_id];
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
	i915_guc_client_info(m, dev_priv, &client);

	/* Add more as required ... */

	return 0;
}
2559
/*
 * Hexdump the GuC log buffer object, four 32-bit words per line.
 * Pages are mapped one at a time with kmap_atomic (no sleeping between
 * map and unmap).
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
	u32 *log;
	int i = 0, pg;

	/* No log object means GuC logging is not set up. */
	if (!log_obj)
		return 0;

	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}
2587
/*
 * Report eDP PSR (Panel Self Refresh) software state, the HW enable
 * bit, and (HSW/BDW only) the PSR performance counter.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	/* Per-pipe PSR state; only filled and read on the !HAS_DDI
	 * (VLV/CHV) paths below.  Assumes at most 3 pipes there —
	 * NOTE(review): confirm against for_each_pipe() on those parts. */
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	/* DDI platforms have a single enable bit; VLV/CHV are per pipe. */
	if (HAS_DDI(dev))
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	/* On VLV/CHV, also name the pipes on which PSR is active. */
	if (!HAS_DDI(dev))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2655
2656 static int i915_sink_crc(struct seq_file *m, void *data)
2657 {
2658         struct drm_info_node *node = m->private;
2659         struct drm_device *dev = node->minor->dev;
2660         struct intel_encoder *encoder;
2661         struct intel_connector *connector;
2662         struct intel_dp *intel_dp = NULL;
2663         int ret;
2664         u8 crc[6];
2665
2666         drm_modeset_lock_all(dev);
2667         for_each_intel_connector(dev, connector) {
2668
2669                 if (connector->base.dpms != DRM_MODE_DPMS_ON)
2670                         continue;
2671
2672                 if (!connector->base.encoder)
2673                         continue;
2674
2675                 encoder = to_intel_encoder(connector->base.encoder);
2676                 if (encoder->type != INTEL_OUTPUT_EDP)
2677                         continue;
2678
2679                 intel_dp = enc_to_intel_dp(&encoder->base);
2680
2681                 ret = intel_dp_sink_crc(intel_dp, crc);
2682                 if (ret)
2683                         goto out;
2684
2685                 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2686                            crc[0], crc[1], crc[2],
2687                            crc[3], crc[4], crc[5]);
2688                 goto out;
2689         }
2690         ret = -ENODEV;
2691 out:
2692         drm_modeset_unlock_all(dev);
2693         return ret;
2694 }
2695
/*
 * Report the GPU energy counter in microjoules.  Scales the raw
 * MCH_SECP_NRG_STTS count by the RAPL energy unit from
 * MSR_RAPL_POWER_UNIT (bits 12:8, an inverse power of two in joules).
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	/* Extract the Energy Status Units field (bits 12:8). */
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
2721
/*
 * Report runtime power-management state: GPU idleness, IRQ state, the
 * device's PM usage count (when CONFIG_PM is set) and PCI power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Warn but keep dumping the remaining, still-valid fields. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev->dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(dev_priv->dev->pdev->current_state),
		   dev_priv->dev->pdev->current_state);

	return 0;
}
2746
2747 static int i915_power_domain_info(struct seq_file *m, void *unused)
2748 {
2749         struct drm_info_node *node = m->private;
2750         struct drm_device *dev = node->minor->dev;
2751         struct drm_i915_private *dev_priv = dev->dev_private;
2752         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2753         int i;
2754
2755         mutex_lock(&power_domains->lock);
2756
2757         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2758         for (i = 0; i < power_domains->power_well_count; i++) {
2759                 struct i915_power_well *power_well;
2760                 enum intel_display_power_domain power_domain;
2761
2762                 power_well = &power_domains->power_wells[i];
2763                 seq_printf(m, "%-25s %d\n", power_well->name,
2764                            power_well->count);
2765
2766                 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2767                      power_domain++) {
2768                         if (!(BIT(power_domain) & power_well->domains))
2769                                 continue;
2770
2771                         seq_printf(m, "  %-23s %d\n",
2772                                  intel_display_power_domain_str(power_domain),
2773                                  power_domains->domain_use_count[power_domain]);
2774                 }
2775         }
2776
2777         mutex_unlock(&power_domains->lock);
2778
2779         return 0;
2780 }
2781
/*
 * Report CSR/DMC firmware status: load state, path, version and — when
 * the firmware is new enough — the DC-state transition counters.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_csr *csr;

	if (!HAS_CSR(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	/* Keep the device awake for the register reads below. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload there is no version/counter info to show. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The DC counters only exist from certain firmware versions on. */
	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2826
2827 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2828                                  struct drm_display_mode *mode)
2829 {
2830         int i;
2831
2832         for (i = 0; i < tabs; i++)
2833                 seq_putc(m, '\t');
2834
2835         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2836                    mode->base.id, mode->name,
2837                    mode->vrefresh, mode->clock,
2838                    mode->hdisplay, mode->hsync_start,
2839                    mode->hsync_end, mode->htotal,
2840                    mode->vdisplay, mode->vsync_start,
2841                    mode->vsync_end, mode->vtotal,
2842                    mode->type, mode->flags);
2843 }
2844
2845 static void intel_encoder_info(struct seq_file *m,
2846                                struct intel_crtc *intel_crtc,
2847                                struct intel_encoder *intel_encoder)
2848 {
2849         struct drm_info_node *node = m->private;
2850         struct drm_device *dev = node->minor->dev;
2851         struct drm_crtc *crtc = &intel_crtc->base;
2852         struct intel_connector *intel_connector;
2853         struct drm_encoder *encoder;
2854
2855         encoder = &intel_encoder->base;
2856         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2857                    encoder->base.id, encoder->name);
2858         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2859                 struct drm_connector *connector = &intel_connector->base;
2860                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2861                            connector->base.id,
2862                            connector->name,
2863                            drm_get_connector_status_name(connector->status));
2864                 if (connector->status == connector_status_connected) {
2865                         struct drm_display_mode *mode = &crtc->mode;
2866                         seq_printf(m, ", mode:\n");
2867                         intel_seq_print_mode(m, 2, mode);
2868                 } else {
2869                         seq_putc(m, '\n');
2870                 }
2871         }
2872 }
2873
2874 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2875 {
2876         struct drm_info_node *node = m->private;
2877         struct drm_device *dev = node->minor->dev;
2878         struct drm_crtc *crtc = &intel_crtc->base;
2879         struct intel_encoder *intel_encoder;
2880         struct drm_plane_state *plane_state = crtc->primary->state;
2881         struct drm_framebuffer *fb = plane_state->fb;
2882
2883         if (fb)
2884                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2885                            fb->base.id, plane_state->src_x >> 16,
2886                            plane_state->src_y >> 16, fb->width, fb->height);
2887         else
2888                 seq_puts(m, "\tprimary plane disabled\n");
2889         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2890                 intel_encoder_info(m, intel_crtc, intel_encoder);
2891 }
2892
2893 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2894 {
2895         struct drm_display_mode *mode = panel->fixed_mode;
2896
2897         seq_printf(m, "\tfixed mode:\n");
2898         intel_seq_print_mode(m, 2, mode);
2899 }
2900
2901 static void intel_dp_info(struct seq_file *m,
2902                           struct intel_connector *intel_connector)
2903 {
2904         struct intel_encoder *intel_encoder = intel_connector->encoder;
2905         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2906
2907         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2908         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2909         if (intel_encoder->type == INTEL_OUTPUT_EDP)
2910                 intel_panel_info(m, &intel_connector->panel);
2911 }
2912
/* Print HDMI-specific connector details (currently just audio support). */
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}
2921
/* LVDS connectors only carry a fixed panel mode; print it. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2927
/*
 * Print one connector: status, display info when connected,
 * encoder-type-specific details, and all probed modes.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	/* A connector may have no encoder attached yet. */
	if (intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			intel_dp_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
			intel_hdmi_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2962
2963 static bool cursor_active(struct drm_device *dev, int pipe)
2964 {
2965         struct drm_i915_private *dev_priv = dev->dev_private;
2966         u32 state;
2967
2968         if (IS_845G(dev) || IS_I865G(dev))
2969                 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
2970         else
2971                 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2972
2973         return state;
2974 }
2975
2976 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2977 {
2978         struct drm_i915_private *dev_priv = dev->dev_private;
2979         u32 pos;
2980
2981         pos = I915_READ(CURPOS(pipe));
2982
2983         *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2984         if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2985                 *x = -*x;
2986
2987         *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2988         if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2989                 *y = -*y;
2990
2991         return cursor_active(dev, pipe);
2992 }
2993
/* Map a drm_plane_type to a short three-letter label for the dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3011
/*
 * Format a plane rotation bitmask into a printable string such as
 * "0 (0x00000001)".
 *
 * NOTE(review): the string is built in a static buffer, so the function
 * is not reentrant and the returned pointer is overwritten by the next
 * call.  That is acceptable for the single-threaded debugfs dumps below,
 * but callers must not cache the result.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3031
/*
 * Dump state (type, position, size, format, rotation) for every plane
 * attached to @intel_crtc.  Called from i915_display_info() with the
 * modeset locks held, so plane->state is stable while we read it.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;

		/* A plane without an atomic state has nothing to report. */
		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		/*
		 * src_* coordinates are 16.16 fixed point.  The integer
		 * part is printed directly; the fractional part is turned
		 * into four decimal digits via frac * 15625 >> 10, which
		 * equals frac * 10^4 / 2^16.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
			   plane_rotation(state->rotation));
	}
}
3066
/*
 * Dump the pipe scaler bookkeeping for @intel_crtc: user mask, the
 * currently assigned scaler id, and the in_use/mode state of each
 * scaler slot.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		/*
		 * NOTE(review): iterates over all SKL_NUM_SCALERS slots even
		 * when this crtc has fewer (num_scalers) — confirm the extra
		 * slots are always benign/zeroed.
		 */
		for (i = 0; i < SKL_NUM_SCALERS; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3094
3095 static int i915_display_info(struct seq_file *m, void *unused)
3096 {
3097         struct drm_info_node *node = m->private;
3098         struct drm_device *dev = node->minor->dev;
3099         struct drm_i915_private *dev_priv = dev->dev_private;
3100         struct intel_crtc *crtc;
3101         struct drm_connector *connector;
3102
3103         intel_runtime_pm_get(dev_priv);
3104         drm_modeset_lock_all(dev);
3105         seq_printf(m, "CRTC info\n");
3106         seq_printf(m, "---------\n");
3107         for_each_intel_crtc(dev, crtc) {
3108                 bool active;
3109                 struct intel_crtc_state *pipe_config;
3110                 int x, y;
3111
3112                 pipe_config = to_intel_crtc_state(crtc->base.state);
3113
3114                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3115                            crtc->base.base.id, pipe_name(crtc->pipe),
3116                            yesno(pipe_config->base.active),
3117                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3118                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3119
3120                 if (pipe_config->base.active) {
3121                         intel_crtc_info(m, crtc);
3122
3123                         active = cursor_position(dev, crtc->pipe, &x, &y);
3124                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
3125                                    yesno(crtc->cursor_base),
3126                                    x, y, crtc->base.cursor->state->crtc_w,
3127                                    crtc->base.cursor->state->crtc_h,
3128                                    crtc->cursor_addr, yesno(active));
3129                         intel_scaler_info(m, crtc);
3130                         intel_plane_info(m, crtc);
3131                 }
3132
3133                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3134                            yesno(!crtc->cpu_fifo_underrun_disabled),
3135                            yesno(!crtc->pch_fifo_underrun_disabled));
3136         }
3137
3138         seq_printf(m, "\n");
3139         seq_printf(m, "Connector info\n");
3140         seq_printf(m, "--------------\n");
3141         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3142                 intel_connector_info(m, connector);
3143         }
3144         drm_modeset_unlock_all(dev);
3145         intel_runtime_pm_put(dev_priv);
3146
3147         return 0;
3148 }
3149
/*
 * i915_semaphore_status - debugfs dump of inter-engine semaphore state
 *
 * On Broadwell the per-engine last-signal/last-wait seqnos live in a
 * dedicated GEM object (dev_priv->semaphore_obj) whose first page is
 * kmapped and dumped; on earlier hardware the mailbox signal registers
 * are read directly.  Finishes with the per-engine sync_seqno table.
 * Returns 0, or the error from an interrupted struct_mutex lock.
 */
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	enum intel_engine_id id;
	int j, ret;

	/* Nothing useful to report when semaphores are not in use. */
	if (!i915_semaphore_is_enabled(dev)) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine_id(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			/* Signal slots: row = this engine, column = target. */
			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			/* Wait slots: transposed indexing relative to signal. */
			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		/*
		 * NOTE(review): each value is printed with its own '\n', so
		 * the "  Last signal:" header ends up on a separate line —
		 * looks unintended but is harmless.
		 */
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_engine(engine, dev_priv) {
		for (j = 0; j < num_rings; j++)
			seq_printf(m, "  0x%08x ",
				   engine->semaphore.sync_seqno[j]);
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
3222
3223 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3224 {
3225         struct drm_info_node *node = (struct drm_info_node *) m->private;
3226         struct drm_device *dev = node->minor->dev;
3227         struct drm_i915_private *dev_priv = dev->dev_private;
3228         int i;
3229
3230         drm_modeset_lock_all(dev);
3231         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3232                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3233
3234                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3235                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3236                            pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
3237                 seq_printf(m, " tracked hardware state:\n");
3238                 seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
3239                 seq_printf(m, " dpll_md: 0x%08x\n",
3240                            pll->config.hw_state.dpll_md);
3241                 seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
3242                 seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
3243                 seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
3244         }
3245         drm_modeset_unlock_all(dev);
3246
3247         return 0;
3248 }
3249
/*
 * i915_wa_registers - debugfs dump of workaround register status
 *
 * For every workaround register recorded in dev_priv->workarounds,
 * reads the register back and reports OK when (value & mask) matches
 * (read & mask), i.e. the workaround still appears applied in hardware.
 * Also prints the per-engine HW whitelist counts.  Returns 0, or the
 * error from an interrupted struct_mutex lock.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Registers read below may live in power wells: keep the device awake. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		/* only the masked bits are required to stick */
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
3290
/*
 * i915_ddb_info - debugfs dump of the SKL+ display data buffer (DDB)
 * allocation: per-pipe, per-plane start/end/size entries plus the
 * cursor entry.  A silent no-op (returns 0) before gen9, where this
 * bookkeeping does not exist.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return 0;

	/* lock out modesets so the allocation table is stable */
	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* the cursor plane has its own dedicated slot */
		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3329
/*
 * Print the DRRS (dynamic refresh rate switching) status for one CRTC:
 * the attached encoder type, the VBT-reported DRRS capability, and —
 * when the current crtc state has DRRS — the live refresh-rate state
 * guarded by drrs->mutex.  Returns early (no further output) for
 * unrecognised encoder types or refresh-rate states.
 */
static void drrs_status_per_crtc(struct seq_file *m,
		struct drm_device *dev, struct intel_crtc *intel_crtc)
{
	struct intel_encoder *intel_encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;

	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
		/* Encoder connected on this CRTC */
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_EDP:
			seq_puts(m, "eDP:\n");
			break;
		case INTEL_OUTPUT_DSI:
			seq_puts(m, "DSI:\n");
			break;
		case INTEL_OUTPUT_HDMI:
			seq_puts(m, "HDMI:\n");
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			seq_puts(m, "DP:\n");
			break;
		default:
			/* Unknown encoder: report it and bail out entirely. */
			seq_printf(m, "Other encoder (id=%d).\n",
						intel_encoder->type);
			return;
		}
	}

	/* VBT-declared capability, independent of the live state below. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unknown state: report and stop, dropping the mutex. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3412
3413 static int i915_drrs_status(struct seq_file *m, void *unused)
3414 {
3415         struct drm_info_node *node = m->private;
3416         struct drm_device *dev = node->minor->dev;
3417         struct intel_crtc *intel_crtc;
3418         int active_crtc_cnt = 0;
3419
3420         for_each_intel_crtc(dev, intel_crtc) {
3421                 drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3422
3423                 if (intel_crtc->base.state->active) {
3424                         active_crtc_cnt++;
3425                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3426
3427                         drrs_status_per_crtc(m, dev, intel_crtc);
3428                 }
3429
3430                 drm_modeset_unlock(&intel_crtc->base.mutex);
3431         }
3432
3433         if (!active_crtc_cnt)
3434                 seq_puts(m, "No active crtc found\n");
3435
3436         return 0;
3437 }
3438
/*
 * Identifies one per-pipe CRC debugfs file: its file name, the owning
 * device and the pipe it reports on.
 */
struct pipe_crc_info {
	const char *name;	/* debugfs file name, e.g. "i915_pipe_A_crc" */
	struct drm_device *dev;	/* filled in by i915_pipe_crc_create() */
	enum pipe pipe;
};
3444
3445 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3446 {
3447         struct drm_info_node *node = (struct drm_info_node *) m->private;
3448         struct drm_device *dev = node->minor->dev;
3449         struct drm_encoder *encoder;
3450         struct intel_encoder *intel_encoder;
3451         struct intel_digital_port *intel_dig_port;
3452         drm_modeset_lock_all(dev);
3453         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3454                 intel_encoder = to_intel_encoder(encoder);
3455                 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3456                         continue;
3457                 intel_dig_port = enc_to_dig_port(encoder);
3458                 if (!intel_dig_port->dp.can_mst)
3459                         continue;
3460
3461                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3462         }
3463         drm_modeset_unlock_all(dev);
3464         return 0;
3465 }
3466
/*
 * Open handler for a per-pipe CRC file.  Only a single opener is
 * allowed at a time: returns -EBUSY if the file is already open, and
 * -ENODEV when the requested pipe does not exist on this device.  The
 * opened flag is flipped under pipe_crc->lock so open/release/read
 * agree on it.
 */
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
3490
/*
 * Release handler: mark the per-pipe CRC file as closed again so the
 * next i915_pipe_crc_open() can succeed.
 */
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
3503
3504 /* (6 fields, 8 chars each, space separated (5) + '\n') */
3505 #define PIPE_CRC_LINE_LEN       (6 * 8 + 5 + 1)
/* account for the trailing '\0' */
3507 #define PIPE_CRC_BUFFER_LEN     (PIPE_CRC_LINE_LEN + 1)
3508
/*
 * Number of CRC entries currently available to read from the circular
 * buffer.  Caller must hold pipe_crc->lock.
 */
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
3515
/*
 * Read handler for the per-pipe CRC file.  Formats whole CRC entries
 * as fixed-width text lines (PIPE_CRC_LINE_LEN bytes each).  Blocks
 * until at least one entry is available unless O_NONBLOCK is set
 * (-EAGAIN).  The buffer lock is dropped around copy_to_user() and
 * re-taken, so producers can keep filling the ring while we copy.
 */
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	/* No CRC source configured: nothing will ever arrive. */
	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		/* Sleep with the lock released, woken by the CRC producer. */
		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];
		int ret;

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		/* power-of-2 ring size lets the tail wrap with a mask */
		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		spin_unlock_irq(&pipe_crc->lock);

		/*
		 * NOTE(review): only a complete copy failure is treated as
		 * -EFAULT; a partial copy (0 < ret < PIPE_CRC_LINE_LEN)
		 * would still advance and over-report bytes_read — confirm
		 * whether that case can occur / matters here.
		 */
		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;
		n_entries--;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
3594
/* File operations for the per-pipe i915_pipe_<pipe>_crc debugfs files. */
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};
3601
/*
 * One pipe_crc_info per pipe, indexed by enum pipe.  The .dev field is
 * filled in lazily by i915_pipe_crc_create().
 */
static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};
3616
3617 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3618                                 enum pipe pipe)
3619 {
3620         struct drm_device *dev = minor->dev;
3621         struct dentry *ent;
3622         struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3623
3624         info->dev = dev;
3625         ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3626                                   &i915_pipe_crc_fops);
3627         if (!ent)
3628                 return -ENOMEM;
3629
3630         return drm_add_fake_info_node(minor, ent, info);
3631 }
3632
/*
 * User-visible names for CRC sources, indexed by
 * enum intel_pipe_crc_source (size checked in pipe_crc_source_name()).
 */
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};
3645
/* Map a CRC source enum value to its user-visible name. */
static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	/* Fail the build if the name table and the enum fall out of sync. */
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}
3651
3652 static int display_crc_ctl_show(struct seq_file *m, void *data)
3653 {
3654         struct drm_device *dev = m->private;
3655         struct drm_i915_private *dev_priv = dev->dev_private;
3656         int i;
3657
3658         for (i = 0; i < I915_MAX_PIPES; i++)
3659                 seq_printf(m, "%c %s\n", pipe_name(i),
3660                            pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3661
3662         return 0;
3663 }
3664
/* Open handler for the CRC control file: a plain single_open() wrapper. */
static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
3671
3672 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3673                                  uint32_t *val)
3674 {
3675         if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3676                 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
3677
3678         switch (*source) {
3679         case INTEL_PIPE_CRC_SOURCE_PIPE:
3680                 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3681                 break;
3682         case INTEL_PIPE_CRC_SOURCE_NONE:
3683                 *val = 0;
3684                 break;
3685         default:
3686                 return -EINVAL;
3687         }
3688
3689         return 0;
3690 }
3691
/*
 * Resolve the "auto" CRC source for @pipe on i9xx-class hardware.
 * Defaults to the pipe CRC, but switches to the TV or DP-B/C/D source
 * when an encoder of that type is active on the pipe.  Walks the
 * encoder list under the modeset locks.
 *
 * NOTE(review): ret is initialised to 0 and never changed, so this
 * always returns 0 — presumably kept as an int for interface symmetry
 * with the other source resolvers.
 */
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			/* pick the DP source matching the port in use */
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}
3743
/*
 * Compute the CRC control register value for @source on VLV/CHV,
 * resolving "auto" via i9xx_pipe_crc_auto_source().  DP sources also
 * require the port scrambler to be put into per-frame reset mode (see
 * the block comment below).  Returns -EINVAL for sources the platform
 * does not support (DP-D is CHV-only).
 */
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		/* only CHV has a DP-D port */
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
3814
/*
 * Fill *val with the PIPE_CRC_CTL value selecting the requested CRC
 * source on pre-gen5 (i9xx/g4x) hardware.  INTEL_PIPE_CRC_SOURCE_AUTO
 * is first resolved to a concrete source.
 *
 * Returns 0 on success, -EINVAL for sources the platform lacks (TV
 * without TV-out support, DP sources on non-G4X).
 */
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		/* need_stable_symbols is only set for G4X-only DP sources. */
		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		/* Only pipes A and B reach this path on G4X. */
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
3890
3891 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3892                                          enum pipe pipe)
3893 {
3894         struct drm_i915_private *dev_priv = dev->dev_private;
3895         uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3896
3897         switch (pipe) {
3898         case PIPE_A:
3899                 tmp &= ~PIPE_A_SCRAMBLE_RESET;
3900                 break;
3901         case PIPE_B:
3902                 tmp &= ~PIPE_B_SCRAMBLE_RESET;
3903                 break;
3904         case PIPE_C:
3905                 tmp &= ~PIPE_C_SCRAMBLE_RESET;
3906                 break;
3907         default:
3908                 return;
3909         }
3910         if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3911                 tmp &= ~DC_BALANCE_RESET_VLV;
3912         I915_WRITE(PORT_DFT2_G4X, tmp);
3913
3914 }
3915
3916 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3917                                          enum pipe pipe)
3918 {
3919         struct drm_i915_private *dev_priv = dev->dev_private;
3920         uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3921
3922         if (pipe == PIPE_A)
3923                 tmp &= ~PIPE_A_SCRAMBLE_RESET;
3924         else
3925                 tmp &= ~PIPE_B_SCRAMBLE_RESET;
3926         I915_WRITE(PORT_DFT2_G4X, tmp);
3927
3928         if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3929                 I915_WRITE(PORT_DFT_I9XX,
3930                            I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3931         }
3932 }
3933
3934 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3935                                 uint32_t *val)
3936 {
3937         if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3938                 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
3939
3940         switch (*source) {
3941         case INTEL_PIPE_CRC_SOURCE_PLANE1:
3942                 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3943                 break;
3944         case INTEL_PIPE_CRC_SOURCE_PLANE2:
3945                 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3946                 break;
3947         case INTEL_PIPE_CRC_SOURCE_PIPE:
3948                 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3949                 break;
3950         case INTEL_PIPE_CRC_SOURCE_NONE:
3951                 *val = 0;
3952                 break;
3953         default:
3954                 return -EINVAL;
3955         }
3956
3957         return 0;
3958 }
3959
/*
 * Toggle the HSW pipe A CRC workaround: force pipe A through the PCH
 * panel fitter ("force_thru") via a full atomic commit so CRCs can be
 * captured with the eDP transcoder.
 *
 * NOTE(review): on a successful commit the atomic state is consumed by
 * drm_atomic_commit(); it is only freed here on failure (and
 * drm_atomic_state_free() tolerates the NULL from a failed alloc).
 */
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	pipe_config->pch_pfit.force_thru = enable;
	/* Force a modeset when toggling the w/a changes pfit state on eDP. */
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_modeset_unlock_all(dev);
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	if (ret)
		drm_atomic_state_free(state);
}
3995
3996 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3997                                 enum pipe pipe,
3998                                 enum intel_pipe_crc_source *source,
3999                                 uint32_t *val)
4000 {
4001         if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
4002                 *source = INTEL_PIPE_CRC_SOURCE_PF;
4003
4004         switch (*source) {
4005         case INTEL_PIPE_CRC_SOURCE_PLANE1:
4006                 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
4007                 break;
4008         case INTEL_PIPE_CRC_SOURCE_PLANE2:
4009                 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
4010                 break;
4011         case INTEL_PIPE_CRC_SOURCE_PF:
4012                 if (IS_HASWELL(dev) && pipe == PIPE_A)
4013                         hsw_trans_edp_pipe_A_crc_wa(dev, true);
4014
4015                 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
4016                 break;
4017         case INTEL_PIPE_CRC_SOURCE_NONE:
4018                 *val = 0;
4019                 break;
4020         default:
4021                 return -EINVAL;
4022         }
4023
4024         return 0;
4025 }
4026
/*
 * Switch the CRC capture source for @pipe.  Changing between two real
 * sources requires going through "none" first.  Handles allocation of
 * the CRC entry ring, disabling IPS (which perturbs CRCs), programming
 * PIPE_CRC_CTL, and undoing the symbol-stability workarounds on the
 * real-source -> none transition.
 *
 * Returns 0 on success, -EINVAL for a forbidden transition or bad
 * source, -EIO when the pipe's power domain is off, -ENOMEM on
 * allocation failure.
 */
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
									pipe));
	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	/* Pick the platform-specific PIPE_CRC_CTL encoding. */
	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		goto out;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		/* Swap in the fresh entry ring under the irq-safe CRC lock. */
		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		/* Wait one vblank so in-flight CRC interrupts drain first. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		/* Undo the platform symbol-stability / HSW eDP workarounds. */
		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, false);

		hsw_enable_ips(crtc);
	}

	ret = 0;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4141
4142 /*
4143  * Parse pipe CRC command strings:
4144  *   command: wsp* object wsp+ name wsp+ source wsp*
4145  *   object: 'pipe'
4146  *   name: (A | B | C)
4147  *   source: (none | plane1 | plane2 | pf)
4148  *   wsp: (#0x20 | #0x9 | #0xA)+
4149  *
4150  * eg.:
4151  *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
4152  *  "pipe A none"    ->  Stop CRC
4153  */
4154 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
4155 {
4156         int n_words = 0;
4157
4158         while (*buf) {
4159                 char *end;
4160
4161                 /* skip leading white space */
4162                 buf = skip_spaces(buf);
4163                 if (!*buf)
4164                         break;  /* end of buffer */
4165
4166                 /* find end of word */
4167                 for (end = buf; *end && !isspace(*end); end++)
4168                         ;
4169
4170                 if (n_words == max_words) {
4171                         DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
4172                                          max_words);
4173                         return -EINVAL; /* ran out of words[] before bytes */
4174                 }
4175
4176                 if (*end)
4177                         *end++ = '\0';
4178                 words[n_words++] = buf;
4179                 buf = end;
4180         }
4181
4182         return n_words;
4183 }
4184
/* Objects addressable by the CRC control file; currently only "pipe". */
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};
4188
/* Command-string names, indexed by enum intel_pipe_crc_object. */
static const char * const pipe_crc_objects[] = {
	"pipe",
};
4192
4193 static int
4194 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
4195 {
4196         int i;
4197
4198         for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
4199                 if (!strcmp(buf, pipe_crc_objects[i])) {
4200                         *o = i;
4201                         return 0;
4202                     }
4203
4204         return -EINVAL;
4205 }
4206
4207 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
4208 {
4209         const char name = buf[0];
4210
4211         if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
4212                 return -EINVAL;
4213
4214         *pipe = name - 'A';
4215
4216         return 0;
4217 }
4218
4219 static int
4220 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
4221 {
4222         int i;
4223
4224         for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
4225                 if (!strcmp(buf, pipe_crc_sources[i])) {
4226                         *s = i;
4227                         return 0;
4228                     }
4229
4230         return -EINVAL;
4231 }
4232
/*
 * Parse and execute one pipe CRC command ("pipe <name> <source>", per
 * the grammar documented above) from a NUL-terminated buffer.
 * Returns the pipe_crc_set_source() result, or -EINVAL on parse error.
 */
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	/* word 0: object ("pipe") */
	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	/* word 1: pipe name (A/B/C) */
	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	/* word 2: CRC source */
	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
4266
4267 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
4268                                      size_t len, loff_t *offp)
4269 {
4270         struct seq_file *m = file->private_data;
4271         struct drm_device *dev = m->private;
4272         char *tmpbuf;
4273         int ret;
4274
4275         if (len == 0)
4276                 return 0;
4277
4278         if (len > PAGE_SIZE - 1) {
4279                 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4280                                  PAGE_SIZE);
4281                 return -E2BIG;
4282         }
4283
4284         tmpbuf = kmalloc(len + 1, GFP_KERNEL);
4285         if (!tmpbuf)
4286                 return -ENOMEM;
4287
4288         if (copy_from_user(tmpbuf, ubuf, len)) {
4289                 ret = -EFAULT;
4290                 goto out;
4291         }
4292         tmpbuf[len] = '\0';
4293
4294         ret = display_crc_ctl_parse(dev, tmpbuf, len);
4295
4296 out:
4297         kfree(tmpbuf);
4298         if (ret < 0)
4299                 return ret;
4300
4301         *offp += len;
4302         return len;
4303 }
4304
/* debugfs "i915_display_crc_ctl": seq_file reads, command-string writes. */
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
4313
4314 static ssize_t i915_displayport_test_active_write(struct file *file,
4315                                             const char __user *ubuf,
4316                                             size_t len, loff_t *offp)
4317 {
4318         char *input_buffer;
4319         int status = 0;
4320         struct drm_device *dev;
4321         struct drm_connector *connector;
4322         struct list_head *connector_list;
4323         struct intel_dp *intel_dp;
4324         int val = 0;
4325
4326         dev = ((struct seq_file *)file->private_data)->private;
4327
4328         connector_list = &dev->mode_config.connector_list;
4329
4330         if (len == 0)
4331                 return 0;
4332
4333         input_buffer = kmalloc(len + 1, GFP_KERNEL);
4334         if (!input_buffer)
4335                 return -ENOMEM;
4336
4337         if (copy_from_user(input_buffer, ubuf, len)) {
4338                 status = -EFAULT;
4339                 goto out;
4340         }
4341
4342         input_buffer[len] = '\0';
4343         DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4344
4345         list_for_each_entry(connector, connector_list, head) {
4346
4347                 if (connector->connector_type !=
4348                     DRM_MODE_CONNECTOR_DisplayPort)
4349                         continue;
4350
4351                 if (connector->status == connector_status_connected &&
4352                     connector->encoder != NULL) {
4353                         intel_dp = enc_to_intel_dp(connector->encoder);
4354                         status = kstrtoint(input_buffer, 10, &val);
4355                         if (status < 0)
4356                                 goto out;
4357                         DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4358                         /* To prevent erroneous activation of the compliance
4359                          * testing code, only accept an actual value of 1 here
4360                          */
4361                         if (val == 1)
4362                                 intel_dp->compliance_test_active = 1;
4363                         else
4364                                 intel_dp->compliance_test_active = 0;
4365                 }
4366         }
4367 out:
4368         kfree(input_buffer);
4369         if (status < 0)
4370                 return status;
4371
4372         *offp += len;
4373         return len;
4374 }
4375
4376 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
4377 {
4378         struct drm_device *dev = m->private;
4379         struct drm_connector *connector;
4380         struct list_head *connector_list = &dev->mode_config.connector_list;
4381         struct intel_dp *intel_dp;
4382
4383         list_for_each_entry(connector, connector_list, head) {
4384
4385                 if (connector->connector_type !=
4386                     DRM_MODE_CONNECTOR_DisplayPort)
4387                         continue;
4388
4389                 if (connector->status == connector_status_connected &&
4390                     connector->encoder != NULL) {
4391                         intel_dp = enc_to_intel_dp(connector->encoder);
4392                         if (intel_dp->compliance_test_active)
4393                                 seq_puts(m, "1");
4394                         else
4395                                 seq_puts(m, "0");
4396                 } else
4397                         seq_puts(m, "0");
4398         }
4399
4400         return 0;
4401 }
4402
4403 static int i915_displayport_test_active_open(struct inode *inode,
4404                                        struct file *file)
4405 {
4406         struct drm_device *dev = inode->i_private;
4407
4408         return single_open(file, i915_displayport_test_active_show, dev);
4409 }
4410
/* debugfs "i915_dp_test_active": read current state, write to arm/disarm. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
4419
4420 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
4421 {
4422         struct drm_device *dev = m->private;
4423         struct drm_connector *connector;
4424         struct list_head *connector_list = &dev->mode_config.connector_list;
4425         struct intel_dp *intel_dp;
4426
4427         list_for_each_entry(connector, connector_list, head) {
4428
4429                 if (connector->connector_type !=
4430                     DRM_MODE_CONNECTOR_DisplayPort)
4431                         continue;
4432
4433                 if (connector->status == connector_status_connected &&
4434                     connector->encoder != NULL) {
4435                         intel_dp = enc_to_intel_dp(connector->encoder);
4436                         seq_printf(m, "%lx", intel_dp->compliance_test_data);
4437                 } else
4438                         seq_puts(m, "0");
4439         }
4440
4441         return 0;
4442 }
4443 static int i915_displayport_test_data_open(struct inode *inode,
4444                                        struct file *file)
4445 {
4446         struct drm_device *dev = inode->i_private;
4447
4448         return single_open(file, i915_displayport_test_data_show, dev);
4449 }
4450
/* debugfs "i915_dp_test_data": read-only compliance test data. */
static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
4458
4459 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
4460 {
4461         struct drm_device *dev = m->private;
4462         struct drm_connector *connector;
4463         struct list_head *connector_list = &dev->mode_config.connector_list;
4464         struct intel_dp *intel_dp;
4465
4466         list_for_each_entry(connector, connector_list, head) {
4467
4468                 if (connector->connector_type !=
4469                     DRM_MODE_CONNECTOR_DisplayPort)
4470                         continue;
4471
4472                 if (connector->status == connector_status_connected &&
4473                     connector->encoder != NULL) {
4474                         intel_dp = enc_to_intel_dp(connector->encoder);
4475                         seq_printf(m, "%02lx", intel_dp->compliance_test_type);
4476                 } else
4477                         seq_puts(m, "0");
4478         }
4479
4480         return 0;
4481 }
4482
4483 static int i915_displayport_test_type_open(struct inode *inode,
4484                                        struct file *file)
4485 {
4486         struct drm_device *dev = inode->i_private;
4487
4488         return single_open(file, i915_displayport_test_type_show, dev);
4489 }
4490
/* debugfs "i915_dp_test_type": read-only compliance test type. */
static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
4498
/*
 * Print one "WM<n> <raw> (<usec> usec)" line per watermark level from
 * the given latency table, normalizing the raw value to tenths of a
 * microsecond for display.  Level count is platform dependent.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
		    IS_CHERRYVIEW(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
4533
4534 static int pri_wm_latency_show(struct seq_file *m, void *data)
4535 {
4536         struct drm_device *dev = m->private;
4537         struct drm_i915_private *dev_priv = dev->dev_private;
4538         const uint16_t *latencies;
4539
4540         if (INTEL_INFO(dev)->gen >= 9)
4541                 latencies = dev_priv->wm.skl_latency;
4542         else
4543                 latencies = to_i915(dev)->wm.pri_latency;
4544
4545         wm_latency_show(m, latencies);
4546
4547         return 0;
4548 }
4549
4550 static int spr_wm_latency_show(struct seq_file *m, void *data)
4551 {
4552         struct drm_device *dev = m->private;
4553         struct drm_i915_private *dev_priv = dev->dev_private;
4554         const uint16_t *latencies;
4555
4556         if (INTEL_INFO(dev)->gen >= 9)
4557                 latencies = dev_priv->wm.skl_latency;
4558         else
4559                 latencies = to_i915(dev)->wm.spr_latency;
4560
4561         wm_latency_show(m, latencies);
4562
4563         return 0;
4564 }
4565
4566 static int cur_wm_latency_show(struct seq_file *m, void *data)
4567 {
4568         struct drm_device *dev = m->private;
4569         struct drm_i915_private *dev_priv = dev->dev_private;
4570         const uint16_t *latencies;
4571
4572         if (INTEL_INFO(dev)->gen >= 9)
4573                 latencies = dev_priv->wm.skl_latency;
4574         else
4575                 latencies = to_i915(dev)->wm.cur_latency;
4576
4577         wm_latency_show(m, latencies);
4578
4579         return 0;
4580 }
4581
4582 static int pri_wm_latency_open(struct inode *inode, struct file *file)
4583 {
4584         struct drm_device *dev = inode->i_private;
4585
4586         if (INTEL_INFO(dev)->gen < 5)
4587                 return -ENODEV;
4588
4589         return single_open(file, pri_wm_latency_show, dev);
4590 }
4591
4592 static int spr_wm_latency_open(struct inode *inode, struct file *file)
4593 {
4594         struct drm_device *dev = inode->i_private;
4595
4596         if (HAS_GMCH_DISPLAY(dev))
4597                 return -ENODEV;
4598
4599         return single_open(file, spr_wm_latency_show, dev);
4600 }
4601
4602 static int cur_wm_latency_open(struct inode *inode, struct file *file)
4603 {
4604         struct drm_device *dev = inode->i_private;
4605
4606         if (HAS_GMCH_DISPLAY(dev))
4607                 return -ENODEV;
4608
4609         return single_open(file, cur_wm_latency_show, dev);
4610 }
4611
/*
 * Common writer for the WM latency debugfs files: parse exactly one
 * space-separated uint16 value per supported watermark level from
 * userspace and store them into @wm under the modeset locks.
 *
 * Returns @len on success, -EINVAL for oversized or malformed input,
 * -EFAULT when the copy from userspace fails.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Level count is platform dependent (mirrors wm_latency_show()). */
	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	/* Reserve one byte for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Require exactly one value per supported level. */
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
4653
4654
4655 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4656                                     size_t len, loff_t *offp)
4657 {
4658         struct seq_file *m = file->private_data;
4659         struct drm_device *dev = m->private;
4660         struct drm_i915_private *dev_priv = dev->dev_private;
4661         uint16_t *latencies;
4662
4663         if (INTEL_INFO(dev)->gen >= 9)
4664                 latencies = dev_priv->wm.skl_latency;
4665         else
4666                 latencies = to_i915(dev)->wm.pri_latency;
4667
4668         return wm_latency_write(file, ubuf, len, offp, latencies);
4669 }
4670
4671 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4672                                     size_t len, loff_t *offp)
4673 {
4674         struct seq_file *m = file->private_data;
4675         struct drm_device *dev = m->private;
4676         struct drm_i915_private *dev_priv = dev->dev_private;
4677         uint16_t *latencies;
4678
4679         if (INTEL_INFO(dev)->gen >= 9)
4680                 latencies = dev_priv->wm.skl_latency;
4681         else
4682                 latencies = to_i915(dev)->wm.spr_latency;
4683
4684         return wm_latency_write(file, ubuf, len, offp, latencies);
4685 }
4686
4687 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4688                                     size_t len, loff_t *offp)
4689 {
4690         struct seq_file *m = file->private_data;
4691         struct drm_device *dev = m->private;
4692         struct drm_i915_private *dev_priv = dev->dev_private;
4693         uint16_t *latencies;
4694
4695         if (INTEL_INFO(dev)->gen >= 9)
4696                 latencies = dev_priv->wm.skl_latency;
4697         else
4698                 latencies = to_i915(dev)->wm.cur_latency;
4699
4700         return wm_latency_write(file, ubuf, len, offp, latencies);
4701 }
4702
/* i915_pri_wm_latency: seq_file read, pri_wm_latency_write to update. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
4711
/* i915_spr_wm_latency: seq_file read, spr_wm_latency_write to update. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
4720
/* i915_cur_wm_latency: seq_file read, cur_wm_latency_write to update. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4729
4730 static int
4731 i915_wedged_get(void *data, u64 *val)
4732 {
4733         struct drm_device *dev = data;
4734         struct drm_i915_private *dev_priv = dev->dev_private;
4735
4736         *val = i915_terminally_wedged(&dev_priv->gpu_error);
4737
4738         return 0;
4739 }
4740
/*
 * Inject a GPU error from debugfs by passing @val straight through to
 * i915_handle_error().
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	/* Don't pile a second reset on top of one already in flight. */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	/* keep the device awake while the error is being handled */
	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
4771
4772 static int
4773 i915_ring_stop_get(void *data, u64 *val)
4774 {
4775         struct drm_device *dev = data;
4776         struct drm_i915_private *dev_priv = dev->dev_private;
4777
4778         *val = dev_priv->gpu_error.stop_rings;
4779
4780         return 0;
4781 }
4782
4783 static int
4784 i915_ring_stop_set(void *data, u64 val)
4785 {
4786         struct drm_device *dev = data;
4787         struct drm_i915_private *dev_priv = dev->dev_private;
4788         int ret;
4789
4790         DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4791
4792         ret = mutex_lock_interruptible(&dev->struct_mutex);
4793         if (ret)
4794                 return ret;
4795
4796         dev_priv->gpu_error.stop_rings = val;
4797         mutex_unlock(&dev->struct_mutex);
4798
4799         return 0;
4800 }
4801
4802 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4803                         i915_ring_stop_get, i915_ring_stop_set,
4804                         "0x%08llx\n");
4805
4806 static int
4807 i915_ring_missed_irq_get(void *data, u64 *val)
4808 {
4809         struct drm_device *dev = data;
4810         struct drm_i915_private *dev_priv = dev->dev_private;
4811
4812         *val = dev_priv->gpu_error.missed_irq_rings;
4813         return 0;
4814 }
4815
4816 static int
4817 i915_ring_missed_irq_set(void *data, u64 val)
4818 {
4819         struct drm_device *dev = data;
4820         struct drm_i915_private *dev_priv = dev->dev_private;
4821         int ret;
4822
4823         /* Lock against concurrent debugfs callers */
4824         ret = mutex_lock_interruptible(&dev->struct_mutex);
4825         if (ret)
4826                 return ret;
4827         dev_priv->gpu_error.missed_irq_rings = val;
4828         mutex_unlock(&dev->struct_mutex);
4829
4830         return 0;
4831 }
4832
4833 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4834                         i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4835                         "0x%08llx\n");
4836
4837 static int
4838 i915_ring_test_irq_get(void *data, u64 *val)
4839 {
4840         struct drm_device *dev = data;
4841         struct drm_i915_private *dev_priv = dev->dev_private;
4842
4843         *val = dev_priv->gpu_error.test_irq_rings;
4844
4845         return 0;
4846 }
4847
4848 static int
4849 i915_ring_test_irq_set(void *data, u64 val)
4850 {
4851         struct drm_device *dev = data;
4852         struct drm_i915_private *dev_priv = dev->dev_private;
4853         int ret;
4854
4855         DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4856
4857         /* Lock against concurrent debugfs callers */
4858         ret = mutex_lock_interruptible(&dev->struct_mutex);
4859         if (ret)
4860                 return ret;
4861
4862         dev_priv->gpu_error.test_irq_rings = val;
4863         mutex_unlock(&dev->struct_mutex);
4864
4865         return 0;
4866 }
4867
4868 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4869                         i915_ring_test_irq_get, i915_ring_test_irq_set,
4870                         "0x%08llx\n");
4871
/* Flag bits accepted by i915_gem_drop_caches (see i915_drop_caches_set). */
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
4880 static int
4881 i915_drop_caches_get(void *data, u64 *val)
4882 {
4883         *val = DROP_ALL;
4884
4885         return 0;
4886 }
4887
/*
 * Drop the GEM state selected by the DROP_* flags in @val: optionally
 * idle the GPU, retire requests, and shrink bound/unbound objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	/* DROP_ACTIVE implies a retire pass after idling */
	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4927
4928 static int
4929 i915_max_freq_get(void *data, u64 *val)
4930 {
4931         struct drm_device *dev = data;
4932         struct drm_i915_private *dev_priv = dev->dev_private;
4933         int ret;
4934
4935         if (INTEL_INFO(dev)->gen < 6)
4936                 return -ENODEV;
4937
4938         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4939
4940         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4941         if (ret)
4942                 return ret;
4943
4944         *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4945         mutex_unlock(&dev_priv->rps.hw_lock);
4946
4947         return 0;
4948 }
4949
/*
 * Set the RPS max frequency softlimit. @val is converted to a hardware
 * opcode via intel_freq_opcode() before validation. Gen6+ only.
 */
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	/* wait for any deferred RPS resume work to complete first */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	/* reject values outside hw limits or below the min softlimit */
	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
4994
4995 static int
4996 i915_min_freq_get(void *data, u64 *val)
4997 {
4998         struct drm_device *dev = data;
4999         struct drm_i915_private *dev_priv = dev->dev_private;
5000         int ret;
5001
5002         if (INTEL_INFO(dev)->gen < 6)
5003                 return -ENODEV;
5004
5005         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5006
5007         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
5008         if (ret)
5009                 return ret;
5010
5011         *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
5012         mutex_unlock(&dev_priv->rps.hw_lock);
5013
5014         return 0;
5015 }
5016
/*
 * Set the RPS min frequency softlimit. @val is converted to a hardware
 * opcode via intel_freq_opcode() before validation. Gen6+ only.
 */
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	/* wait for any deferred RPS resume work to complete first */
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	/* reject values outside hw limits or above the max softlimit */
	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
5061
5062 static int
5063 i915_cache_sharing_get(void *data, u64 *val)
5064 {
5065         struct drm_device *dev = data;
5066         struct drm_i915_private *dev_priv = dev->dev_private;
5067         u32 snpcr;
5068         int ret;
5069
5070         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5071                 return -ENODEV;
5072
5073         ret = mutex_lock_interruptible(&dev->struct_mutex);
5074         if (ret)
5075                 return ret;
5076         intel_runtime_pm_get(dev_priv);
5077
5078         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5079
5080         intel_runtime_pm_put(dev_priv);
5081         mutex_unlock(&dev_priv->dev->struct_mutex);
5082
5083         *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
5084
5085         return 0;
5086 }
5087
/*
 * Write the MBC snoop/cache-sharing policy field (gen6/gen7 only).
 * Accepts values 0-3.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	/* only a 2-bit field is writable */
	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
5117
/* Aggregated slice/subslice/EU enablement counts read back from hardware. */
struct sseu_dev_status {
	unsigned int slice_total;	/* total enabled slices */
	unsigned int subslice_total;	/* total enabled subslices, all slices */
	unsigned int subslice_per_slice; /* max enabled subslices in any slice */
	unsigned int eu_total;		/* total enabled EUs, all subslices */
	unsigned int eu_per_subslice;	/* max enabled EUs in any subslice */
};
5125
/*
 * Decode the CHV subslice power signature registers into @stat.
 * CHV has one slice with up to two subslices.
 */
static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		stat->slice_total = 1;
		stat->subslice_per_slice++;
		/* each PG_ENABLE bit that is clear accounts for a pair of EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		stat->eu_total += eu_cnt;
		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
	}
	/* single slice, so subslice totals coincide */
	stat->subslice_total = stat->subslice_per_slice;
}
5157
/*
 * Decode the gen9 power-gate ACK registers into @stat: count powered-up
 * slices, subslices and EUs.
 */
static void gen9_sseu_device_status(struct drm_device *dev,
				    struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s_max = 3, ss_max = 4;
	int s, ss;
	/* VLAs are sized before the BXT clamp below, i.e. for the max case */
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit masks for the even (SSA) and odd (SSB) subslice registers */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		unsigned int ss_cnt = 0;

		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		stat->slice_total++;

		/* SKL/KBL: subslice count comes from device info, not ACK bits */
		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev) &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			if (IS_BROXTON(dev))
				ss_cnt++;

			/* each set ACK bit represents two EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			stat->eu_total += eu_cnt;
			stat->eu_per_subslice = max(stat->eu_per_subslice,
						    eu_cnt);
		}

		stat->subslice_total += ss_cnt;
		stat->subslice_per_slice = max(stat->subslice_per_slice,
					       ss_cnt);
	}
}
5222
/*
 * Derive BDW slice/subslice/EU status from the GT slice info register
 * combined with the static device info.
 */
static void broadwell_sseu_device_status(struct drm_device *dev,
					 struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s;
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);

	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);

	if (stat->slice_total) {
		/* subslice/EU counts per slice are fixed, taken from device info */
		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
		stat->subslice_total = stat->slice_total *
				       stat->subslice_per_slice;
		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < stat->slice_total; s++) {
			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];

			stat->eu_total -= hweight8(subslice_7eu);
		}
	}
}
5247
/*
 * debugfs i915_sseu_status: dump the SSEU capabilities from device info
 * followed by the live enablement state read from hardware (gen8+).
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct sseu_dev_status stat;

	if (INTEL_INFO(dev)->gen < 8)
		return -ENODEV;

	/* static capabilities from the device info tables */
	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	/* live status, read back by the per-platform helper */
	seq_puts(m, "SSEU Device Status\n");
	memset(&stat, 0, sizeof(stat));
	if (IS_CHERRYVIEW(dev)) {
		cherryview_sseu_device_status(dev, &stat);
	} else if (IS_BROADWELL(dev)) {
		broadwell_sseu_device_status(dev, &stat);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_sseu_device_status(dev, &stat);
	}
	seq_printf(m, "  Enabled Slice Total: %u\n",
		   stat.slice_total);
	seq_printf(m, "  Enabled Subslice Total: %u\n",
		   stat.subslice_total);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
		   stat.subslice_per_slice);
	seq_printf(m, "  Enabled EU Total: %u\n",
		   stat.eu_total);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
		   stat.eu_per_subslice);

	return 0;
}
5297
5298 static int i915_forcewake_open(struct inode *inode, struct file *file)
5299 {
5300         struct drm_device *dev = inode->i_private;
5301         struct drm_i915_private *dev_priv = dev->dev_private;
5302
5303         if (INTEL_INFO(dev)->gen < 6)
5304                 return 0;
5305
5306         intel_runtime_pm_get(dev_priv);
5307         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5308
5309         return 0;
5310 }
5311
5312 static int i915_forcewake_release(struct inode *inode, struct file *file)
5313 {
5314         struct drm_device *dev = inode->i_private;
5315         struct drm_i915_private *dev_priv = dev->dev_private;
5316
5317         if (INTEL_INFO(dev)->gen < 6)
5318                 return 0;
5319
5320         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5321         intel_runtime_pm_put(dev_priv);
5322
5323         return 0;
5324 }
5325
/*
 * i915_forcewake_user: no read/write methods — merely holding the file
 * open pins runtime PM and forcewake (see open/release above).
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
5331
5332 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
5333 {
5334         struct drm_device *dev = minor->dev;
5335         struct dentry *ent;
5336
5337         ent = debugfs_create_file("i915_forcewake_user",
5338                                   S_IRUSR,
5339                                   root, dev,
5340                                   &i915_forcewake_fops);
5341         if (!ent)
5342                 return -ENOMEM;
5343
5344         return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
5345 }
5346
5347 static int i915_debugfs_create(struct dentry *root,
5348                                struct drm_minor *minor,
5349                                const char *name,
5350                                const struct file_operations *fops)
5351 {
5352         struct drm_device *dev = minor->dev;
5353         struct dentry *ent;
5354
5355         ent = debugfs_create_file(name,
5356                                   S_IRUGO | S_IWUSR,
5357                                   root, dev,
5358                                   fops);
5359         if (!ent)
5360                 return -ENOMEM;
5361
5362         return drm_add_fake_info_node(minor, ent, fops);
5363 }
5364
/*
 * Read-only informational entries, registered in bulk through
 * drm_debugfs_create_files(). The optional fourth field is passed to
 * the show function as its driver-private data.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
5422
/*
 * Writable special-purpose files, each with dedicated fops; created
 * one at a time via i915_debugfs_create().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};
5446
5447 void intel_display_crc_init(struct drm_device *dev)
5448 {
5449         struct drm_i915_private *dev_priv = dev->dev_private;
5450         enum pipe pipe;
5451
5452         for_each_pipe(dev_priv, pipe) {
5453                 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
5454
5455                 pipe_crc->opened = false;
5456                 spin_lock_init(&pipe_crc->lock);
5457                 init_waitqueue_head(&pipe_crc->wq);
5458         }
5459 }
5460
/*
 * Create all i915 debugfs files under the minor's debugfs root: the
 * forcewake user file, the per-pipe CRC files, the writable
 * special-purpose files and finally the read-only info nodes.
 */
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
5487
/*
 * Remove everything i915_debugfs_init() created. Files registered via
 * drm_add_fake_info_node() use their fops pointer as the node data,
 * hence the struct drm_info_list casts below.
 */
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	/* the forcewake file's fake node was keyed on its fops */
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}
5512
/* Describes one contiguous DPCD register range to dump via i915_dpcd_show(). */
struct dpcd_block {
        /* DPCD dump start address. */
        unsigned int offset;
        /* DPCD dump end address, inclusive. If unset, .size will be used. */
        unsigned int end;
        /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
        size_t size;
        /* Only valid for eDP. */
        bool edp;
};
5523
/*
 * DPCD ranges dumped by the per-connector i915_dpcd debugfs file.
 * Entries with .edp set are skipped for non-eDP connectors.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
        { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
        { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
        { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
        { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
        { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
        { .offset = DP_SET_POWER },
        { .offset = DP_EDP_DPCD_REV },
        { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
        { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
        { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
5536
5537 static int i915_dpcd_show(struct seq_file *m, void *data)
5538 {
5539         struct drm_connector *connector = m->private;
5540         struct intel_dp *intel_dp =
5541                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5542         uint8_t buf[16];
5543         ssize_t err;
5544         int i;
5545
5546         if (connector->status != connector_status_connected)
5547                 return -ENODEV;
5548
5549         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5550                 const struct dpcd_block *b = &i915_dpcd_debug[i];
5551                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5552
5553                 if (b->edp &&
5554                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5555                         continue;
5556
5557                 /* low tech for now */
5558                 if (WARN_ON(size > sizeof(buf)))
5559                         continue;
5560
5561                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5562                 if (err <= 0) {
5563                         DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
5564                                   size, b->offset, err);
5565                         continue;
5566                 }
5567
5568                 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
5569         }
5570
5571         return 0;
5572 }
5573
/* debugfs open hook: bind i915_dpcd_show() to the connector stored in i_private. */
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_dpcd_show, inode->i_private);
}
5578
/* Read-only seq_file fops for the per-connector "i915_dpcd" debugfs file. */
static const struct file_operations i915_dpcd_fops = {
        .owner = THIS_MODULE,
        .open = i915_dpcd_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
5586
5587 /**
5588  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5589  * @connector: pointer to a registered drm_connector
5590  *
5591  * Cleanup will be done by drm_connector_unregister() through a call to
5592  * drm_debugfs_connector_remove().
5593  *
5594  * Returns 0 on success, negative error codes on error.
5595  */
5596 int i915_debugfs_connector_add(struct drm_connector *connector)
5597 {
5598         struct dentry *root = connector->debugfs_entry;
5599
5600         /* The connector must have been registered beforehands. */
5601         if (!root)
5602                 return -ENODEV;
5603
5604         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5605             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5606                 debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
5607                                     &i915_dpcd_fops);
5608
5609         return 0;
5610 }