/* drm/i915: Remove intel_context.active_link */
/* From linux-block.git: drivers/gpu/drm/i915/i915_debugfs.c */
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
31
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_fourcc.h>
34
35 #include "gt/intel_reset.h"
36
37 #include "i915_gem_context.h"
38 #include "intel_dp.h"
39 #include "intel_drv.h"
40 #include "intel_fbc.h"
41 #include "intel_guc_submission.h"
42 #include "intel_hdcp.h"
43 #include "intel_hdmi.h"
44 #include "intel_pm.h"
45 #include "intel_psr.h"
46 #include "intel_sideband.h"
47
48 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
49 {
50         return to_i915(node->minor->dev);
51 }
52
/*
 * i915_capabilities - debugfs entry dumping device generation, platform
 * name, PCH type, device-info flags, runtime info, driver caps and the
 * current module parameters.  Always returns 0.
 */
53 static int i915_capabilities(struct seq_file *m, void *data)
54 {
55         struct drm_i915_private *dev_priv = node_to_i915(m->private);
56         const struct intel_device_info *info = INTEL_INFO(dev_priv);
57         struct drm_printer p = drm_seq_file_printer(m);
58
59         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
60         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
61         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
62
63         intel_device_info_dump_flags(info, &p);
64         intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
65         intel_driver_caps_print(&dev_priv->caps, &p);
66
        /* Hold the param lock so the dump is a consistent snapshot. */
67         kernel_param_lock(THIS_MODULE);
68         i915_params_dump(&i915_modparams, &p);
69         kernel_param_unlock(THIS_MODULE);
70
71         return 0;
72 }
73
/* '*' while the object is still in use by the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
78
79 static char get_pin_flag(struct drm_i915_gem_object *obj)
80 {
81         return obj->pin_global ? 'p' : ' ';
82 }
83
84 static char get_tiling_flag(struct drm_i915_gem_object *obj)
85 {
86         switch (i915_gem_object_get_tiling(obj)) {
87         default:
88         case I915_TILING_NONE: return ' ';
89         case I915_TILING_X: return 'X';
90         case I915_TILING_Y: return 'Y';
91         }
92 }
93
94 static char get_global_flag(struct drm_i915_gem_object *obj)
95 {
96         return obj->userfault_count ? 'g' : ' ';
97 }
98
99 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
100 {
101         return obj->mm.mapping ? 'M' : ' ';
102 }
103
104 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
105 {
106         u64 size = 0;
107         struct i915_vma *vma;
108
109         for_each_ggtt_vma(vma, obj) {
110                 if (drm_mm_node_allocated(&vma->node))
111                         size += vma->node.size;
112         }
113
114         return size;
115 }
116
117 static const char *
118 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
119 {
120         size_t x = 0;
121
122         switch (page_sizes) {
123         case 0:
124                 return "";
125         case I915_GTT_PAGE_SIZE_4K:
126                 return "4K";
127         case I915_GTT_PAGE_SIZE_64K:
128                 return "64K";
129         case I915_GTT_PAGE_SIZE_2M:
130                 return "2M";
131         default:
132                 if (!buf)
133                         return "M";
134
135                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
136                         x += snprintf(buf + x, len - x, "2M, ");
137                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
138                         x += snprintf(buf + x, len - x, "64K, ");
139                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
140                         x += snprintf(buf + x, len - x, "4K, ");
141                 buf[x-2] = '\0';
142
143                 return buf;
144         }
145 }
146
/*
 * describe_obj - print a one-line summary of a GEM object: status flag
 * characters, size, read/write domains, cache level, GEM name, pin
 * counts, every allocated VMA (with GGTT view details and fence state),
 * stolen offset, last write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below).
 */
147 static void
148 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
149 {
150         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
151         struct intel_engine_cs *engine;
152         struct i915_vma *vma;
153         unsigned int frontbuffer_bits;
154         int pin_count = 0;
155
156         lockdep_assert_held(&obj->base.dev->struct_mutex);
157
158         seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
159                    &obj->base,
160                    get_active_flag(obj),
161                    get_pin_flag(obj),
162                    get_tiling_flag(obj),
163                    get_global_flag(obj),
164                    get_pin_mapped_flag(obj),
165                    obj->base.size / 1024,
166                    obj->read_domains,
167                    obj->write_domain,
168                    i915_cache_level_str(dev_priv, obj->cache_level),
169                    obj->mm.dirty ? " dirty" : "",
170                    obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
171         if (obj->base.name)
172                 seq_printf(m, " (name: %d)", obj->base.name);
        /* First pass over the VMA list: just count the pinned ones. */
173         list_for_each_entry(vma, &obj->vma.list, obj_link) {
174                 if (i915_vma_is_pinned(vma))
175                         pin_count++;
176         }
177         seq_printf(m, " (pinned x %d)", pin_count);
178         if (obj->pin_global)
179                 seq_printf(m, " (global)");
        /* Second pass: describe each VMA that has GTT space allocated. */
180         list_for_each_entry(vma, &obj->vma.list, obj_link) {
181                 if (!drm_mm_node_allocated(&vma->node))
182                         continue;
183
184                 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
185                            i915_vma_is_ggtt(vma) ? "g" : "pp",
186                            vma->node.start, vma->node.size,
187                            stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
188                 if (i915_vma_is_ggtt(vma)) {
189                         switch (vma->ggtt_view.type) {
190                         case I915_GGTT_VIEW_NORMAL:
191                                 seq_puts(m, ", normal");
192                                 break;
193
194                         case I915_GGTT_VIEW_PARTIAL:
195                                 seq_printf(m, ", partial [%08llx+%x]",
196                                            vma->ggtt_view.partial.offset << PAGE_SHIFT,
197                                            vma->ggtt_view.partial.size << PAGE_SHIFT);
198                                 break;
199
200                         case I915_GGTT_VIEW_ROTATED:
201                                 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
202                                            vma->ggtt_view.rotated.plane[0].width,
203                                            vma->ggtt_view.rotated.plane[0].height,
204                                            vma->ggtt_view.rotated.plane[0].stride,
205                                            vma->ggtt_view.rotated.plane[0].offset,
206                                            vma->ggtt_view.rotated.plane[1].width,
207                                            vma->ggtt_view.rotated.plane[1].height,
208                                            vma->ggtt_view.rotated.plane[1].stride,
209                                            vma->ggtt_view.rotated.plane[1].offset);
210                                 break;
211
212                         default:
213                                 MISSING_CASE(vma->ggtt_view.type);
214                                 break;
215                         }
216                 }
        /* '*' after the fence id marks a still-outstanding fence request. */
217                 if (vma->fence)
218                         seq_printf(m, " , fence: %d%s",
219                                    vma->fence->id,
220                                    i915_active_request_isset(&vma->last_fence) ? "*" : "");
221                 seq_puts(m, ")");
222         }
223         if (obj->stolen)
224                 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
225
226         engine = i915_gem_object_last_write_engine(obj);
227         if (engine)
228                 seq_printf(m, " (%s)", engine->name);
229
230         frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
231         if (frontbuffer_bits)
232                 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
233 }
234
235 static int obj_rank_by_stolen(const void *A, const void *B)
236 {
237         const struct drm_i915_gem_object *a =
238                 *(const struct drm_i915_gem_object **)A;
239         const struct drm_i915_gem_object *b =
240                 *(const struct drm_i915_gem_object **)B;
241
242         if (a->stolen->start < b->stolen->start)
243                 return -1;
244         if (a->stolen->start > b->stolen->start)
245                 return 1;
246         return 0;
247 }
248
/*
 * i915_gem_stolen_list_info - debugfs: list every object backed by
 * stolen memory, sorted by stolen offset, followed by totals.
 *
 * The object lists are snapshotted into a temporary array under
 * mm.obj_lock (bounded by the object count sampled before allocation),
 * then described afterwards while holding struct_mutex only.
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from mutex_lock_interruptible().
 */
249 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
250 {
251         struct drm_i915_private *dev_priv = node_to_i915(m->private);
252         struct drm_device *dev = &dev_priv->drm;
253         struct drm_i915_gem_object **objects;
254         struct drm_i915_gem_object *obj;
255         u64 total_obj_size, total_gtt_size;
256         unsigned long total, count, n;
257         int ret;
258
259         total = READ_ONCE(dev_priv->mm.object_count);
260         objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
261         if (!objects)
262                 return -ENOMEM;
263
264         ret = mutex_lock_interruptible(&dev->struct_mutex);
265         if (ret)
266                 goto out;
267
268         total_obj_size = total_gtt_size = count = 0;
269
270         spin_lock(&dev_priv->mm.obj_lock);
271         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
272                 if (count == total)
273                         break;
274
275                 if (obj->stolen == NULL)
276                         continue;
277
278                 objects[count++] = obj;
279                 total_obj_size += obj->base.size;
280                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
281
282         }
        /* Unbound objects have no GTT footprint, so only size is added. */
283         list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
284                 if (count == total)
285                         break;
286
287                 if (obj->stolen == NULL)
288                         continue;
289
290                 objects[count++] = obj;
291                 total_obj_size += obj->base.size;
292         }
293         spin_unlock(&dev_priv->mm.obj_lock);
294
295         sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
296
297         seq_puts(m, "Stolen:\n");
298         for (n = 0; n < count; n++) {
299                 seq_puts(m, "   ");
300                 describe_obj(m, objects[n]);
301                 seq_putc(m, '\n');
302         }
303         seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
304                    count, total_obj_size, total_gtt_size);
305
306         mutex_unlock(&dev->struct_mutex);
307 out:
308         kvfree(objects);
309         return ret;
310 }
311
/*
 * Accumulator for per-client (or kernel-context) GEM object statistics,
 * filled in by per_file_stats() and reported via print_file_stats().
 * All sizes are in bytes.
 */
312 struct file_stats {
313         struct i915_address_space *vm; /* ppGTT used to filter non-GGTT VMAs; NULL for kernel stats */
314         unsigned long count;
315         u64 total, unbound;
316         u64 global, shared;
317         u64 active, inactive;
318         u64 closed;
319 };
320
/*
 * per_file_stats - accumulate one GEM object's sizes into a
 * struct file_stats.  Used both as an idr_for_each() callback (hence
 * the unused @id) and called directly with id == 0.
 *
 * GGTT VMAs always contribute (and also bump the 'global' total);
 * non-GGTT VMAs contribute only when they belong to stats->vm.
 * Always returns 0 so idr iteration continues.
 * Caller must hold struct_mutex (asserted below).
 */
321 static int per_file_stats(int id, void *ptr, void *data)
322 {
323         struct drm_i915_gem_object *obj = ptr;
324         struct file_stats *stats = data;
325         struct i915_vma *vma;
326
327         lockdep_assert_held(&obj->base.dev->struct_mutex);
328
329         stats->count++;
330         stats->total += obj->base.size;
331         if (!obj->bind_count)
332                 stats->unbound += obj->base.size;
333         if (obj->base.name || obj->base.dma_buf)
334                 stats->shared += obj->base.size;
335
336         list_for_each_entry(vma, &obj->vma.list, obj_link) {
337                 if (!drm_mm_node_allocated(&vma->node))
338                         continue;
339
340                 if (i915_vma_is_ggtt(vma)) {
341                         stats->global += vma->node.size;
342                 } else {
                        /* Skip VMAs belonging to some other ppGTT. */
343                         if (vma->vm != stats->vm)
344                                 continue;
345                 }
346
347                 if (i915_vma_is_active(vma))
348                         stats->active += vma->node.size;
349                 else
350                         stats->inactive += vma->node.size;
351
352                 if (i915_vma_is_closed(vma))
353                         stats->closed += vma->node.size;
354         }
355
356         return 0;
357 }
358
/*
 * print_file_stats - emit one summary line for @stats under label
 * @name; prints nothing when stats.count is zero.
 * NOTE: multi-statement macro that evaluates @stats several times —
 * pass a plain variable, not an expression with side effects.
 */
359 #define print_file_stats(m, name, stats) do { \
360         if (stats.count) \
361                 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
362                            name, \
363                            stats.count, \
364                            stats.total, \
365                            stats.active, \
366                            stats.inactive, \
367                            stats.global, \
368                            stats.shared, \
369                            stats.unbound, \
370                            stats.closed); \
371 } while (0)
372
373 static void print_batch_pool_stats(struct seq_file *m,
374                                    struct drm_i915_private *dev_priv)
375 {
376         struct drm_i915_gem_object *obj;
377         struct intel_engine_cs *engine;
378         struct file_stats stats = {};
379         enum intel_engine_id id;
380         int j;
381
382         for_each_engine(engine, dev_priv, id) {
383                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
384                         list_for_each_entry(obj,
385                                             &engine->batch_pool.cache_list[j],
386                                             batch_pool_link)
387                                 per_file_stats(0, obj, &stats);
388                 }
389         }
390
391         print_file_stats(m, "[k]batch pool", stats);
392 }
393
/*
 * print_context_stats - walk every GEM context: account its engine
 * state and ring objects into a kernel-wide "[k]contexts" summary, and
 * for contexts owned by a userspace file, print a per-client summary
 * labelled with the owning task's comm.
 */
394 static void print_context_stats(struct seq_file *m,
395                                 struct drm_i915_private *i915)
396 {
397         struct file_stats kstats = {};
398         struct i915_gem_context *ctx;
399
400         list_for_each_entry(ctx, &i915->contexts.list, link) {
401                 struct i915_gem_engines_iter it;
402                 struct intel_context *ce;
403
404                 for_each_gem_engine(ce,
405                                     i915_gem_context_lock_engines(ctx), it) {
406                         if (ce->state)
407                                 per_file_stats(0, ce->state->obj, &kstats);
408                         if (ce->ring)
409                                 per_file_stats(0, ce->ring->vma->obj, &kstats);
410                 }
411                 i915_gem_context_unlock_engines(ctx);
412
413                 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
                        /* Filter this client's VMAs by the context's ppGTT. */
414                         struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
415                         struct drm_file *file = ctx->file_priv->file;
416                         struct task_struct *task;
417                         char name[80];
418
419                         spin_lock(&file->table_lock);
420                         idr_for_each(&file->object_idr, per_file_stats, &stats);
421                         spin_unlock(&file->table_lock);
422
                        /* RCU protects the task lookup; copy comm before unlock. */
423                         rcu_read_lock();
424                         task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
425                         snprintf(name, sizeof(name), "%s",
426                                  task ? task->comm : "<unknown>");
427                         rcu_read_unlock();
428
429                         print_file_stats(m, name, stats);
430                 }
431         }
432
433         print_file_stats(m, "[k]contexts", kstats);
434 }
435
/*
 * i915_gem_object_info - debugfs: overall GEM memory accounting.
 * Walks the unbound and bound object lists under mm.obj_lock to total
 * object counts/sizes by category (purgeable, mapped, huge-paged,
 * display-pinned), then prints GGTT totals and per-context/batch-pool
 * stats under struct_mutex.
 * Returns 0, or the error from mutex_lock_interruptible().
 */
436 static int i915_gem_object_info(struct seq_file *m, void *data)
437 {
438         struct drm_i915_private *dev_priv = node_to_i915(m->private);
439         struct drm_device *dev = &dev_priv->drm;
440         struct i915_ggtt *ggtt = &dev_priv->ggtt;
441         u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
442         u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
443         struct drm_i915_gem_object *obj;
444         unsigned int page_sizes = 0;
445         char buf[80];
446         int ret;
447
448         seq_printf(m, "%u objects, %llu bytes\n",
449                    dev_priv->mm.object_count,
450                    dev_priv->mm.object_memory);
451
452         size = count = 0;
453         mapped_size = mapped_count = 0;
454         purgeable_size = purgeable_count = 0;
455         huge_size = huge_count = 0;
456
457         spin_lock(&dev_priv->mm.obj_lock);
458         list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
459                 size += obj->base.size;
460                 ++count;
461
462                 if (obj->mm.madv == I915_MADV_DONTNEED) {
463                         purgeable_size += obj->base.size;
464                         ++purgeable_count;
465                 }
466
467                 if (obj->mm.mapping) {
468                         mapped_count++;
469                         mapped_size += obj->base.size;
470                 }
471
                /* Anything larger than the base page size counts as huge. */
472                 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
473                         huge_count++;
474                         huge_size += obj->base.size;
475                         page_sizes |= obj->mm.page_sizes.sg;
476                 }
477         }
478         seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
479
        /* Reset per-list counters; purgeable/mapped/huge keep accumulating. */
480         size = count = dpy_size = dpy_count = 0;
481         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
482                 size += obj->base.size;
483                 ++count;
484
485                 if (obj->pin_global) {
486                         dpy_size += obj->base.size;
487                         ++dpy_count;
488                 }
489
490                 if (obj->mm.madv == I915_MADV_DONTNEED) {
491                         purgeable_size += obj->base.size;
492                         ++purgeable_count;
493                 }
494
495                 if (obj->mm.mapping) {
496                         mapped_count++;
497                         mapped_size += obj->base.size;
498                 }
499
500                 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
501                         huge_count++;
502                         huge_size += obj->base.size;
503                         page_sizes |= obj->mm.page_sizes.sg;
504                 }
505         }
506         spin_unlock(&dev_priv->mm.obj_lock);
507
508         seq_printf(m, "%u bound objects, %llu bytes\n",
509                    count, size);
510         seq_printf(m, "%u purgeable objects, %llu bytes\n",
511                    purgeable_count, purgeable_size);
512         seq_printf(m, "%u mapped objects, %llu bytes\n",
513                    mapped_count, mapped_size);
514         seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
515                    huge_count,
516                    stringify_page_sizes(page_sizes, buf, sizeof(buf)),
517                    huge_size);
518         seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
519                    dpy_count, dpy_size);
520
521         seq_printf(m, "%llu [%pa] gtt total\n",
522                    ggtt->vm.total, &ggtt->mappable_end);
523         seq_printf(m, "Supported page sizes: %s\n",
524                    stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
525                                         buf, sizeof(buf)));
526
527         seq_putc(m, '\n');
528
529         ret = mutex_lock_interruptible(&dev->struct_mutex);
530         if (ret)
531                 return ret;
532
533         print_batch_pool_stats(m, dev_priv);
534         print_context_stats(m, dev_priv);
535         mutex_unlock(&dev->struct_mutex);
536
537         return 0;
538 }
539
540 static int i915_gem_gtt_info(struct seq_file *m, void *data)
541 {
542         struct drm_info_node *node = m->private;
543         struct drm_i915_private *dev_priv = node_to_i915(node);
544         struct drm_device *dev = &dev_priv->drm;
545         struct drm_i915_gem_object **objects;
546         struct drm_i915_gem_object *obj;
547         u64 total_obj_size, total_gtt_size;
548         unsigned long nobject, n;
549         int count, ret;
550
551         nobject = READ_ONCE(dev_priv->mm.object_count);
552         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
553         if (!objects)
554                 return -ENOMEM;
555
556         ret = mutex_lock_interruptible(&dev->struct_mutex);
557         if (ret)
558                 return ret;
559
560         count = 0;
561         spin_lock(&dev_priv->mm.obj_lock);
562         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
563                 objects[count++] = obj;
564                 if (count == nobject)
565                         break;
566         }
567         spin_unlock(&dev_priv->mm.obj_lock);
568
569         total_obj_size = total_gtt_size = 0;
570         for (n = 0;  n < count; n++) {
571                 obj = objects[n];
572
573                 seq_puts(m, "   ");
574                 describe_obj(m, obj);
575                 seq_putc(m, '\n');
576                 total_obj_size += obj->base.size;
577                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
578         }
579
580         mutex_unlock(&dev->struct_mutex);
581
582         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
583                    count, total_obj_size, total_gtt_size);
584         kvfree(objects);
585
586         return 0;
587 }
588
/*
 * i915_gem_batch_pool_info - debugfs: for every engine, print the
 * object count of each batch-pool cache bucket and describe every
 * object in it, then a grand total.  Holds struct_mutex throughout.
 * Returns 0, or the error from mutex_lock_interruptible().
 */
589 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
590 {
591         struct drm_i915_private *dev_priv = node_to_i915(m->private);
592         struct drm_device *dev = &dev_priv->drm;
593         struct drm_i915_gem_object *obj;
594         struct intel_engine_cs *engine;
595         enum intel_engine_id id;
596         int total = 0;
597         int ret, j;
598
599         ret = mutex_lock_interruptible(&dev->struct_mutex);
600         if (ret)
601                 return ret;
602
603         for_each_engine(engine, dev_priv, id) {
604                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
605                         int count;
606
                        /* First walk: count the bucket for the header line. */
607                         count = 0;
608                         list_for_each_entry(obj,
609                                             &engine->batch_pool.cache_list[j],
610                                             batch_pool_link)
611                                 count++;
612                         seq_printf(m, "%s cache[%d]: %d objects\n",
613                                    engine->name, j, count);
614
                        /* Second walk: describe each object in the bucket. */
615                         list_for_each_entry(obj,
616                                             &engine->batch_pool.cache_list[j],
617                                             batch_pool_link) {
618                                 seq_puts(m, "   ");
619                                 describe_obj(m, obj);
620                                 seq_putc(m, '\n');
621                         }
622
623                         total += count;
624                 }
625         }
626
627         seq_printf(m, "total: %d\n", total);
628
629         mutex_unlock(&dev->struct_mutex);
630
631         return 0;
632 }
633
/*
 * gen8_display_interrupt_info - dump gen8+ display-engine interrupt
 * registers: per-pipe IMR/IIR/IER (skipping pipes whose power well is
 * off), then the DE port, DE misc and PCU interrupt registers.
 * Shared by the gen8 and gen11 branches of i915_interrupt_info().
 */
634 static void gen8_display_interrupt_info(struct seq_file *m)
635 {
636         struct drm_i915_private *dev_priv = node_to_i915(m->private);
637         int pipe;
638
639         for_each_pipe(dev_priv, pipe) {
640                 enum intel_display_power_domain power_domain;
641                 intel_wakeref_t wakeref;
642
                /* Reading pipe registers with the power well down would fault. */
643                 power_domain = POWER_DOMAIN_PIPE(pipe);
644                 wakeref = intel_display_power_get_if_enabled(dev_priv,
645                                                              power_domain);
646                 if (!wakeref) {
647                         seq_printf(m, "Pipe %c power disabled\n",
648                                    pipe_name(pipe));
649                         continue;
650                 }
651                 seq_printf(m, "Pipe %c IMR:\t%08x\n",
652                            pipe_name(pipe),
653                            I915_READ(GEN8_DE_PIPE_IMR(pipe)));
654                 seq_printf(m, "Pipe %c IIR:\t%08x\n",
655                            pipe_name(pipe),
656                            I915_READ(GEN8_DE_PIPE_IIR(pipe)));
657                 seq_printf(m, "Pipe %c IER:\t%08x\n",
658                            pipe_name(pipe),
659                            I915_READ(GEN8_DE_PIPE_IER(pipe)));
660
661                 intel_display_power_put(dev_priv, power_domain, wakeref);
662         }
663
664         seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
665                    I915_READ(GEN8_DE_PORT_IMR));
666         seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
667                    I915_READ(GEN8_DE_PORT_IIR));
668         seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
669                    I915_READ(GEN8_DE_PORT_IER));
670
671         seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
672                    I915_READ(GEN8_DE_MISC_IMR));
673         seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
674                    I915_READ(GEN8_DE_MISC_IIR));
675         seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
676                    I915_READ(GEN8_DE_MISC_IER));
677
678         seq_printf(m, "PCU interrupt mask:\t%08x\n",
679                    I915_READ(GEN8_PCU_IMR));
680         seq_printf(m, "PCU interrupt identity:\t%08x\n",
681                    I915_READ(GEN8_PCU_IIR));
682         seq_printf(m, "PCU interrupt enable:\t%08x\n",
683                    I915_READ(GEN8_PCU_IER));
684 }
685
686 static int i915_interrupt_info(struct seq_file *m, void *data)
687 {
688         struct drm_i915_private *dev_priv = node_to_i915(m->private);
689         struct intel_engine_cs *engine;
690         enum intel_engine_id id;
691         intel_wakeref_t wakeref;
692         int i, pipe;
693
694         wakeref = intel_runtime_pm_get(dev_priv);
695
696         if (IS_CHERRYVIEW(dev_priv)) {
697                 intel_wakeref_t pref;
698
699                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
700                            I915_READ(GEN8_MASTER_IRQ));
701
702                 seq_printf(m, "Display IER:\t%08x\n",
703                            I915_READ(VLV_IER));
704                 seq_printf(m, "Display IIR:\t%08x\n",
705                            I915_READ(VLV_IIR));
706                 seq_printf(m, "Display IIR_RW:\t%08x\n",
707                            I915_READ(VLV_IIR_RW));
708                 seq_printf(m, "Display IMR:\t%08x\n",
709                            I915_READ(VLV_IMR));
710                 for_each_pipe(dev_priv, pipe) {
711                         enum intel_display_power_domain power_domain;
712
713                         power_domain = POWER_DOMAIN_PIPE(pipe);
714                         pref = intel_display_power_get_if_enabled(dev_priv,
715                                                                   power_domain);
716                         if (!pref) {
717                                 seq_printf(m, "Pipe %c power disabled\n",
718                                            pipe_name(pipe));
719                                 continue;
720                         }
721
722                         seq_printf(m, "Pipe %c stat:\t%08x\n",
723                                    pipe_name(pipe),
724                                    I915_READ(PIPESTAT(pipe)));
725
726                         intel_display_power_put(dev_priv, power_domain, pref);
727                 }
728
729                 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
730                 seq_printf(m, "Port hotplug:\t%08x\n",
731                            I915_READ(PORT_HOTPLUG_EN));
732                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
733                            I915_READ(VLV_DPFLIPSTAT));
734                 seq_printf(m, "DPINVGTT:\t%08x\n",
735                            I915_READ(DPINVGTT));
736                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
737
738                 for (i = 0; i < 4; i++) {
739                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
740                                    i, I915_READ(GEN8_GT_IMR(i)));
741                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
742                                    i, I915_READ(GEN8_GT_IIR(i)));
743                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
744                                    i, I915_READ(GEN8_GT_IER(i)));
745                 }
746
747                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
748                            I915_READ(GEN8_PCU_IMR));
749                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
750                            I915_READ(GEN8_PCU_IIR));
751                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
752                            I915_READ(GEN8_PCU_IER));
753         } else if (INTEL_GEN(dev_priv) >= 11) {
754                 seq_printf(m, "Master Interrupt Control:  %08x\n",
755                            I915_READ(GEN11_GFX_MSTR_IRQ));
756
757                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
758                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
759                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
760                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
761                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
762                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
763                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
764                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
765                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
766                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
767                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
768                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
769
770                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
771                            I915_READ(GEN11_DISPLAY_INT_CTL));
772
773                 gen8_display_interrupt_info(m);
774         } else if (INTEL_GEN(dev_priv) >= 8) {
775                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
776                            I915_READ(GEN8_MASTER_IRQ));
777
778                 for (i = 0; i < 4; i++) {
779                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
780                                    i, I915_READ(GEN8_GT_IMR(i)));
781                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
782                                    i, I915_READ(GEN8_GT_IIR(i)));
783                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
784                                    i, I915_READ(GEN8_GT_IER(i)));
785                 }
786
787                 gen8_display_interrupt_info(m);
788         } else if (IS_VALLEYVIEW(dev_priv)) {
789                 seq_printf(m, "Display IER:\t%08x\n",
790                            I915_READ(VLV_IER));
791                 seq_printf(m, "Display IIR:\t%08x\n",
792                            I915_READ(VLV_IIR));
793                 seq_printf(m, "Display IIR_RW:\t%08x\n",
794                            I915_READ(VLV_IIR_RW));
795                 seq_printf(m, "Display IMR:\t%08x\n",
796                            I915_READ(VLV_IMR));
797                 for_each_pipe(dev_priv, pipe) {
798                         enum intel_display_power_domain power_domain;
799                         intel_wakeref_t pref;
800
801                         power_domain = POWER_DOMAIN_PIPE(pipe);
802                         pref = intel_display_power_get_if_enabled(dev_priv,
803                                                                   power_domain);
804                         if (!pref) {
805                                 seq_printf(m, "Pipe %c power disabled\n",
806                                            pipe_name(pipe));
807                                 continue;
808                         }
809
810                         seq_printf(m, "Pipe %c stat:\t%08x\n",
811                                    pipe_name(pipe),
812                                    I915_READ(PIPESTAT(pipe)));
813                         intel_display_power_put(dev_priv, power_domain, pref);
814                 }
815
816                 seq_printf(m, "Master IER:\t%08x\n",
817                            I915_READ(VLV_MASTER_IER));
818
819                 seq_printf(m, "Render IER:\t%08x\n",
820                            I915_READ(GTIER));
821                 seq_printf(m, "Render IIR:\t%08x\n",
822                            I915_READ(GTIIR));
823                 seq_printf(m, "Render IMR:\t%08x\n",
824                            I915_READ(GTIMR));
825
826                 seq_printf(m, "PM IER:\t\t%08x\n",
827                            I915_READ(GEN6_PMIER));
828                 seq_printf(m, "PM IIR:\t\t%08x\n",
829                            I915_READ(GEN6_PMIIR));
830                 seq_printf(m, "PM IMR:\t\t%08x\n",
831                            I915_READ(GEN6_PMIMR));
832
833                 seq_printf(m, "Port hotplug:\t%08x\n",
834                            I915_READ(PORT_HOTPLUG_EN));
835                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
836                            I915_READ(VLV_DPFLIPSTAT));
837                 seq_printf(m, "DPINVGTT:\t%08x\n",
838                            I915_READ(DPINVGTT));
839
840         } else if (!HAS_PCH_SPLIT(dev_priv)) {
841                 seq_printf(m, "Interrupt enable:    %08x\n",
842                            I915_READ(GEN2_IER));
843                 seq_printf(m, "Interrupt identity:  %08x\n",
844                            I915_READ(GEN2_IIR));
845                 seq_printf(m, "Interrupt mask:      %08x\n",
846                            I915_READ(GEN2_IMR));
847                 for_each_pipe(dev_priv, pipe)
848                         seq_printf(m, "Pipe %c stat:         %08x\n",
849                                    pipe_name(pipe),
850                                    I915_READ(PIPESTAT(pipe)));
851         } else {
852                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
853                            I915_READ(DEIER));
854                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
855                            I915_READ(DEIIR));
856                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
857                            I915_READ(DEIMR));
858                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
859                            I915_READ(SDEIER));
860                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
861                            I915_READ(SDEIIR));
862                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
863                            I915_READ(SDEIMR));
864                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
865                            I915_READ(GTIER));
866                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
867                            I915_READ(GTIIR));
868                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
869                            I915_READ(GTIMR));
870         }
871
872         if (INTEL_GEN(dev_priv) >= 11) {
873                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
874                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
875                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
876                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
877                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
878                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
879                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
880                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
881                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
882                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
883                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_GUC_SG_INTR_MASK));
885                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
886                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
887                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
888                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
889                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
890                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
891
892         } else if (INTEL_GEN(dev_priv) >= 6) {
893                 for_each_engine(engine, dev_priv, id) {
894                         seq_printf(m,
895                                    "Graphics Interrupt mask (%s):       %08x\n",
896                                    engine->name, ENGINE_READ(engine, RING_IMR));
897                 }
898         }
899
900         intel_runtime_pm_put(dev_priv, wakeref);
901
902         return 0;
903 }
904
905 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
906 {
907         struct drm_i915_private *dev_priv = node_to_i915(m->private);
908         struct drm_device *dev = &dev_priv->drm;
909         int i, ret;
910
911         ret = mutex_lock_interruptible(&dev->struct_mutex);
912         if (ret)
913                 return ret;
914
915         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
916         for (i = 0; i < dev_priv->num_fence_regs; i++) {
917                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
918
919                 seq_printf(m, "Fence %d, pin count = %d, object = ",
920                            i, dev_priv->fence_regs[i].pin_count);
921                 if (!vma)
922                         seq_puts(m, "unused");
923                 else
924                         describe_obj(m, vma->obj);
925                 seq_putc(m, '\n');
926         }
927
928         mutex_unlock(&dev->struct_mutex);
929         return 0;
930 }
931
932 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
933 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
934                               size_t count, loff_t *pos)
935 {
936         struct i915_gpu_state *error;
937         ssize_t ret;
938         void *buf;
939
940         error = file->private_data;
941         if (!error)
942                 return 0;
943
944         /* Bounce buffer required because of kernfs __user API convenience. */
945         buf = kmalloc(count, GFP_KERNEL);
946         if (!buf)
947                 return -ENOMEM;
948
949         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
950         if (ret <= 0)
951                 goto out;
952
953         if (!copy_to_user(ubuf, buf, ret))
954                 *pos += ret;
955         else
956                 ret = -EFAULT;
957
958 out:
959         kfree(buf);
960         return ret;
961 }
962
/* Drop the GPU-state reference taken when the debugfs file was opened. */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
968
969 static int i915_gpu_info_open(struct inode *inode, struct file *file)
970 {
971         struct drm_i915_private *i915 = inode->i_private;
972         struct i915_gpu_state *gpu;
973         intel_wakeref_t wakeref;
974
975         gpu = NULL;
976         with_intel_runtime_pm(i915, wakeref)
977                 gpu = i915_capture_gpu_state(i915);
978         if (IS_ERR(gpu))
979                 return PTR_ERR(gpu);
980
981         file->private_data = gpu;
982         return 0;
983 }
984
/*
 * "i915_gpu_info": captures a live GPU state snapshot on open and streams
 * the formatted dump out through gpu_state_read().
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
992
/*
 * Writing anything to "i915_error_state" discards the currently captured
 * error state; the written bytes themselves are ignored.
 *
 * Returns @cnt (all input consumed), or 0 if no error state is attached.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
1009
1010 static int i915_error_state_open(struct inode *inode, struct file *file)
1011 {
1012         struct i915_gpu_state *error;
1013
1014         error = i915_first_error_state(inode->i_private);
1015         if (IS_ERR(error))
1016                 return PTR_ERR(error);
1017
1018         file->private_data  = error;
1019         return 0;
1020 }
1021
/*
 * "i915_error_state": read streams out the last recorded error state;
 * any write clears it (see i915_error_state_write()).
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1030 #endif
1031
/*
 * debugfs "i915_frequency_info": dump the GPU frequency / RPS state.
 * The register layout is heavily generation dependent, hence the three
 * major branches: Ironlake (gen5), Valleyview/Cherryview, and gen6+.
 * Register reads that require the GT power well are bracketed by an
 * explicit forcewake get/put; the whole dump holds a runtime-pm wakeref.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		/* Ironlake: legacy MEMSWCTL/MEMSTAT P-state interface. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		/* The frequency status register lives behind the Punit. */
		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: full RPS register dump. */
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		/* The requested-frequency field position is gen dependent. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		/* PM interrupt registers moved between gen generations. */
		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/*
		 * RP0/RPN fields swap places on gen9-LP, and GEN9_FREQ_SCALER
		 * applies on gen9-BC and gen10+.
		 */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1254
1255 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1256                                struct seq_file *m,
1257                                struct intel_instdone *instdone)
1258 {
1259         int slice;
1260         int subslice;
1261
1262         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1263                    instdone->instdone);
1264
1265         if (INTEL_GEN(dev_priv) <= 3)
1266                 return;
1267
1268         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1269                    instdone->slice_common);
1270
1271         if (INTEL_GEN(dev_priv) <= 6)
1272                 return;
1273
1274         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1275                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1276                            slice, subslice, instdone->sampler[slice][subslice]);
1277
1278         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1279                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1280                            slice, subslice, instdone->row[slice][subslice]);
1281 }
1282
/*
 * debugfs "i915_hangcheck_info": compare each engine's last-recorded
 * hangcheck state against a freshly sampled snapshot (ACTHD, seqno,
 * and for the render engine an INSTDONE dump), plus the status of the
 * hangcheck timer/work itself.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample live engine state while holding a runtime-pm wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* INSTDONE is only tracked for the render engine. */
		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1351
1352 static int i915_reset_info(struct seq_file *m, void *unused)
1353 {
1354         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1355         struct i915_gpu_error *error = &dev_priv->gpu_error;
1356         struct intel_engine_cs *engine;
1357         enum intel_engine_id id;
1358
1359         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1360
1361         for_each_engine(engine, dev_priv, id) {
1362                 seq_printf(m, "%s = %u\n", engine->name,
1363                            i915_reset_engine_count(error, engine));
1364         }
1365
1366         return 0;
1367 }
1368
/*
 * Ironlake render-standby (DRPC) report: decode MEMMODECTL boost/P-state
 * configuration, CRSTANDVID voltage IDs, and the current render-standby
 * (RSx) state from RSTDBYCTL.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* CRSTANDVID packs RS1 VID in the low byte, RS2 VID in the high. */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1425
1426 static int i915_forcewake_domains(struct seq_file *m, void *data)
1427 {
1428         struct drm_i915_private *i915 = node_to_i915(m->private);
1429         struct intel_uncore *uncore = &i915->uncore;
1430         struct intel_uncore_forcewake_domain *fw_domain;
1431         unsigned int tmp;
1432
1433         seq_printf(m, "user.bypass_count = %u\n",
1434                    uncore->user_forcewake.count);
1435
1436         for_each_fw_domain(fw_domain, uncore, tmp)
1437                 seq_printf(m, "%s.wake_count = %u\n",
1438                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1439                            READ_ONCE(fw_domain->wake_count));
1440
1441         return 0;
1442 }
1443
/*
 * Print an RC6 residency counter: raw register value plus the value
 * converted to microseconds by intel_rc6_residency_us().
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
1454
1455 static int vlv_drpc_info(struct seq_file *m)
1456 {
1457         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1458         u32 rcctl1, pw_status;
1459
1460         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1461         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1462
1463         seq_printf(m, "RC6 Enabled: %s\n",
1464                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1465                                         GEN6_RC_CTL_EI_MODE(1))));
1466         seq_printf(m, "Render Power Well: %s\n",
1467                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1468         seq_printf(m, "Media Power Well: %s\n",
1469                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1470
1471         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1472         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1473
1474         return i915_forcewake_domains(m, NULL);
1475 }
1476
/*
 * gen6+ RC6 state dump: enable bits, the current RC state decoded from
 * GEN6_GT_CORE_STATUS, gen9 power-well gating, residency counters and
 * (up to gen7) the pcode-reported RC6 voltage IDs.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw read; the trace event is emitted by hand on the next line. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs are only available via pcode up to gen7. */
	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RCn field sampled above into a human-readable state. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* rc6vids packs three 8-bit VIDs: RC6, RC6+, RC6++. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1561
1562 static int i915_drpc_info(struct seq_file *m, void *unused)
1563 {
1564         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1565         intel_wakeref_t wakeref;
1566         int err = -ENODEV;
1567
1568         with_intel_runtime_pm(dev_priv, wakeref) {
1569                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1570                         err = vlv_drpc_info(m);
1571                 else if (INTEL_GEN(dev_priv) >= 6)
1572                         err = gen6_drpc_info(m);
1573                 else
1574                         err = ironlake_drpc_info(m);
1575         }
1576
1577         return err;
1578 }
1579
1580 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1581 {
1582         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1583
1584         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1585                    dev_priv->fb_tracking.busy_bits);
1586
1587         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1588                    dev_priv->fb_tracking.flip_bits);
1589
1590         return 0;
1591 }
1592
1593 static int i915_fbc_status(struct seq_file *m, void *unused)
1594 {
1595         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1596         struct intel_fbc *fbc = &dev_priv->fbc;
1597         intel_wakeref_t wakeref;
1598
1599         if (!HAS_FBC(dev_priv))
1600                 return -ENODEV;
1601
1602         wakeref = intel_runtime_pm_get(dev_priv);
1603         mutex_lock(&fbc->lock);
1604
1605         if (intel_fbc_is_active(dev_priv))
1606                 seq_puts(m, "FBC enabled\n");
1607         else
1608                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1609
1610         if (intel_fbc_is_active(dev_priv)) {
1611                 u32 mask;
1612
1613                 if (INTEL_GEN(dev_priv) >= 8)
1614                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1615                 else if (INTEL_GEN(dev_priv) >= 7)
1616                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1617                 else if (INTEL_GEN(dev_priv) >= 5)
1618                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1619                 else if (IS_G4X(dev_priv))
1620                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1621                 else
1622                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1623                                                         FBC_STAT_COMPRESSED);
1624
1625                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1626         }
1627
1628         mutex_unlock(&fbc->lock);
1629         intel_runtime_pm_put(dev_priv, wakeref);
1630
1631         return 0;
1632 }
1633
1634 static int i915_fbc_false_color_get(void *data, u64 *val)
1635 {
1636         struct drm_i915_private *dev_priv = data;
1637
1638         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1639                 return -ENODEV;
1640
1641         *val = dev_priv->fbc.false_color;
1642
1643         return 0;
1644 }
1645
1646 static int i915_fbc_false_color_set(void *data, u64 val)
1647 {
1648         struct drm_i915_private *dev_priv = data;
1649         u32 reg;
1650
1651         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1652                 return -ENODEV;
1653
1654         mutex_lock(&dev_priv->fbc.lock);
1655
1656         reg = I915_READ(ILK_DPFC_CONTROL);
1657         dev_priv->fbc.false_color = val;
1658
1659         I915_WRITE(ILK_DPFC_CONTROL, val ?
1660                    (reg | FBC_CTL_FALSE_COLOR) :
1661                    (reg & ~FBC_CTL_FALSE_COLOR));
1662
1663         mutex_unlock(&dev_priv->fbc.lock);
1664         return 0;
1665 }
1666
/* debugfs file ops for the FBC false-colour toggle, formatted as %llu. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
                        i915_fbc_false_color_get, i915_fbc_false_color_set,
                        "%llu\n");
1670
1671 static int i915_ips_status(struct seq_file *m, void *unused)
1672 {
1673         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1674         intel_wakeref_t wakeref;
1675
1676         if (!HAS_IPS(dev_priv))
1677                 return -ENODEV;
1678
1679         wakeref = intel_runtime_pm_get(dev_priv);
1680
1681         seq_printf(m, "Enabled by kernel parameter: %s\n",
1682                    yesno(i915_modparams.enable_ips));
1683
1684         if (INTEL_GEN(dev_priv) >= 8) {
1685                 seq_puts(m, "Currently: unknown\n");
1686         } else {
1687                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1688                         seq_puts(m, "Currently: enabled\n");
1689                 else
1690                         seq_puts(m, "Currently: disabled\n");
1691         }
1692
1693         intel_runtime_pm_put(dev_priv, wakeref);
1694
1695         return 0;
1696 }
1697
/*
 * Report whether panel self-refresh is enabled, probing the
 * platform-specific status register. The if/else ladder is
 * order-sensitive: more specific platform checks come first.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	/* On gen9+ this always prints "disabled" (no global status bit). */
	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1726
1727 static int i915_emon_status(struct seq_file *m, void *unused)
1728 {
1729         struct drm_i915_private *i915 = node_to_i915(m->private);
1730         intel_wakeref_t wakeref;
1731
1732         if (!IS_GEN(i915, 5))
1733                 return -ENODEV;
1734
1735         with_intel_runtime_pm(i915, wakeref) {
1736                 unsigned long temp, chipset, gfx;
1737
1738                 temp = i915_mch_val(i915);
1739                 chipset = i915_chipset_val(i915);
1740                 gfx = i915_gfx_val(i915);
1741
1742                 seq_printf(m, "GMCH temp: %ld\n", temp);
1743                 seq_printf(m, "Chipset power: %ld\n", chipset);
1744                 seq_printf(m, "GFX power: %ld\n", gfx);
1745                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1746         }
1747
1748         return 0;
1749 }
1750
/*
 * Dump the pcode-provided GPU-freq -> CPU/ring-freq mapping table for
 * LLC platforms. On gen9_bc/gen10+ the RPS freq values are in 50 MHz
 * units (GEN9_FREQ_SCALER), so they are rescaled before the pcode loop
 * and scaled back when printed.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(dev_priv);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode uses ia_freq as in/out: in = GPU freq, out = table. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/*
		 * Reply packs two 8-bit fields (CPU freq in bits 7:0, ring
		 * freq in bits 15:8), each in units of 100 MHz.
		 */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1790
1791 static int i915_opregion(struct seq_file *m, void *unused)
1792 {
1793         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1794         struct drm_device *dev = &dev_priv->drm;
1795         struct intel_opregion *opregion = &dev_priv->opregion;
1796         int ret;
1797
1798         ret = mutex_lock_interruptible(&dev->struct_mutex);
1799         if (ret)
1800                 goto out;
1801
1802         if (opregion->header)
1803                 seq_write(m, opregion->header, OPREGION_SIZE);
1804
1805         mutex_unlock(&dev->struct_mutex);
1806
1807 out:
1808         return 0;
1809 }
1810
1811 static int i915_vbt(struct seq_file *m, void *unused)
1812 {
1813         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1814
1815         if (opregion->vbt)
1816                 seq_write(m, opregion->vbt, opregion->vbt_size);
1817
1818         return 0;
1819 }
1820
/*
 * List every framebuffer: the fbdev console framebuffer first (when
 * fbdev emulation is built in), then all user framebuffers. Takes
 * struct_mutex for describe_obj() and nests mode_config.fb_lock inside
 * it to walk the fb list.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		/* Remember the fbdev fb so the loop below can skip it. */
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as "fbcon". */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1870
1871 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1872 {
1873         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1874                    ring->space, ring->head, ring->tail, ring->emit);
1875 }
1876
/*
 * Dump every GEM context: its hw_id (when assigned), owning task (or
 * "(deleted)"/"(kernel)"), the remap-slice flag, and per-engine context
 * state/ring details. struct_mutex protects the contexts list walk;
 * each context's engines are walked under its own engine lock.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* hw_id_link is non-empty only while a hw_id is assigned. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* Task may have exited; get_pid_task can fail. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = context still needs slice remapping, 'r' = done. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1932
1933 static const char *swizzle_string(unsigned swizzle)
1934 {
1935         switch (swizzle) {
1936         case I915_BIT_6_SWIZZLE_NONE:
1937                 return "none";
1938         case I915_BIT_6_SWIZZLE_9:
1939                 return "bit9";
1940         case I915_BIT_6_SWIZZLE_9_10:
1941                 return "bit9/bit10";
1942         case I915_BIT_6_SWIZZLE_9_11:
1943                 return "bit9/bit11";
1944         case I915_BIT_6_SWIZZLE_9_10_11:
1945                 return "bit9/bit10/bit11";
1946         case I915_BIT_6_SWIZZLE_9_17:
1947                 return "bit9/bit17";
1948         case I915_BIT_6_SWIZZLE_9_10_17:
1949                 return "bit9/bit10/bit17";
1950         case I915_BIT_6_SWIZZLE_UNKNOWN:
1951                 return "unknown";
1952         }
1953
1954         return "bug";
1955 }
1956
/*
 * Report the detected bit-6 swizzle modes for X/Y tiling plus the raw
 * per-gen memory-channel/arbitration registers the detection is based
 * on (gen3/4 vs gen6+ use different register sets).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* Arbitration mode moved to GAMTARBMODE on gen8+. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2004
2005 static const char *rps_power_to_str(unsigned int power)
2006 {
2007         static const char * const strings[] = {
2008                 [LOW_POWER] = "low power",
2009                 [BETWEEN] = "mixed",
2010                 [HIGH_POWER] = "high power",
2011         };
2012
2013         if (power >= ARRAY_SIZE(strings) || !strings[power])
2014                 return "unknown";
2015
2016         return strings[power];
2017 }
2018
/*
 * Dump the RPS (render power/frequency scaling) state: requested vs
 * actual frequency, the soft/hard limits, boost statistics, and — when
 * awake — the autotuning up/down evaluation-interval counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	/* Only read the live frequency if the device is already awake. */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			/* Frequency field lives in bits 15:8 of the status. */
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Grab forcewake once and use raw (_FW) reads inside. */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard against division by zero if counters read back 0. */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2084
2085 static int i915_llc(struct seq_file *m, void *data)
2086 {
2087         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2088         const bool edram = INTEL_GEN(dev_priv) > 8;
2089
2090         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2091         seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2092                    dev_priv->edram_size_mb);
2093
2094         return 0;
2095 }
2096
2097 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2098 {
2099         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2100         intel_wakeref_t wakeref;
2101         struct drm_printer p;
2102
2103         if (!HAS_HUC(dev_priv))
2104                 return -ENODEV;
2105
2106         p = drm_seq_file_printer(m);
2107         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2108
2109         with_intel_runtime_pm(dev_priv, wakeref)
2110                 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2111
2112         return 0;
2113 }
2114
2115 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2116 {
2117         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2118         intel_wakeref_t wakeref;
2119         struct drm_printer p;
2120
2121         if (!HAS_GUC(dev_priv))
2122                 return -ENODEV;
2123
2124         p = drm_seq_file_printer(m);
2125         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2126
2127         with_intel_runtime_pm(dev_priv, wakeref) {
2128                 u32 tmp = I915_READ(GUC_STATUS);
2129                 u32 i;
2130
2131                 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2132                 seq_printf(m, "\tBootrom status = 0x%x\n",
2133                            (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2134                 seq_printf(m, "\tuKernel status = 0x%x\n",
2135                            (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2136                 seq_printf(m, "\tMIA Core status = 0x%x\n",
2137                            (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2138                 seq_puts(m, "\nScratch registers:\n");
2139                 for (i = 0; i < 16; i++) {
2140                         seq_printf(m, "\t%2d: \t0x%x\n",
2141                                    i, I915_READ(SOFT_SCRATCH(i)));
2142                 }
2143         }
2144
2145         return 0;
2146 }
2147
2148 static const char *
2149 stringify_guc_log_type(enum guc_log_buffer_type type)
2150 {
2151         switch (type) {
2152         case GUC_ISR_LOG_BUFFER:
2153                 return "ISR";
2154         case GUC_DPC_LOG_BUFFER:
2155                 return "DPC";
2156         case GUC_CRASH_DUMP_LOG_BUFFER:
2157                 return "CRASH";
2158         default:
2159                 MISSING_CASE(type);
2160         }
2161
2162         return "";
2163 }
2164
2165 static void i915_guc_log_info(struct seq_file *m,
2166                               struct drm_i915_private *dev_priv)
2167 {
2168         struct intel_guc_log *log = &dev_priv->guc.log;
2169         enum guc_log_buffer_type type;
2170
2171         if (!intel_guc_log_relay_enabled(log)) {
2172                 seq_puts(m, "GuC log relay disabled\n");
2173                 return;
2174         }
2175
2176         seq_puts(m, "GuC logging stats:\n");
2177
2178         seq_printf(m, "\tRelay full count: %u\n",
2179                    log->relay.full_count);
2180
2181         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2182                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2183                            stringify_guc_log_type(type),
2184                            log->stats[type].flush,
2185                            log->stats[type].sampled_overflow);
2186         }
2187 }
2188
2189 static void i915_guc_client_info(struct seq_file *m,
2190                                  struct drm_i915_private *dev_priv,
2191                                  struct intel_guc_client *client)
2192 {
2193         struct intel_engine_cs *engine;
2194         enum intel_engine_id id;
2195         u64 tot = 0;
2196
2197         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2198                 client->priority, client->stage_id, client->proc_desc_offset);
2199         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2200                 client->doorbell_id, client->doorbell_offset);
2201
2202         for_each_engine(engine, dev_priv, id) {
2203                 u64 submissions = client->submissions[id];
2204                 tot += submissions;
2205                 seq_printf(m, "\tSubmissions: %llu %s\n",
2206                                 submissions, engine->name);
2207         }
2208         seq_printf(m, "\tTotal: %llu\n", tot);
2209 }
2210
2211 static int i915_guc_info(struct seq_file *m, void *data)
2212 {
2213         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2214         const struct intel_guc *guc = &dev_priv->guc;
2215
2216         if (!USES_GUC(dev_priv))
2217                 return -ENODEV;
2218
2219         i915_guc_log_info(m, dev_priv);
2220
2221         if (!USES_GUC_SUBMISSION(dev_priv))
2222                 return 0;
2223
2224         GEM_BUG_ON(!guc->execbuf_client);
2225
2226         seq_printf(m, "\nDoorbell map:\n");
2227         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2228         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2229
2230         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2231         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2232         if (guc->preempt_client) {
2233                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2234                            guc->preempt_client);
2235                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2236         }
2237
2238         /* Add more as required ... */
2239
2240         return 0;
2241 }
2242
/*
 * i915_guc_stage_pool - debugfs: dump active GuC stage descriptors.
 *
 * Walks the shared stage-descriptor pool and, for each descriptor marked
 * active, prints its doorbell/workqueue parameters followed by the
 * per-engine execlist context (LRC) details for every engine used by the
 * execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	intel_engine_mask_t tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip descriptors not currently registered with the GuC. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* One LRC entry per engine in the client's engine mask. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2296
2297 static int i915_guc_log_dump(struct seq_file *m, void *data)
2298 {
2299         struct drm_info_node *node = m->private;
2300         struct drm_i915_private *dev_priv = node_to_i915(node);
2301         bool dump_load_err = !!node->info_ent->data;
2302         struct drm_i915_gem_object *obj = NULL;
2303         u32 *log;
2304         int i = 0;
2305
2306         if (!HAS_GUC(dev_priv))
2307                 return -ENODEV;
2308
2309         if (dump_load_err)
2310                 obj = dev_priv->guc.load_err_log;
2311         else if (dev_priv->guc.log.vma)
2312                 obj = dev_priv->guc.log.vma->obj;
2313
2314         if (!obj)
2315                 return 0;
2316
2317         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2318         if (IS_ERR(log)) {
2319                 DRM_DEBUG("Failed to pin object\n");
2320                 seq_puts(m, "(log data unaccessible)\n");
2321                 return PTR_ERR(log);
2322         }
2323
2324         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2325                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2326                            *(log + i), *(log + i + 1),
2327                            *(log + i + 2), *(log + i + 3));
2328
2329         seq_putc(m, '\n');
2330
2331         i915_gem_object_unpin_map(obj);
2332
2333         return 0;
2334 }
2335
2336 static int i915_guc_log_level_get(void *data, u64 *val)
2337 {
2338         struct drm_i915_private *dev_priv = data;
2339
2340         if (!USES_GUC(dev_priv))
2341                 return -ENODEV;
2342
2343         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2344
2345         return 0;
2346 }
2347
2348 static int i915_guc_log_level_set(void *data, u64 val)
2349 {
2350         struct drm_i915_private *dev_priv = data;
2351
2352         if (!USES_GUC(dev_priv))
2353                 return -ENODEV;
2354
2355         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2356 }
2357
/* u64 get/set debugfs attribute controlling the GuC log verbosity. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2361
2362 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2363 {
2364         struct drm_i915_private *dev_priv = inode->i_private;
2365
2366         if (!USES_GUC(dev_priv))
2367                 return -ENODEV;
2368
2369         file->private_data = &dev_priv->guc.log;
2370
2371         return intel_guc_log_relay_open(&dev_priv->guc.log);
2372 }
2373
2374 static ssize_t
2375 i915_guc_log_relay_write(struct file *filp,
2376                          const char __user *ubuf,
2377                          size_t cnt,
2378                          loff_t *ppos)
2379 {
2380         struct intel_guc_log *log = filp->private_data;
2381
2382         intel_guc_log_relay_flush(log);
2383
2384         return cnt;
2385 }
2386
2387 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2388 {
2389         struct drm_i915_private *dev_priv = inode->i_private;
2390
2391         intel_guc_log_relay_close(&dev_priv->guc.log);
2392
2393         return 0;
2394 }
2395
/* File operations for the GuC log relay control file in debugfs. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2402
2403 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2404 {
2405         u8 val;
2406         static const char * const sink_status[] = {
2407                 "inactive",
2408                 "transition to active, capture and display",
2409                 "active, display from RFB",
2410                 "active, capture and display on sink device timings",
2411                 "transition to inactive, capture and display, timing re-sync",
2412                 "reserved",
2413                 "reserved",
2414                 "sink internal error",
2415         };
2416         struct drm_connector *connector = m->private;
2417         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2418         struct intel_dp *intel_dp =
2419                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2420         int ret;
2421
2422         if (!CAN_PSR(dev_priv)) {
2423                 seq_puts(m, "PSR Unsupported\n");
2424                 return -ENODEV;
2425         }
2426
2427         if (connector->status != connector_status_connected)
2428                 return -ENODEV;
2429
2430         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2431
2432         if (ret == 1) {
2433                 const char *str = "unknown";
2434
2435                 val &= DP_PSR_SINK_STATE_MASK;
2436                 if (val < ARRAY_SIZE(sink_status))
2437                         str = sink_status[val];
2438                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2439         } else {
2440                 return ret;
2441         }
2442
2443         return 0;
2444 }
2445 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2446
/*
 * psr_source_status - print the source-side PSR hardware state.
 *
 * Reads the live status field from EDP_PSR2_STATUS or EDP_PSR_STATUS,
 * depending on which PSR version is enabled, and decodes it via a
 * per-version lookup table. Unknown values print as "unknown" along with
 * the raw register.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* PSR2 state names, indexed by the hardware status field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 state names, indexed by the hardware status field. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2492
/*
 * i915_edp_psr_status - debugfs: report eDP PSR (Panel Self Refresh) state.
 *
 * Prints sink support, the negotiated PSR mode (PSR1/PSR2), the source
 * control and status registers, busy frontbuffer bits and, where
 * applicable, the performance counter, IRQ debug timestamps and PSR2
 * selective-update block counts.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	/* Registers below require the device awake; hold psr->lock while
	 * reading the software state to keep it consistent. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* Pick the control register matching the enabled PSR version. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered,
	 * so the counter is only meaningful on HSW/BDW.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		/* Each register packs the SU block count of three frames. */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2581
2582 static int
2583 i915_edp_psr_debug_set(void *data, u64 val)
2584 {
2585         struct drm_i915_private *dev_priv = data;
2586         intel_wakeref_t wakeref;
2587         int ret;
2588
2589         if (!CAN_PSR(dev_priv))
2590                 return -ENODEV;
2591
2592         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2593
2594         wakeref = intel_runtime_pm_get(dev_priv);
2595
2596         ret = intel_psr_debug_set(dev_priv, val);
2597
2598         intel_runtime_pm_put(dev_priv, wakeref);
2599
2600         return ret;
2601 }
2602
2603 static int
2604 i915_edp_psr_debug_get(void *data, u64 *val)
2605 {
2606         struct drm_i915_private *dev_priv = data;
2607
2608         if (!CAN_PSR(dev_priv))
2609                 return -ENODEV;
2610
2611         *val = READ_ONCE(dev_priv->psr.debug);
2612         return 0;
2613 }
2614
/* u64 get/set debugfs attribute controlling the PSR debug flags. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2618
/*
 * i915_energy_uJ - debugfs: report GPU package energy in microjoules.
 *
 * Reads the RAPL energy-unit exponent from MSR_RAPL_POWER_UNIT and uses
 * it to scale the MCH_SECP_NRG_STTS counter.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* rdmsrl_safe() fails gracefully if the MSR is unavailable. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/*
	 * Bits 12:8 of the RAPL power-unit MSR hold the energy-status-units
	 * exponent (energy unit = 1/2^units J) — presumably per the Intel
	 * SDM RAPL definition; TODO confirm against the SDM.
	 */
	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2641
2642 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2643 {
2644         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2645         struct pci_dev *pdev = dev_priv->drm.pdev;
2646
2647         if (!HAS_RUNTIME_PM(dev_priv))
2648                 seq_puts(m, "Runtime power management not supported\n");
2649
2650         seq_printf(m, "Runtime power status: %s\n",
2651                    enableddisabled(!dev_priv->power_domains.wakeref));
2652
2653         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2654         seq_printf(m, "IRQs disabled: %s\n",
2655                    yesno(!intel_irqs_enabled(dev_priv)));
2656 #ifdef CONFIG_PM
2657         seq_printf(m, "Usage count: %d\n",
2658                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2659 #else
2660         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2661 #endif
2662         seq_printf(m, "PCI device power state: %s [%d]\n",
2663                    pci_power_name(pdev->current_state),
2664                    pdev->current_state);
2665
2666         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2667                 struct drm_printer p = drm_seq_file_printer(m);
2668
2669                 print_intel_runtime_pm_wakeref(dev_priv, &p);
2670         }
2671
2672         return 0;
2673 }
2674
/*
 * i915_power_domain_info - debugfs: dump power-well and domain use counts.
 *
 * For every power well, prints its reference count followed by the use
 * count of each display power domain it serves. The power_domains lock
 * is held for a consistent snapshot.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2702
/*
 * i915_dmc_info - debugfs: report CSR/DMC firmware status.
 *
 * Shows whether the DMC firmware payload is loaded, its path and version,
 * the DC-state transition counters (DC3->DC5 and, on non-GEN9-LP
 * platforms, DC5->DC6) and a few raw CSR registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers are printed. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The counter registers below are only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2744
2745 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2746                                  struct drm_display_mode *mode)
2747 {
2748         int i;
2749
2750         for (i = 0; i < tabs; i++)
2751                 seq_putc(m, '\t');
2752
2753         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2754 }
2755
/*
 * intel_encoder_info - print one encoder and its attached connectors.
 *
 * For each connector on the encoder, prints id/type/status and, if the
 * connector is connected, the mode currently programmed on the CRTC.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* Connected: show the CRTC's current mode. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2784
/*
 * intel_crtc_info - print a CRTC's primary-plane framebuffer and encoders.
 *
 * Shows the primary plane's fb id, source position and size (or notes a
 * disabled plane), then recurses into each encoder on the CRTC.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; >> 16 gives pixels. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2803
2804 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2805 {
2806         struct drm_display_mode *mode = panel->fixed_mode;
2807
2808         seq_printf(m, "\tfixed mode:\n");
2809         intel_seq_print_mode(m, 2, mode);
2810 }
2811
/*
 * intel_dp_info - print DisplayPort connector details.
 *
 * Shows the DPCD revision, audio support, the fixed panel mode for eDP,
 * and a debug dump of any DP downstream ports.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2826
/*
 * intel_dp_mst_info - print DP MST connector details.
 *
 * Queries the MST topology manager for the port's audio capability and
 * reports it.
 */
static void intel_dp_mst_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
					intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
2840
2841 static void intel_hdmi_info(struct seq_file *m,
2842                             struct intel_connector *intel_connector)
2843 {
2844         struct intel_encoder *intel_encoder = intel_connector->encoder;
2845         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2846
2847         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2848 }
2849
/* Print LVDS connector details: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2855
/*
 * intel_connector_info - print one connector's state and probed modes.
 *
 * Prints common connector properties, then dispatches to a per-type
 * helper (DP/MST, LVDS, HDMI) for encoder-specific details, and finally
 * lists all probed modes. Disconnected connectors get only the header.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Connector may have no encoder attached yet. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may be driven by a dedicated or a DDI encoder. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2905
/* Map a drm_plane_type to a short display string for debugfs output. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
2923
/*
 * plane_rotation - format a plane-rotation bitmask into @buf.
 *
 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
 * will print them all to visualize if the values are misused.
 * The raw value is appended in hex for reference.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2940
/*
 * intel_plane_info - print the state of every plane on a CRTC.
 *
 * For each plane, prints id, type, CRTC position/size, source
 * position/size (16.16 fixed point shown as integer.fraction), pixel
 * format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* values are 16.16 fixed point; the fractional part
		 * is scaled by 15625 >> 10 (i.e. * 10000 / 65536) to print
		 * four decimal digits.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2986
/*
 * intel_scaler_info - print the scaler state of a CRTC.
 *
 * Reports the number of scalers, the scaler-user bitmask and active
 * scaler id, then the in-use/mode state of each scaler slot.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3014
3015 static int i915_display_info(struct seq_file *m, void *unused)
3016 {
3017         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3018         struct drm_device *dev = &dev_priv->drm;
3019         struct intel_crtc *crtc;
3020         struct drm_connector *connector;
3021         struct drm_connector_list_iter conn_iter;
3022         intel_wakeref_t wakeref;
3023
3024         wakeref = intel_runtime_pm_get(dev_priv);
3025
3026         seq_printf(m, "CRTC info\n");
3027         seq_printf(m, "---------\n");
3028         for_each_intel_crtc(dev, crtc) {
3029                 struct intel_crtc_state *pipe_config;
3030
3031                 drm_modeset_lock(&crtc->base.mutex, NULL);
3032                 pipe_config = to_intel_crtc_state(crtc->base.state);
3033
3034                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3035                            crtc->base.base.id, pipe_name(crtc->pipe),
3036                            yesno(pipe_config->base.active),
3037                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3038                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3039
3040                 if (pipe_config->base.active) {
3041                         struct intel_plane *cursor =
3042                                 to_intel_plane(crtc->base.cursor);
3043
3044                         intel_crtc_info(m, crtc);
3045
3046                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3047                                    yesno(cursor->base.state->visible),
3048                                    cursor->base.state->crtc_x,
3049                                    cursor->base.state->crtc_y,
3050                                    cursor->base.state->crtc_w,
3051                                    cursor->base.state->crtc_h,
3052                                    cursor->cursor.base);
3053                         intel_scaler_info(m, crtc);
3054                         intel_plane_info(m, crtc);
3055                 }
3056
3057                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3058                            yesno(!crtc->cpu_fifo_underrun_disabled),
3059                            yesno(!crtc->pch_fifo_underrun_disabled));
3060                 drm_modeset_unlock(&crtc->base.mutex);
3061         }
3062
3063         seq_printf(m, "\n");
3064         seq_printf(m, "Connector info\n");
3065         seq_printf(m, "--------------\n");
3066         mutex_lock(&dev->mode_config.mutex);
3067         drm_connector_list_iter_begin(dev, &conn_iter);
3068         drm_for_each_connector_iter(connector, &conn_iter)
3069                 intel_connector_info(m, connector);
3070         drm_connector_list_iter_end(&conn_iter);
3071         mutex_unlock(&dev->mode_config.mutex);
3072
3073         intel_runtime_pm_put(dev_priv, wakeref);
3074
3075         return 0;
3076 }
3077
3078 static int i915_engine_info(struct seq_file *m, void *unused)
3079 {
3080         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3081         struct intel_engine_cs *engine;
3082         intel_wakeref_t wakeref;
3083         enum intel_engine_id id;
3084         struct drm_printer p;
3085
3086         wakeref = intel_runtime_pm_get(dev_priv);
3087
3088         seq_printf(m, "GT awake? %s [%d]\n",
3089                    yesno(dev_priv->gt.awake),
3090                    atomic_read(&dev_priv->gt.wakeref.count));
3091         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3092                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3093
3094         p = drm_seq_file_printer(m);
3095         for_each_engine(engine, dev_priv, id)
3096                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3097
3098         intel_runtime_pm_put(dev_priv, wakeref);
3099
3100         return 0;
3101 }
3102
3103 static int i915_rcs_topology(struct seq_file *m, void *unused)
3104 {
3105         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3106         struct drm_printer p = drm_seq_file_printer(m);
3107
3108         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3109
3110         return 0;
3111 }
3112
3113 static int i915_shrinker_info(struct seq_file *m, void *unused)
3114 {
3115         struct drm_i915_private *i915 = node_to_i915(m->private);
3116
3117         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3118         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3119
3120         return 0;
3121 }
3122
/*
 * Dump every shared display PLL: its software bookkeeping (crtc_mask,
 * active_mask, on) and the tracked hardware register state. The mg_* /
 * cfgcr* fields are only meaningful on the platforms that have those
 * PLL types; values read as 0 elsewhere.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        /* Lock out modesets so pll->state stays consistent while dumping. */
        drm_modeset_lock_all(dev);
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
                           pll->info->id);
                seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
                           pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
                seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
                seq_printf(m, " dpll_md: 0x%08x\n",
                           pll->state.hw_state.dpll_md);
                seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
                seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
                seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
                seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
                seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
                seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
                           pll->state.hw_state.mg_refclkin_ctl);
                seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_coreclkctl1);
                seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
                           pll->state.hw_state.mg_clktop2_hsclkctl);
                seq_printf(m, " mg_pll_div0:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div0);
                seq_printf(m, " mg_pll_div1:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div1);
                seq_printf(m, " mg_pll_lf:    0x%08x\n",
                           pll->state.hw_state.mg_pll_lf);
                seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
                           pll->state.hw_state.mg_pll_frac_lock);
                seq_printf(m, " mg_pll_ssc:   0x%08x\n",
                           pll->state.hw_state.mg_pll_ssc);
                seq_printf(m, " mg_pll_bias:  0x%08x\n",
                           pll->state.hw_state.mg_pll_bias);
                seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_tdc_coldst_bias);
        }
        drm_modeset_unlock_all(dev);

        return 0;
}
3171
3172 static int i915_wa_registers(struct seq_file *m, void *unused)
3173 {
3174         struct drm_i915_private *i915 = node_to_i915(m->private);
3175         const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
3176         struct i915_wa *wa;
3177         unsigned int i;
3178
3179         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3180         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3181                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3182                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3183
3184         return 0;
3185 }
3186
3187 static int i915_ipc_status_show(struct seq_file *m, void *data)
3188 {
3189         struct drm_i915_private *dev_priv = m->private;
3190
3191         seq_printf(m, "Isochronous Priority Control: %s\n",
3192                         yesno(dev_priv->ipc_enabled));
3193         return 0;
3194 }
3195
3196 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3197 {
3198         struct drm_i915_private *dev_priv = inode->i_private;
3199
3200         if (!HAS_IPC(dev_priv))
3201                 return -ENODEV;
3202
3203         return single_open(file, i915_ipc_status_show, dev_priv);
3204 }
3205
3206 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3207                                      size_t len, loff_t *offp)
3208 {
3209         struct seq_file *m = file->private_data;
3210         struct drm_i915_private *dev_priv = m->private;
3211         intel_wakeref_t wakeref;
3212         bool enable;
3213         int ret;
3214
3215         ret = kstrtobool_from_user(ubuf, len, &enable);
3216         if (ret < 0)
3217                 return ret;
3218
3219         with_intel_runtime_pm(dev_priv, wakeref) {
3220                 if (!dev_priv->ipc_enabled && enable)
3221                         DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3222                 dev_priv->wm.distrust_bios_wm = true;
3223                 dev_priv->ipc_enabled = enable;
3224                 intel_enable_ipc(dev_priv);
3225         }
3226
3227         return len;
3228 }
3229
/* debugfs i915_ipc_status: read reports IPC state, write a bool toggles it. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3238
3239 static int i915_ddb_info(struct seq_file *m, void *unused)
3240 {
3241         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3242         struct drm_device *dev = &dev_priv->drm;
3243         struct skl_ddb_entry *entry;
3244         struct intel_crtc *crtc;
3245
3246         if (INTEL_GEN(dev_priv) < 9)
3247                 return -ENODEV;
3248
3249         drm_modeset_lock_all(dev);
3250
3251         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3252
3253         for_each_intel_crtc(&dev_priv->drm, crtc) {
3254                 struct intel_crtc_state *crtc_state =
3255                         to_intel_crtc_state(crtc->base.state);
3256                 enum pipe pipe = crtc->pipe;
3257                 enum plane_id plane_id;
3258
3259                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3260
3261                 for_each_plane_id_on_crtc(crtc, plane_id) {
3262                         entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3263                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3264                                    entry->start, entry->end,
3265                                    skl_ddb_entry_size(entry));
3266                 }
3267
3268                 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3269                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3270                            entry->end, skl_ddb_entry_size(entry));
3271         }
3272
3273         drm_modeset_unlock_all(dev);
3274
3275         return 0;
3276 }
3277
/*
 * Print the DRRS (Dynamic Refresh Rate Switching) status for one CRTC:
 * the connector driving it, the VBT-declared DRRS capability, and — if the
 * committed CRTC state has DRRS — the current refresh-rate state under
 * drrs->mutex. Note the early returns below drop the mutex before leaving.
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Name every connector currently attached to this CRTC. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        /* What the VBT claims about DRRS support. */
        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                                        drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                                drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3352
3353 static int i915_drrs_status(struct seq_file *m, void *unused)
3354 {
3355         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3356         struct drm_device *dev = &dev_priv->drm;
3357         struct intel_crtc *intel_crtc;
3358         int active_crtc_cnt = 0;
3359
3360         drm_modeset_lock_all(dev);
3361         for_each_intel_crtc(dev, intel_crtc) {
3362                 if (intel_crtc->base.state->active) {
3363                         active_crtc_cnt++;
3364                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3365
3366                         drrs_status_per_crtc(m, dev, intel_crtc);
3367                 }
3368         }
3369         drm_modeset_unlock_all(dev);
3370
3371         if (!active_crtc_cnt)
3372                 seq_puts(m, "No active crtc found\n");
3373
3374         return 0;
3375 }
3376
3377 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3378 {
3379         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3380         struct drm_device *dev = &dev_priv->drm;
3381         struct intel_encoder *intel_encoder;
3382         struct intel_digital_port *intel_dig_port;
3383         struct drm_connector *connector;
3384         struct drm_connector_list_iter conn_iter;
3385
3386         drm_connector_list_iter_begin(dev, &conn_iter);
3387         drm_for_each_connector_iter(connector, &conn_iter) {
3388                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3389                         continue;
3390
3391                 intel_encoder = intel_attached_encoder(connector);
3392                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3393                         continue;
3394
3395                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3396                 if (!intel_dig_port->dp.can_mst)
3397                         continue;
3398
3399                 seq_printf(m, "MST Source Port %c\n",
3400                            port_name(intel_dig_port->base.port));
3401                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3402         }
3403         drm_connector_list_iter_end(&conn_iter);
3404
3405         return 0;
3406 }
3407
/*
 * Write handler for the DP compliance "test active" debugfs file.
 * Parses an integer from userspace and, for every connected non-MST DP
 * connector, sets compliance.test_active (only an explicit "1" activates
 * compliance testing; any other value deactivates it).
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
                                                  const char __user *ubuf,
                                                  size_t len, loff_t *offp)
{
        char *input_buffer;
        int status = 0;
        struct drm_device *dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;
        int val = 0;

        dev = ((struct seq_file *)file->private_data)->private;

        if (len == 0)
                return 0;

        /* Copy and NUL-terminate the user buffer for kstrtoint parsing. */
        input_buffer = memdup_user_nul(ubuf, len);
        if (IS_ERR(input_buffer))
                return PTR_ERR(input_buffer);

        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* MST streams are handled through their primary encoder. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        status = kstrtoint(input_buffer, 10, &val);
                        if (status < 0)
                                break;
                        DRM_DEBUG_DRIVER("Got %d for test active\n", val);
                        /* To prevent erroneous activation of the compliance
                         * testing code, only accept an actual value of 1 here
                         */
                        if (val == 1)
                                intel_dp->compliance.test_active = 1;
                        else
                                intel_dp->compliance.test_active = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        kfree(input_buffer);
        if (status < 0)
                return status;

        *offp += len;
        return len;
}
3466
3467 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3468 {
3469         struct drm_i915_private *dev_priv = m->private;
3470         struct drm_device *dev = &dev_priv->drm;
3471         struct drm_connector *connector;
3472         struct drm_connector_list_iter conn_iter;
3473         struct intel_dp *intel_dp;
3474
3475         drm_connector_list_iter_begin(dev, &conn_iter);
3476         drm_for_each_connector_iter(connector, &conn_iter) {
3477                 struct intel_encoder *encoder;
3478
3479                 if (connector->connector_type !=
3480                     DRM_MODE_CONNECTOR_DisplayPort)
3481                         continue;
3482
3483                 encoder = to_intel_encoder(connector->encoder);
3484                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3485                         continue;
3486
3487                 if (encoder && connector->status == connector_status_connected) {
3488                         intel_dp = enc_to_intel_dp(&encoder->base);
3489                         if (intel_dp->compliance.test_active)
3490                                 seq_puts(m, "1");
3491                         else
3492                                 seq_puts(m, "0");
3493                 } else
3494                         seq_puts(m, "0");
3495         }
3496         drm_connector_list_iter_end(&conn_iter);
3497
3498         return 0;
3499 }
3500
3501 static int i915_displayport_test_active_open(struct inode *inode,
3502                                              struct file *file)
3503 {
3504         return single_open(file, i915_displayport_test_active_show,
3505                            inode->i_private);
3506 }
3507
/* debugfs DP compliance test-active file: readable and writable. */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3516
3517 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3518 {
3519         struct drm_i915_private *dev_priv = m->private;
3520         struct drm_device *dev = &dev_priv->drm;
3521         struct drm_connector *connector;
3522         struct drm_connector_list_iter conn_iter;
3523         struct intel_dp *intel_dp;
3524
3525         drm_connector_list_iter_begin(dev, &conn_iter);
3526         drm_for_each_connector_iter(connector, &conn_iter) {
3527                 struct intel_encoder *encoder;
3528
3529                 if (connector->connector_type !=
3530                     DRM_MODE_CONNECTOR_DisplayPort)
3531                         continue;
3532
3533                 encoder = to_intel_encoder(connector->encoder);
3534                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3535                         continue;
3536
3537                 if (encoder && connector->status == connector_status_connected) {
3538                         intel_dp = enc_to_intel_dp(&encoder->base);
3539                         if (intel_dp->compliance.test_type ==
3540                             DP_TEST_LINK_EDID_READ)
3541                                 seq_printf(m, "%lx",
3542                                            intel_dp->compliance.test_data.edid);
3543                         else if (intel_dp->compliance.test_type ==
3544                                  DP_TEST_LINK_VIDEO_PATTERN) {
3545                                 seq_printf(m, "hdisplay: %d\n",
3546                                            intel_dp->compliance.test_data.hdisplay);
3547                                 seq_printf(m, "vdisplay: %d\n",
3548                                            intel_dp->compliance.test_data.vdisplay);
3549                                 seq_printf(m, "bpc: %u\n",
3550                                            intel_dp->compliance.test_data.bpc);
3551                         }
3552                 } else
3553                         seq_puts(m, "0");
3554         }
3555         drm_connector_list_iter_end(&conn_iter);
3556
3557         return 0;
3558 }
3559 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3560
3561 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3562 {
3563         struct drm_i915_private *dev_priv = m->private;
3564         struct drm_device *dev = &dev_priv->drm;
3565         struct drm_connector *connector;
3566         struct drm_connector_list_iter conn_iter;
3567         struct intel_dp *intel_dp;
3568
3569         drm_connector_list_iter_begin(dev, &conn_iter);
3570         drm_for_each_connector_iter(connector, &conn_iter) {
3571                 struct intel_encoder *encoder;
3572
3573                 if (connector->connector_type !=
3574                     DRM_MODE_CONNECTOR_DisplayPort)
3575                         continue;
3576
3577                 encoder = to_intel_encoder(connector->encoder);
3578                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3579                         continue;
3580
3581                 if (encoder && connector->status == connector_status_connected) {
3582                         intel_dp = enc_to_intel_dp(&encoder->base);
3583                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3584                 } else
3585                         seq_puts(m, "0");
3586         }
3587         drm_connector_list_iter_end(&conn_iter);
3588
3589         return 0;
3590 }
3591 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3592
/*
 * Print one watermark latency table. The number of valid levels depends
 * on the platform, and the raw values are scaled to tenths of a
 * microsecond for display (WM1+ on older platforms is stored in 0.5us
 * units; gen9+/vlv/chv/g4x store whole microseconds).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;

        /* Per-platform watermark level count (mirrored in wm_latency_write). */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++) {
                unsigned int latency = wm[level];

                /*
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 */
                if (INTEL_GEN(dev_priv) >= 9 ||
                    IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv) ||
                    IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                seq_printf(m, "WM%d %u (%u.%u usec)\n",
                           level, wm[level], latency / 10, latency % 10);
        }

        drm_modeset_unlock_all(dev);
}
3632
3633 static int pri_wm_latency_show(struct seq_file *m, void *data)
3634 {
3635         struct drm_i915_private *dev_priv = m->private;
3636         const u16 *latencies;
3637
3638         if (INTEL_GEN(dev_priv) >= 9)
3639                 latencies = dev_priv->wm.skl_latency;
3640         else
3641                 latencies = dev_priv->wm.pri_latency;
3642
3643         wm_latency_show(m, latencies);
3644
3645         return 0;
3646 }
3647
3648 static int spr_wm_latency_show(struct seq_file *m, void *data)
3649 {
3650         struct drm_i915_private *dev_priv = m->private;
3651         const u16 *latencies;
3652
3653         if (INTEL_GEN(dev_priv) >= 9)
3654                 latencies = dev_priv->wm.skl_latency;
3655         else
3656                 latencies = dev_priv->wm.spr_latency;
3657
3658         wm_latency_show(m, latencies);
3659
3660         return 0;
3661 }
3662
3663 static int cur_wm_latency_show(struct seq_file *m, void *data)
3664 {
3665         struct drm_i915_private *dev_priv = m->private;
3666         const u16 *latencies;
3667
3668         if (INTEL_GEN(dev_priv) >= 9)
3669                 latencies = dev_priv->wm.skl_latency;
3670         else
3671                 latencies = dev_priv->wm.cur_latency;
3672
3673         wm_latency_show(m, latencies);
3674
3675         return 0;
3676 }
3677
3678 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3679 {
3680         struct drm_i915_private *dev_priv = inode->i_private;
3681
3682         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3683                 return -ENODEV;
3684
3685         return single_open(file, pri_wm_latency_show, dev_priv);
3686 }
3687
3688 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3689 {
3690         struct drm_i915_private *dev_priv = inode->i_private;
3691
3692         if (HAS_GMCH(dev_priv))
3693                 return -ENODEV;
3694
3695         return single_open(file, spr_wm_latency_show, dev_priv);
3696 }
3697
3698 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3699 {
3700         struct drm_i915_private *dev_priv = inode->i_private;
3701
3702         if (HAS_GMCH(dev_priv))
3703                 return -ENODEV;
3704
3705         return single_open(file, cur_wm_latency_show, dev_priv);
3706 }
3707
/*
 * Parse up to eight space-separated u16 latency values from userspace and
 * store them into @wm under the modeset locks. The write is rejected
 * unless exactly the platform's number of watermark levels is supplied.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, u16 wm[8])
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        u16 new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
        char tmp[32];

        /* Per-platform watermark level count (mirrored in wm_latency_show). */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        /* Reserve one byte for the terminating NUL. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
                     &new[0], &new[1], &new[2], &new[3],
                     &new[4], &new[5], &new[6], &new[7]);
        /* Demand exactly num_levels values; anything else is user error. */
        if (ret != num_levels)
                return -EINVAL;

        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++)
                wm[level] = new[level];

        drm_modeset_unlock_all(dev);

        return len;
}
3752
3753
3754 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3755                                     size_t len, loff_t *offp)
3756 {
3757         struct seq_file *m = file->private_data;
3758         struct drm_i915_private *dev_priv = m->private;
3759         u16 *latencies;
3760
3761         if (INTEL_GEN(dev_priv) >= 9)
3762                 latencies = dev_priv->wm.skl_latency;
3763         else
3764                 latencies = dev_priv->wm.pri_latency;
3765
3766         return wm_latency_write(file, ubuf, len, offp, latencies);
3767 }
3768
3769 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3770                                     size_t len, loff_t *offp)
3771 {
3772         struct seq_file *m = file->private_data;
3773         struct drm_i915_private *dev_priv = m->private;
3774         u16 *latencies;
3775
3776         if (INTEL_GEN(dev_priv) >= 9)
3777                 latencies = dev_priv->wm.skl_latency;
3778         else
3779                 latencies = dev_priv->wm.spr_latency;
3780
3781         return wm_latency_write(file, ubuf, len, offp, latencies);
3782 }
3783
3784 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3785                                     size_t len, loff_t *offp)
3786 {
3787         struct seq_file *m = file->private_data;
3788         struct drm_i915_private *dev_priv = m->private;
3789         u16 *latencies;
3790
3791         if (INTEL_GEN(dev_priv) >= 9)
3792                 latencies = dev_priv->wm.skl_latency;
3793         else
3794                 latencies = dev_priv->wm.cur_latency;
3795
3796         return wm_latency_write(file, ubuf, len, offp, latencies);
3797 }
3798
/* debugfs ops for i915_pri_wm_latency: seq_file read, custom write handler. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3807
/* debugfs ops for i915_spr_wm_latency: seq_file read, custom write handler. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3816
/* debugfs ops for i915_cur_wm_latency: seq_file read, custom write handler. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3825
3826 static int
3827 i915_wedged_get(void *data, u64 *val)
3828 {
3829         int ret = i915_terminally_wedged(data);
3830
3831         switch (ret) {
3832         case -EIO:
3833                 *val = 1;
3834                 return 0;
3835         case 0:
3836                 *val = 0;
3837                 return 0;
3838         default:
3839                 return ret;
3840         }
3841 }
3842
/*
 * Manually wedge the GPU: treat @val as an engine mask and raise an
 * error/reset for those engines with error capture enabled.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}
3856
/* i915_wedged: read returns wedged state, write wedges the given engines. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
3860
/*
 * Bit flags accepted by the i915_gem_drop_caches debugfs file; writing a
 * mask of these requests the corresponding cleanup actions in
 * i915_drop_caches_set(). DROP_ALL is the union of every flag.
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/* Reading i915_gem_drop_caches reports the full supported flag mask. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
3886
/*
 * Perform the cache-dropping actions selected by the DROP_* bits in @val.
 * The ordering below is deliberate: idle/wedge first, then retirement under
 * struct_mutex, then shrinking inside an fs_reclaim section, then waiting
 * for the GT to park, and finally draining freed objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* If the engines refuse to idle within the timeout, wedge the GPU. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* If we ended up wedged above, kick off a full device reset. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Run the shrinkers as if under memory pressure. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	/* Flush the retire/idle workers until the GT reports itself asleep. */
	if (val & DROP_IDLE) {
		do {
			flush_delayed_work(&i915->gem.retire_work);
			drain_delayed_work(&i915->gem.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}
3946
/* i915_gem_drop_caches: read shows DROP_ALL, write drops selected caches. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
3950
/*
 * Read the current MBC snoop-control (cache sharing) policy field from
 * GEN6_MBCUNIT_SNPCR. Only gen6/7 expose this register.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Hold a runtime-pm wakeref across the register read. */
	with_intel_runtime_pm(dev_priv, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3968
/*
 * Set the MBC snoop-control (cache sharing) policy field. Valid values are
 * 0-3; the read-modify-write of GEN6_MBCUNIT_SNPCR is done while holding a
 * runtime-pm wakeref. Gen6/7 only.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
3994
/* i915_cache_sharing: read/write the gen6/7 MBC snoop-control policy. */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
3998
/*
 * Populate @sseu with the runtime slice/subslice/EU power state on
 * Cherryview, decoded from the CHV_POWER_SS{0,1}_SIG{1,2} registers.
 * CHV has a single slice with up to two subslices.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each EU*_PG_ENABLE bit covers a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4031
/*
 * Populate @sseu with the runtime slice/subslice/EU power state on gen10+,
 * decoded from the per-slice GEN10_SLICE_PGCTL_ACK and the per-subslice-pair
 * EU power-gate ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the two subslices sharing each eu_reg entry. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit represents two enabled EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4087
/*
 * Populate @sseu with the runtime slice/subslice/EU power state on gen9,
 * decoded from GEN9_SLICE_PGCTL_ACK and the per-subslice-pair EU
 * power-gate ACK registers. On GEN9_LP the subslice mask is read back
 * from the ACK bits; on GEN9_BC it is taken from the static runtime info.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the two subslices sharing each eu_reg entry. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each ACK bit represents two enabled EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4143
/*
 * Populate @sseu with the runtime slice state on Broadwell. The enabled
 * slice mask comes from GEN8_GT_SLICE_INFO; subslice and EU counts are
 * filled in from the static runtime info, then the EU total is corrected
 * for subslices fused down to 7 EUs.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4171
/*
 * Print a sseu_dev_info block to the seq_file. @is_available_info selects
 * the "Available" (static capability) vs "Enabled" (runtime status) label;
 * the power-gating capability lines are only printed for the former.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* Capability details only make sense for the "Available" block. */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4208
/*
 * i915_sseu_status debugfs entry: print the static SSEU capabilities,
 * then read back the current device status via the platform-specific
 * helper while holding a runtime-pm wakeref. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the limits from static info; status helpers fill the rest. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4243
4244 static int i915_forcewake_open(struct inode *inode, struct file *file)
4245 {
4246         struct drm_i915_private *i915 = inode->i_private;
4247
4248         if (INTEL_GEN(i915) < 6)
4249                 return 0;
4250
4251         file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
4252         intel_uncore_forcewake_user_get(&i915->uncore);
4253
4254         return 0;
4255 }
4256
4257 static int i915_forcewake_release(struct inode *inode, struct file *file)
4258 {
4259         struct drm_i915_private *i915 = inode->i_private;
4260
4261         if (INTEL_GEN(i915) < 6)
4262                 return 0;
4263
4264         intel_uncore_forcewake_user_put(&i915->uncore);
4265         intel_runtime_pm_put(i915,
4266                              (intel_wakeref_t)(uintptr_t)file->private_data);
4267
4268         return 0;
4269 }
4270
/* i915_forcewake_user: forcewake held while the file is open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4276
/*
 * Show the HPD storm threshold and whether a storm is currently being
 * handled (inferred from the pending reenable work).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4295
/*
 * Set the HPD storm detection threshold. Accepts a decimal number or the
 * string "reset" (restores HPD_STORM_DEFAULT_THRESHOLD); 0 disables storm
 * detection. Per-pin stats are cleared under irq_lock so the new threshold
 * starts from a clean slate.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4344
/* seq_file open hook binding the show callback to the device. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4349
/* debugfs ops for i915_hpd_storm_ctl: seq_file read, custom write. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4358
4359 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4360 {
4361         struct drm_i915_private *dev_priv = m->private;
4362
4363         seq_printf(m, "Enabled: %s\n",
4364                    yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4365
4366         return 0;
4367 }
4368
/* seq_file open hook binding the show callback to the device. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4375
/*
 * Enable/disable HPD short-pulse storm detection. Accepts a boolean string
 * or "reset", which restores the platform default (enabled only when the
 * device lacks DP-MST). Per-pin stats are cleared under irq_lock.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4422
/* debugfs ops for i915_hpd_short_storm_ctl: seq_file read, custom write. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4431
/*
 * Manually enable (@val != 0) or disable DRRS on every active CRTC that
 * supports it. For each CRTC: take its modeset lock, wait for any pending
 * commit to complete hw programming, then walk the connectors attached to
 * the CRTC and toggle DRRS on each eDP output. Gen7+ only.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Only active CRTCs with DRRS support are interesting. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Let any in-flight commit finish hw programming first. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Skip connectors not driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4499
4500 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4501
/*
 * Writing a true value re-arms FIFO underrun reporting on every active
 * CRTC (it is disabled after the first underrun to avoid log spam) and
 * resets the FBC underrun tracking. For each CRTC we wait for any pending
 * commit (hw_done then flip_done) under its modeset lock before re-arming.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* Writing a false value is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4555
/* debugfs ops for i915_fifo_underrun_reset: write-only control file. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4562
/*
 * Read-only debugfs entries.  Each is a plain seq_file show callback
 * registered in bulk by i915_debugfs_register() via
 * drm_debugfs_create_files().  Entry format:
 * { name, show callback, driver_features (0 = always present), data }.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* Same dump callback as above; (void *)1 selects the load-error log. */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4612
/*
 * Read/write debugfs entries.  Unlike i915_debugfs_list above, each of
 * these supplies its own file_operations; they are created one by one in
 * i915_debugfs_register() with mode S_IRUGO | S_IWUSR.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	/* Error-state capture support is a compile-time option. */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4640
4641 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4642 {
4643         struct drm_minor *minor = dev_priv->drm.primary;
4644         struct dentry *ent;
4645         int i;
4646
4647         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4648                                   minor->debugfs_root, to_i915(minor->dev),
4649                                   &i915_forcewake_fops);
4650         if (!ent)
4651                 return -ENOMEM;
4652
4653         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4654                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4655                                           S_IRUGO | S_IWUSR,
4656                                           minor->debugfs_root,
4657                                           to_i915(minor->dev),
4658                                           i915_debugfs_files[i].fops);
4659                 if (!ent)
4660                         return -ENOMEM;
4661         }
4662
4663         return drm_debugfs_create_files(i915_debugfs_list,
4664                                         I915_DEBUGFS_ENTRIES,
4665                                         minor->debugfs_root, minor);
4666 }
4667
/* Describes one contiguous range of DPCD registers to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4678
/*
 * DPCD register ranges dumped by the per-connector i915_dpcd debugfs
 * file.  Ranges marked .edp are skipped for non-eDP connectors.  Each
 * range must fit in i915_dpcd_show()'s 16-byte read buffer.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4691
4692 static int i915_dpcd_show(struct seq_file *m, void *data)
4693 {
4694         struct drm_connector *connector = m->private;
4695         struct intel_dp *intel_dp =
4696                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4697         u8 buf[16];
4698         ssize_t err;
4699         int i;
4700
4701         if (connector->status != connector_status_connected)
4702                 return -ENODEV;
4703
4704         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4705                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4706                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4707
4708                 if (b->edp &&
4709                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4710                         continue;
4711
4712                 /* low tech for now */
4713                 if (WARN_ON(size > sizeof(buf)))
4714                         continue;
4715
4716                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4717                 if (err < 0)
4718                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4719                 else
4720                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4721         }
4722
4723         return 0;
4724 }
4725 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4726
4727 static int i915_panel_show(struct seq_file *m, void *data)
4728 {
4729         struct drm_connector *connector = m->private;
4730         struct intel_dp *intel_dp =
4731                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4732
4733         if (connector->status != connector_status_connected)
4734                 return -ENODEV;
4735
4736         seq_printf(m, "Panel power up delay: %d\n",
4737                    intel_dp->panel_power_up_delay);
4738         seq_printf(m, "Panel power down delay: %d\n",
4739                    intel_dp->panel_power_down_delay);
4740         seq_printf(m, "Backlight on delay: %d\n",
4741                    intel_dp->backlight_on_delay);
4742         seq_printf(m, "Backlight off delay: %d\n",
4743                    intel_dp->backlight_off_delay);
4744
4745         return 0;
4746 }
4747 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4748
4749 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4750 {
4751         struct drm_connector *connector = m->private;
4752         struct intel_connector *intel_connector = to_intel_connector(connector);
4753
4754         if (connector->status != connector_status_connected)
4755                 return -ENODEV;
4756
4757         /* HDCP is supported by connector */
4758         if (!intel_connector->hdcp.shim)
4759                 return -EINVAL;
4760
4761         seq_printf(m, "%s:%d HDCP version: ", connector->name,
4762                    connector->base.id);
4763         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4764                    "None" : "HDCP1.4");
4765         seq_puts(m, "\n");
4766
4767         return 0;
4768 }
4769 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4770
/*
 * Report the DSC/FEC state of the pipe currently driving this connector:
 * whether DSC is enabled in the committed crtc state, whether the sink
 * supports DSC, whether DSC is being forced via debugfs, and (for
 * non-eDP) whether the sink supports FEC.
 *
 * Needs connection_mutex plus the crtc lock; both are taken through a
 * drm_modeset_acquire ctx with deadlock backoff, hence the retry loop.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* -EDEADLK: drop everything, back off and retry. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		/* Nothing to report unless the connector is lit by a crtc. */
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Same backoff dance for the crtc lock. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC capability is only meaningful for external DP sinks. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4829
4830 static ssize_t i915_dsc_fec_support_write(struct file *file,
4831                                           const char __user *ubuf,
4832                                           size_t len, loff_t *offp)
4833 {
4834         bool dsc_enable = false;
4835         int ret;
4836         struct drm_connector *connector =
4837                 ((struct seq_file *)file->private_data)->private;
4838         struct intel_encoder *encoder = intel_attached_encoder(connector);
4839         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4840
4841         if (len == 0)
4842                 return 0;
4843
4844         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4845                          len);
4846
4847         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4848         if (ret < 0)
4849                 return ret;
4850
4851         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4852                          (dsc_enable) ? "true" : "false");
4853         intel_dp->force_dsc_en = dsc_enable;
4854
4855         *offp += len;
4856         return len;
4857 }
4858
4859 static int i915_dsc_fec_support_open(struct inode *inode,
4860                                      struct file *file)
4861 {
4862         return single_open(file, i915_dsc_fec_support_show,
4863                            inode->i_private);
4864 }
4865
/* Read/write fops for the per-connector i915_dsc_fec_support file. */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4874
4875 /**
4876  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4877  * @connector: pointer to a registered drm_connector
4878  *
4879  * Cleanup will be done by drm_connector_unregister() through a call to
4880  * drm_debugfs_connector_remove().
4881  *
4882  * Returns 0 on success, negative error codes on error.
4883  */
4884 int i915_debugfs_connector_add(struct drm_connector *connector)
4885 {
4886         struct dentry *root = connector->debugfs_entry;
4887         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4888
4889         /* The connector must have been registered beforehands. */
4890         if (!root)
4891                 return -ENODEV;
4892
4893         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4894             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4895                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4896                                     connector, &i915_dpcd_fops);
4897
4898         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4899                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4900                                     connector, &i915_panel_fops);
4901                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4902                                     connector, &i915_psr_sink_status_fops);
4903         }
4904
4905         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4906             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4907             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4908                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4909                                     connector, &i915_hdcp_sink_capability_fops);
4910         }
4911
4912         if (INTEL_GEN(dev_priv) >= 10 &&
4913             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4914              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4915                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4916                                     connector, &i915_dsc_fec_support_fops);
4917
4918         return 0;
4919 }