Merge drm/drm-next into drm-misc-next
[linux-2.6-block.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
/* Resolve the debugfs node back to the i915 device that registered it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}
39
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42         struct drm_i915_private *dev_priv = node_to_i915(m->private);
43         const struct intel_device_info *info = INTEL_INFO(dev_priv);
44         struct drm_printer p = drm_seq_file_printer(m);
45
46         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49
50         intel_device_info_dump_flags(info, &p);
51         intel_device_info_dump_runtime(info, &p);
52         intel_driver_caps_print(&dev_priv->caps, &p);
53
54         kernel_param_lock(THIS_MODULE);
55         i915_params_dump(&i915_modparams, &p);
56         kernel_param_unlock(THIS_MODULE);
57
58         return 0;
59 }
60
/* '*' when the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
        if (i915_gem_object_is_active(obj))
                return '*';

        return ' ';
}
65
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68         return obj->pin_global ? 'p' : ' ';
69 }
70
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73         switch (i915_gem_object_get_tiling(obj)) {
74         default:
75         case I915_TILING_NONE: return ' ';
76         case I915_TILING_X: return 'X';
77         case I915_TILING_Y: return 'Y';
78         }
79 }
80
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83         return obj->userfault_count ? 'g' : ' ';
84 }
85
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88         return obj->mm.mapping ? 'M' : ' ';
89 }
90
/*
 * Sum the GGTT address space consumed by @obj: every GGTT vma whose
 * drm_mm node is actually allocated contributes its node size.
 */
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
        u64 size = 0;
        struct i915_vma *vma;

        for_each_ggtt_vma(vma, obj) {
                if (drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }

        return size;
}
103
/* Append "<name>, " to @buf at offset @x, clamping to the buffer bounds. */
static size_t append_page_size(char *buf, size_t len, size_t x, const char *name)
{
        int n;

        if (x >= len)
                return len;

        n = snprintf(buf + x, len - x, "%s, ", name);
        if (n < 0)
                return x;

        /* snprintf reports the would-be length on truncation: clamp. */
        x += (size_t)n;
        return x < len ? x : len;
}

/*
 * stringify_page_sizes - render a mask of I915_GTT_PAGE_SIZE_* bits.
 * @page_sizes: bitmask of GTT page-size flags
 * @buf: scratch buffer for mixed masks, may be NULL
 * @len: size of @buf in bytes
 *
 * A mask matching exactly one known size (or 0) returns a static string
 * and ignores @buf.  A mixed mask is formatted into @buf as a comma
 * separated list ("2M, 64K, 4K"); with a NULL @buf it degrades to "M".
 *
 * Returns either a static string or @buf.
 */
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
        size_t x = 0;

        switch (page_sizes) {
        case 0:
                return "";
        case I915_GTT_PAGE_SIZE_4K:
                return "4K";
        case I915_GTT_PAGE_SIZE_64K:
                return "64K";
        case I915_GTT_PAGE_SIZE_2M:
                return "2M";
        default:
                if (!buf)
                        return "M";

                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
                        x = append_page_size(buf, len, x, "2M");
                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
                        x = append_page_size(buf, len, x, "64K");
                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
                        x = append_page_size(buf, len, x, "4K");

                /*
                 * Strip the trailing ", ".  Guard against a mask with
                 * only unknown bits (x == 0) and against truncation,
                 * either of which previously made buf[x - 2] index
                 * outside the buffer.
                 */
                if (x >= 2)
                        buf[x - 2] = '\0';
                else if (len)
                        buf[0] = '\0';

                return buf;
        }
}
133
/*
 * describe_obj - print a one-line summary of a GEM object, followed by
 * per-vma binding details, into @m.
 *
 * Caller must hold dev->struct_mutex (asserted below); the vma lists
 * and binding state are otherwise unstable.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        unsigned int frontbuffer_bits;
        int pin_count = 0;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* flags: active/pinned/tiling/userfault/mapped, then size, domains, cache */
        seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->read_domains,
                   obj->write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        /* count pinned bindings across every vma of the object */
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_global)
                seq_printf(m, " (global)");
        /* per-vma detail: offset/size/page-sizes, GGTT view, fence */
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size,
                           stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
                                seq_puts(m, ", normal");
                                break;

                        case I915_GGTT_VIEW_PARTIAL:
                                seq_printf(m, ", partial [%08llx+%x]",
                                           vma->ggtt_view.partial.offset << PAGE_SHIFT,
                                           vma->ggtt_view.partial.size << PAGE_SHIFT);
                                break;

                        case I915_GGTT_VIEW_ROTATED:
                                seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.rotated.plane[0].width,
                                           vma->ggtt_view.rotated.plane[0].height,
                                           vma->ggtt_view.rotated.plane[0].stride,
                                           vma->ggtt_view.rotated.plane[0].offset,
                                           vma->ggtt_view.rotated.plane[1].width,
                                           vma->ggtt_view.rotated.plane[1].height,
                                           vma->ggtt_view.rotated.plane[1].stride,
                                           vma->ggtt_view.rotated.plane[1].offset);
                                break;

                        default:
                                MISSING_CASE(vma->ggtt_view.type);
                                break;
                        }
                }
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
                                   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);

        frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
        if (frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224         const struct drm_i915_gem_object *a =
225                 *(const struct drm_i915_gem_object **)A;
226         const struct drm_i915_gem_object *b =
227                 *(const struct drm_i915_gem_object **)B;
228
229         if (a->stolen->start < b->stolen->start)
230                 return -1;
231         if (a->stolen->start > b->stolen->start)
232                 return 1;
233         return 0;
234 }
235
/*
 * i915_gem_stolen_list_info - debugfs dump of all objects backed by
 * stolen memory, sorted by stolen offset.
 *
 * The object lists are walked under mm.obj_lock (a spinlock), so the
 * candidates are first snapshotted into a kvmalloc'ed array and only
 * described afterwards, under struct_mutex alone.  The snapshot is
 * bounded by the object count sampled before allocation, so objects
 * created concurrently may be missed; this is best-effort debug output.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long total, count, n;
        int ret;

        total = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        total_obj_size = total_gtt_size = count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

        }
        /* unbound objects consume no GTT space, only object size */
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

        seq_puts(m, "Stolen:\n");
        for (n = 0; n < count; n++) {
                seq_puts(m, "   ");
                describe_obj(m, objects[n]);
                seq_putc(m, '\n');
        }
        seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        mutex_unlock(&dev->struct_mutex);
out:
        kvfree(objects);
        return ret;
}
298
/*
 * Accumulator for summarising GEM object usage, filled in by
 * per_file_stats() and friends.  All sizes are in bytes.
 */
struct file_stats {
        struct drm_i915_file_private *file_priv; /* owner; filters ppGTT vmas */
        unsigned long count;    /* objects visited */
        u64 total, unbound;     /* all objects / objects with no binding */
        u64 global, shared;     /* GGTT-bound size / named or dma-buf exported */
        u64 active, inactive;   /* bound size, split by GPU activity */
};
306
/*
 * per_file_stats - idr_for_each() callback accumulating one object's
 * usage into a struct file_stats (@data).
 *
 * ppGTT vmas belonging to a different file than stats->file_priv are
 * skipped, so shared objects are only charged to their own ppGTT owner;
 * GGTT vmas are always counted.  Always returns 0 to continue iteration.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
                stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;

        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
                        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

                        /* only charge ppGTT vmas owned by this client */
                        if (ppgtt->vm.file != stats->file_priv)
                                continue;
                }

                if (i915_vma_is_active(vma))
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;
        }

        return 0;
}
343
/*
 * Emit one summary line for @stats under the label @name; prints
 * nothing when no objects were counted.  NOTE: this is a macro taking
 * the struct by value, and @stats is evaluated multiple times — only
 * pass a plain local variable.
 */
#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
                           stats.unbound); \
} while (0)
356
/*
 * Accumulate and print usage stats for every object sitting in the
 * per-engine batch-buffer pools (kernel-owned, hence the "[k]" label).
 */
static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct file_stats stats;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int j;

        memset(&stats, 0, sizeof(stats));

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                per_file_stats(0, obj, &stats);
                }
        }

        print_file_stats(m, "[k]batch pool", stats);
}
379
/*
 * per_file_ctx_stats - idr_for_each() callback charging a context's
 * per-engine state and ring objects to the file_stats in @data.
 * Always returns 0 to continue iteration.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
        struct i915_gem_context *ctx = ptr;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, ctx->i915, id) {
                struct intel_context *ce = to_intel_context(ctx, engine);

                if (ce->state)
                        per_file_stats(0, ce->state->obj, data);
                if (ce->ring)
                        per_file_stats(0, ce->ring->vma->obj, data);
        }

        return 0;
}
397
/*
 * Sum context-object usage across the kernel context and every open
 * file's contexts, then print one "[k]contexts" summary line.
 * Takes struct_mutex; caller is expected to hold filelist_mutex for the
 * filelist walk — NOTE(review): not asserted here, confirm at callers.
 */
static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct file_stats stats;
        struct drm_file *file;

        memset(&stats, 0, sizeof(stats));

        mutex_lock(&dev->struct_mutex);
        if (dev_priv->kernel_context)
                per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *fpriv = file->driver_priv;
                idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
        }
        mutex_unlock(&dev->struct_mutex);

        print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * i915_gem_object_info - debugfs overview of GEM object memory usage.
 *
 * Prints global totals, a breakdown of the unbound and bound object
 * lists (purgeable/mapped/huge/display counts), GTT totals, batch-pool
 * stats, and finally a per-client summary line for every open file.
 * Returns 0 on success or -EINTR if the struct_mutex wait is
 * interrupted.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
        struct drm_file *file;
        char buf[80];
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
        huge_size = huge_count = 0;

        /* both list walks below run under the obj_lock spinlock */
        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                /* anything beyond the base page size counts as huge */
                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

        /* size/count restart for the bound list; purgeable etc. keep accumulating */
        size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->pin_global) {
                        dpy_size += obj->base.size;
                        ++dpy_count;
                }

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
        seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
                   huge_count,
                   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
                   huge_size);
        seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
                   dpy_count, dpy_size);

        seq_printf(m, "%llu [%pa] gtt total\n",
                   ggtt->vm.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));

        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        /* per-client summaries: filelist_mutex protects the file list */
        mutex_lock(&dev->filelist_mutex);
        print_context_stats(m, dev_priv);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct i915_request *request;
                struct task_struct *task;

                mutex_lock(&dev->struct_mutex);

                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, per_file_stats, &stats);
                spin_unlock(&file->table_lock);
                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                request = list_first_entry_or_null(&file_priv->mm.request_list,
                                                   struct i915_request,
                                                   client_link);
                rcu_read_lock();
                /* prefer the context's pid (the actual submitter) over the fd owner */
                task = pid_task(request && request->gem_context->pid ?
                                request->gem_context->pid : file->pid,
                                PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();

                mutex_unlock(&dev->struct_mutex);
        }
        mutex_unlock(&dev->filelist_mutex);

        return 0;
}
557
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560         struct drm_info_node *node = m->private;
561         struct drm_i915_private *dev_priv = node_to_i915(node);
562         struct drm_device *dev = &dev_priv->drm;
563         struct drm_i915_gem_object **objects;
564         struct drm_i915_gem_object *obj;
565         u64 total_obj_size, total_gtt_size;
566         unsigned long nobject, n;
567         int count, ret;
568
569         nobject = READ_ONCE(dev_priv->mm.object_count);
570         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571         if (!objects)
572                 return -ENOMEM;
573
574         ret = mutex_lock_interruptible(&dev->struct_mutex);
575         if (ret)
576                 return ret;
577
578         count = 0;
579         spin_lock(&dev_priv->mm.obj_lock);
580         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581                 objects[count++] = obj;
582                 if (count == nobject)
583                         break;
584         }
585         spin_unlock(&dev_priv->mm.obj_lock);
586
587         total_obj_size = total_gtt_size = 0;
588         for (n = 0;  n < count; n++) {
589                 obj = objects[n];
590
591                 seq_puts(m, "   ");
592                 describe_obj(m, obj);
593                 seq_putc(m, '\n');
594                 total_obj_size += obj->base.size;
595                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596         }
597
598         mutex_unlock(&dev->struct_mutex);
599
600         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601                    count, total_obj_size, total_gtt_size);
602         kvfree(objects);
603
604         return 0;
605 }
606
/*
 * i915_gem_batch_pool_info - debugfs dump of the per-engine batch
 * buffer pools: object count per cache bucket, a description of every
 * pooled object, and a grand total.
 *
 * Returns 0 on success or -EINTR if the struct_mutex wait is
 * interrupted.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int total = 0;
        int ret, j;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        int count;

                        /* first pass: count the bucket so the header precedes the objects */
                        count = 0;
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                count++;
                        seq_printf(m, "%s cache[%d]: %d objects\n",
                                   engine->name, j, count);

                        /* second pass: describe each pooled object */
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link) {
                                seq_puts(m, "   ");
                                describe_obj(m, obj);
                                seq_putc(m, '\n');
                        }

                        total += count;
                }
        }

        seq_printf(m, "total: %d\n", total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
651
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/
 * IER (skipping pipes whose power well is down, which would fault on
 * the MMIO read), then the port, misc and PCU interrupt registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int pipe;

        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;

                /* only touch pipe registers while holding its power well */
                power_domain = POWER_DOMAIN_PIPE(pipe);
                if (!intel_display_power_get_if_enabled(dev_priv,
                                                        power_domain)) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
                }
                seq_printf(m, "Pipe %c IMR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                seq_printf(m, "Pipe %c IIR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                seq_printf(m, "Pipe %c IER:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));

                intel_display_power_put(dev_priv, power_domain);
        }

        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IMR));
        seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IIR));
        seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IER));

        seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IMR));
        seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IIR));
        seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IER));

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
}
701
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704         struct drm_i915_private *dev_priv = node_to_i915(m->private);
705         struct intel_engine_cs *engine;
706         enum intel_engine_id id;
707         int i, pipe;
708
709         intel_runtime_pm_get(dev_priv);
710
711         if (IS_CHERRYVIEW(dev_priv)) {
712                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713                            I915_READ(GEN8_MASTER_IRQ));
714
715                 seq_printf(m, "Display IER:\t%08x\n",
716                            I915_READ(VLV_IER));
717                 seq_printf(m, "Display IIR:\t%08x\n",
718                            I915_READ(VLV_IIR));
719                 seq_printf(m, "Display IIR_RW:\t%08x\n",
720                            I915_READ(VLV_IIR_RW));
721                 seq_printf(m, "Display IMR:\t%08x\n",
722                            I915_READ(VLV_IMR));
723                 for_each_pipe(dev_priv, pipe) {
724                         enum intel_display_power_domain power_domain;
725
726                         power_domain = POWER_DOMAIN_PIPE(pipe);
727                         if (!intel_display_power_get_if_enabled(dev_priv,
728                                                                 power_domain)) {
729                                 seq_printf(m, "Pipe %c power disabled\n",
730                                            pipe_name(pipe));
731                                 continue;
732                         }
733
734                         seq_printf(m, "Pipe %c stat:\t%08x\n",
735                                    pipe_name(pipe),
736                                    I915_READ(PIPESTAT(pipe)));
737
738                         intel_display_power_put(dev_priv, power_domain);
739                 }
740
741                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742                 seq_printf(m, "Port hotplug:\t%08x\n",
743                            I915_READ(PORT_HOTPLUG_EN));
744                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745                            I915_READ(VLV_DPFLIPSTAT));
746                 seq_printf(m, "DPINVGTT:\t%08x\n",
747                            I915_READ(DPINVGTT));
748                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749
750                 for (i = 0; i < 4; i++) {
751                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752                                    i, I915_READ(GEN8_GT_IMR(i)));
753                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754                                    i, I915_READ(GEN8_GT_IIR(i)));
755                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756                                    i, I915_READ(GEN8_GT_IER(i)));
757                 }
758
759                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760                            I915_READ(GEN8_PCU_IMR));
761                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762                            I915_READ(GEN8_PCU_IIR));
763                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764                            I915_READ(GEN8_PCU_IER));
765         } else if (INTEL_GEN(dev_priv) >= 11) {
766                 seq_printf(m, "Master Interrupt Control:  %08x\n",
767                            I915_READ(GEN11_GFX_MSTR_IRQ));
768
769                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783                            I915_READ(GEN11_DISPLAY_INT_CTL));
784
785                 gen8_display_interrupt_info(m);
786         } else if (INTEL_GEN(dev_priv) >= 8) {
787                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788                            I915_READ(GEN8_MASTER_IRQ));
789
790                 for (i = 0; i < 4; i++) {
791                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792                                    i, I915_READ(GEN8_GT_IMR(i)));
793                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794                                    i, I915_READ(GEN8_GT_IIR(i)));
795                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796                                    i, I915_READ(GEN8_GT_IER(i)));
797                 }
798
799                 gen8_display_interrupt_info(m);
800         } else if (IS_VALLEYVIEW(dev_priv)) {
801                 seq_printf(m, "Display IER:\t%08x\n",
802                            I915_READ(VLV_IER));
803                 seq_printf(m, "Display IIR:\t%08x\n",
804                            I915_READ(VLV_IIR));
805                 seq_printf(m, "Display IIR_RW:\t%08x\n",
806                            I915_READ(VLV_IIR_RW));
807                 seq_printf(m, "Display IMR:\t%08x\n",
808                            I915_READ(VLV_IMR));
809                 for_each_pipe(dev_priv, pipe) {
810                         enum intel_display_power_domain power_domain;
811
812                         power_domain = POWER_DOMAIN_PIPE(pipe);
813                         if (!intel_display_power_get_if_enabled(dev_priv,
814                                                                 power_domain)) {
815                                 seq_printf(m, "Pipe %c power disabled\n",
816                                            pipe_name(pipe));
817                                 continue;
818                         }
819
820                         seq_printf(m, "Pipe %c stat:\t%08x\n",
821                                    pipe_name(pipe),
822                                    I915_READ(PIPESTAT(pipe)));
823                         intel_display_power_put(dev_priv, power_domain);
824                 }
825
826                 seq_printf(m, "Master IER:\t%08x\n",
827                            I915_READ(VLV_MASTER_IER));
828
829                 seq_printf(m, "Render IER:\t%08x\n",
830                            I915_READ(GTIER));
831                 seq_printf(m, "Render IIR:\t%08x\n",
832                            I915_READ(GTIIR));
833                 seq_printf(m, "Render IMR:\t%08x\n",
834                            I915_READ(GTIMR));
835
836                 seq_printf(m, "PM IER:\t\t%08x\n",
837                            I915_READ(GEN6_PMIER));
838                 seq_printf(m, "PM IIR:\t\t%08x\n",
839                            I915_READ(GEN6_PMIIR));
840                 seq_printf(m, "PM IMR:\t\t%08x\n",
841                            I915_READ(GEN6_PMIMR));
842
843                 seq_printf(m, "Port hotplug:\t%08x\n",
844                            I915_READ(PORT_HOTPLUG_EN));
845                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846                            I915_READ(VLV_DPFLIPSTAT));
847                 seq_printf(m, "DPINVGTT:\t%08x\n",
848                            I915_READ(DPINVGTT));
849
850         } else if (!HAS_PCH_SPLIT(dev_priv)) {
851                 seq_printf(m, "Interrupt enable:    %08x\n",
852                            I915_READ(IER));
853                 seq_printf(m, "Interrupt identity:  %08x\n",
854                            I915_READ(IIR));
855                 seq_printf(m, "Interrupt mask:      %08x\n",
856                            I915_READ(IMR));
857                 for_each_pipe(dev_priv, pipe)
858                         seq_printf(m, "Pipe %c stat:         %08x\n",
859                                    pipe_name(pipe),
860                                    I915_READ(PIPESTAT(pipe)));
861         } else {
862                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
863                            I915_READ(DEIER));
864                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
865                            I915_READ(DEIIR));
866                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
867                            I915_READ(DEIMR));
868                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
869                            I915_READ(SDEIER));
870                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
871                            I915_READ(SDEIIR));
872                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
873                            I915_READ(SDEIMR));
874                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
875                            I915_READ(GTIER));
876                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
877                            I915_READ(GTIIR));
878                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
879                            I915_READ(GTIMR));
880         }
881
882         if (INTEL_GEN(dev_priv) >= 11) {
883                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894                            I915_READ(GEN11_GUC_SG_INTR_MASK));
895                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902         } else if (INTEL_GEN(dev_priv) >= 6) {
903                 for_each_engine(engine, dev_priv, id) {
904                         seq_printf(m,
905                                    "Graphics Interrupt mask (%s):       %08x\n",
906                                    engine->name, I915_READ_IMR(engine));
907                 }
908         }
909
910         intel_runtime_pm_put(dev_priv);
911
912         return 0;
913 }
914
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917         struct drm_i915_private *dev_priv = node_to_i915(m->private);
918         struct drm_device *dev = &dev_priv->drm;
919         int i, ret;
920
921         ret = mutex_lock_interruptible(&dev->struct_mutex);
922         if (ret)
923                 return ret;
924
925         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926         for (i = 0; i < dev_priv->num_fence_regs; i++) {
927                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928
929                 seq_printf(m, "Fence %d, pin count = %d, object = ",
930                            i, dev_priv->fence_regs[i].pin_count);
931                 if (!vma)
932                         seq_puts(m, "unused");
933                 else
934                         describe_obj(m, vma->obj);
935                 seq_putc(m, '\n');
936         }
937
938         mutex_unlock(&dev->struct_mutex);
939         return 0;
940 }
941
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944                               size_t count, loff_t *pos)
945 {
946         struct i915_gpu_state *error;
947         ssize_t ret;
948         void *buf;
949
950         error = file->private_data;
951         if (!error)
952                 return 0;
953
954         /* Bounce buffer required because of kernfs __user API convenience. */
955         buf = kmalloc(count, GFP_KERNEL);
956         if (!buf)
957                 return -ENOMEM;
958
959         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
960         if (ret <= 0)
961                 goto out;
962
963         if (!copy_to_user(ubuf, buf, ret))
964                 *pos += ret;
965         else
966                 ret = -EFAULT;
967
968 out:
969         kfree(buf);
970         return ret;
971 }
972
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975         i915_gpu_state_put(file->private_data);
976         return 0;
977 }
978
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981         struct drm_i915_private *i915 = inode->i_private;
982         struct i915_gpu_state *gpu;
983
984         intel_runtime_pm_get(i915);
985         gpu = i915_capture_gpu_state(i915);
986         intel_runtime_pm_put(i915);
987         if (!gpu)
988                 return -ENOMEM;
989
990         file->private_data = gpu;
991         return 0;
992 }
993
/*
 * Read-only debugfs file exposing a GPU state snapshot; a new capture
 * is taken on every open() and released on close().
 */
static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
1001
/*
 * Writing anything to the error-state file discards the captured error
 * state via i915_reset_error_state(); the written bytes themselves are
 * ignored.  Always reports the full count as consumed.
 */
static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        /* No error state was captured at open time; nothing to reset. */
        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}
1018
static int i915_error_state_open(struct inode *inode, struct file *file)
{
        /*
         * i915_first_error_state() apparently may return NULL when no
         * error has been captured — both gpu_state_read() and
         * i915_error_state_write() guard against a NULL private_data.
         */
        file->private_data = i915_first_error_state(inode->i_private);
        return 0;
}
1024
/*
 * Debugfs file exposing the first captured GPU error state: reading
 * dumps it, writing anything clears it.
 */
static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
1033 #endif
1034
/*
 * Debugfs hook to force the global GEM seqno to @val via
 * i915_gem_set_global_seqno().  struct_mutex serialises the seqno
 * update against other GEM activity, and a runtime-pm reference is
 * held while the change is applied.
 */
static int
i915_next_seqno_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        intel_runtime_pm_get(dev_priv);
        ret = i915_gem_set_global_seqno(dev, val);
        intel_runtime_pm_put(dev_priv);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/* Write-only attribute (NULL getter): echo a hex value to set the seqno. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
                        NULL, i915_next_seqno_set,
                        "0x%llx\n");
1058
/*
 * Dump the GPU frequency / RPS state to debugfs.  The frequency
 * registers differ wildly between generations, hence the three
 * hardware paths below (gen5 MEMSWCTL/MEMSTAT, VLV/CHV via the punit
 * mailbox, and the gen6+ RPS registers with gen9/gen11 variations);
 * anything older reports no P-state info.  A runtime-pm reference is
 * held for the whole dump since nearly every line reads hardware.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;

        intel_runtime_pm_get(dev_priv);

        if (IS_GEN5(dev_priv)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                /* punit mailbox accesses are serialised by pcu_lock. */
                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                /* BXT/GLK (gen9 LP) expose the caps at different offsets. */
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                /* The requested-frequency field moved between generations. */
                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                /* The PM interrupt registers also moved across generations. */
                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                /*
                 * Decode the RP frequency caps.  BXT/GLK swap the RPN/RP0
                 * field positions, and gen9 BC / gen10+ report the fields
                 * in units that need GEN9_FREQ_SCALER applied.
                 */
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        /* CD/dot clock limits are available regardless of generation. */
        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv);
        return ret;
}
1280
/*
 * Print the INSTDONE registers captured in @instdone.  Older hardware
 * exposes progressively less: up to gen3 only the primary INSTDONE
 * exists, up to gen6 SC_INSTDONE is added, and newer parts additionally
 * have per-slice/subslice sampler and row INSTDONE registers.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
                               struct seq_file *m,
                               struct intel_instdone *instdone)
{
        int slice;
        int subslice;

        seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
                   instdone->instdone);

        if (INTEL_GEN(dev_priv) <= 3)
                return;

        seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
                   instdone->slice_common);

        if (INTEL_GEN(dev_priv) <= 6)
                return;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->sampler[slice][subslice]);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->row[slice][subslice]);
}
1308
/*
 * Summarise the hangcheck state: global wedge/reset flags, whether the
 * hangcheck timer is armed, and the per-engine view (seqno progress,
 * waiters, ACTHD and the last hangcheck verdict).  INSTDONE details are
 * shown for the render engine only.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
        struct intel_instdone instdone;
        enum intel_engine_id id;

        if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
                seq_puts(m, "Wedged\n");
        if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: struct_mutex backoff\n");
        if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: reset handoff to waiter\n");
        if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
                seq_puts(m, "Waiter holding struct mutex\n");
        if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
                seq_puts(m, "struct_mutex blocked for reset\n");

        /* Hangcheck can be disabled entirely via a module parameter. */
        if (!i915_modparams.enable_hangcheck) {
                seq_puts(m, "Hangcheck disabled\n");
                return 0;
        }

        /* Sample the live engine state while holding a runtime-pm ref. */
        intel_runtime_pm_get(dev_priv);

        for_each_engine(engine, dev_priv, id) {
                acthd[id] = intel_engine_get_active_head(engine);
                seqno[id] = intel_engine_get_seqno(engine);
        }

        intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

        intel_runtime_pm_put(dev_priv);

        if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
                seq_printf(m, "Hangcheck active, timer fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
                                            jiffies));
        else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
                seq_puts(m, "Hangcheck active, work pending\n");
        else
                seq_puts(m, "Hangcheck inactive\n");

        seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

        for_each_engine(engine, dev_priv, id) {
                struct intel_breadcrumbs *b = &engine->breadcrumbs;
                struct rb_node *rb;

                seq_printf(m, "%s:\n", engine->name);
                seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
                           engine->hangcheck.seqno, seqno[id],
                           intel_engine_last_submit(engine));
                seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)),
                           yesno(engine->hangcheck.stalled),
                           yesno(engine->hangcheck.wedged));

                /* Walk the breadcrumb tree to list every waiting task. */
                spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                        struct intel_wait *w = rb_entry(rb, typeof(*w), node);

                        seq_printf(m, "\t%s [%d] waiting for %x\n",
                                   w->tsk->comm, w->tsk->pid, w->seqno);
                }
                spin_unlock_irq(&b->rb_lock);

                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
                seq_printf(m, "\taction = %s(%d) %d ms ago\n",
                           hangcheck_action_to_str(engine->hangcheck.action),
                           engine->hangcheck.action,
                           jiffies_to_msecs(jiffies -
                                            engine->hangcheck.action_timestamp));

                /* INSTDONE was only captured for the render engine above. */
                if (engine->id == RCS) {
                        seq_puts(m, "\tinstdone read =\n");

                        i915_instdone_info(dev_priv, m, &instdone);

                        seq_puts(m, "\tinstdone accu =\n");

                        i915_instdone_info(dev_priv, m,
                                           &engine->hangcheck.instdone);
                }
        }

        return 0;
}
1403
1404 static int i915_reset_info(struct seq_file *m, void *unused)
1405 {
1406         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407         struct i915_gpu_error *error = &dev_priv->gpu_error;
1408         struct intel_engine_cs *engine;
1409         enum intel_engine_id id;
1410
1411         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
1413         for_each_engine(engine, dev_priv, id) {
1414                 seq_printf(m, "%s = %u\n", engine->name,
1415                            i915_reset_engine_count(error, engine));
1416         }
1417
1418         return 0;
1419 }
1420
/*
 * Report the Ironlake render-standby (DRPC) configuration and the
 * current render-standby state, decoded from the MEMMODECTL, RSTDBYCTL
 * and CRSTANDVID registers.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
        seq_printf(m, "Gated voltage change: %s\n",
                   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   yesno(!(rstdbyctl & RCX_SW_EXIT)));
        /* Decode the current render-standby state field. */
        seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}
1477
1478 static int i915_forcewake_domains(struct seq_file *m, void *data)
1479 {
1480         struct drm_i915_private *i915 = node_to_i915(m->private);
1481         struct intel_uncore_forcewake_domain *fw_domain;
1482         unsigned int tmp;
1483
1484         seq_printf(m, "user.bypass_count = %u\n",
1485                    i915->uncore.user_forcewake.count);
1486
1487         for_each_fw_domain(fw_domain, i915, tmp)
1488                 seq_printf(m, "%s.wake_count = %u\n",
1489                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1490                            READ_ONCE(fw_domain->wake_count));
1491
1492         return 0;
1493 }
1494
1495 static void print_rc6_res(struct seq_file *m,
1496                           const char *title,
1497                           const i915_reg_t reg)
1498 {
1499         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
1501         seq_printf(m, "%s %u (%llu us)\n",
1502                    title, I915_READ(reg),
1503                    intel_rc6_residency_us(dev_priv, reg));
1504 }
1505
1506 static int vlv_drpc_info(struct seq_file *m)
1507 {
1508         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1509         u32 rcctl1, pw_status;
1510
1511         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1512         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513
1514         seq_printf(m, "RC6 Enabled: %s\n",
1515                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516                                         GEN6_RC_CTL_EI_MODE(1))));
1517         seq_printf(m, "Render Power Well: %s\n",
1518                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1519         seq_printf(m, "Media Power Well: %s\n",
1520                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1521
1522         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1524
1525         return i915_forcewake_domains(m, NULL);
1526 }
1527
/*
 * Dump gen6+ render C-state (RC6) configuration and status: RC control
 * bits, (gen9+) powergating state, the current RC state from the GT core
 * status register, RC6 residency counters, and (gen6/7) the RC6 voltage
 * IDs fetched from the PCU.  Caller holds a runtime-PM wakeref.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read skips forcewake; trace it manually like a normal read. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 voltage IDs come from the PCU mailbox on gen6/7. */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with core power down reports as "Core Power Down". */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* rc6vids packs three 8-bit VIDs: RC6, RC6+, RC6++. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1615
1616 static int i915_drpc_info(struct seq_file *m, void *unused)
1617 {
1618         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1619         int err;
1620
1621         intel_runtime_pm_get(dev_priv);
1622
1623         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1624                 err = vlv_drpc_info(m);
1625         else if (INTEL_GEN(dev_priv) >= 6)
1626                 err = gen6_drpc_info(m);
1627         else
1628                 err = ironlake_drpc_info(m);
1629
1630         intel_runtime_pm_put(dev_priv);
1631
1632         return err;
1633 }
1634
1635 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636 {
1637         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638
1639         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640                    dev_priv->fb_tracking.busy_bits);
1641
1642         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643                    dev_priv->fb_tracking.flip_bits);
1644
1645         return 0;
1646 }
1647
1648 static int i915_fbc_status(struct seq_file *m, void *unused)
1649 {
1650         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651         struct intel_fbc *fbc = &dev_priv->fbc;
1652
1653         if (!HAS_FBC(dev_priv))
1654                 return -ENODEV;
1655
1656         intel_runtime_pm_get(dev_priv);
1657         mutex_lock(&fbc->lock);
1658
1659         if (intel_fbc_is_active(dev_priv))
1660                 seq_puts(m, "FBC enabled\n");
1661         else
1662                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
1664         if (intel_fbc_is_active(dev_priv)) {
1665                 u32 mask;
1666
1667                 if (INTEL_GEN(dev_priv) >= 8)
1668                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669                 else if (INTEL_GEN(dev_priv) >= 7)
1670                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671                 else if (INTEL_GEN(dev_priv) >= 5)
1672                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673                 else if (IS_G4X(dev_priv))
1674                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675                 else
1676                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677                                                         FBC_STAT_COMPRESSED);
1678
1679                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1680         }
1681
1682         mutex_unlock(&fbc->lock);
1683         intel_runtime_pm_put(dev_priv);
1684
1685         return 0;
1686 }
1687
1688 static int i915_fbc_false_color_get(void *data, u64 *val)
1689 {
1690         struct drm_i915_private *dev_priv = data;
1691
1692         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693                 return -ENODEV;
1694
1695         *val = dev_priv->fbc.false_color;
1696
1697         return 0;
1698 }
1699
1700 static int i915_fbc_false_color_set(void *data, u64 val)
1701 {
1702         struct drm_i915_private *dev_priv = data;
1703         u32 reg;
1704
1705         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706                 return -ENODEV;
1707
1708         mutex_lock(&dev_priv->fbc.lock);
1709
1710         reg = I915_READ(ILK_DPFC_CONTROL);
1711         dev_priv->fbc.false_color = val;
1712
1713         I915_WRITE(ILK_DPFC_CONTROL, val ?
1714                    (reg | FBC_CTL_FALSE_COLOR) :
1715                    (reg & ~FBC_CTL_FALSE_COLOR));
1716
1717         mutex_unlock(&dev_priv->fbc.lock);
1718         return 0;
1719 }
1720
/* debugfs i915_fbc_false_color: read/write toggle for FBC false-color debug. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1724
1725 static int i915_ips_status(struct seq_file *m, void *unused)
1726 {
1727         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728
1729         if (!HAS_IPS(dev_priv))
1730                 return -ENODEV;
1731
1732         intel_runtime_pm_get(dev_priv);
1733
1734         seq_printf(m, "Enabled by kernel parameter: %s\n",
1735                    yesno(i915_modparams.enable_ips));
1736
1737         if (INTEL_GEN(dev_priv) >= 8) {
1738                 seq_puts(m, "Currently: unknown\n");
1739         } else {
1740                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741                         seq_puts(m, "Currently: enabled\n");
1742                 else
1743                         seq_puts(m, "Currently: disabled\n");
1744         }
1745
1746         intel_runtime_pm_put(dev_priv);
1747
1748         return 0;
1749 }
1750
1751 static int i915_sr_status(struct seq_file *m, void *unused)
1752 {
1753         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1754         bool sr_enabled = false;
1755
1756         intel_runtime_pm_get(dev_priv);
1757         intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1758
1759         if (INTEL_GEN(dev_priv) >= 9)
1760                 /* no global SR status; inspect per-plane WM */;
1761         else if (HAS_PCH_SPLIT(dev_priv))
1762                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1763         else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1764                  IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1765                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1766         else if (IS_I915GM(dev_priv))
1767                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1768         else if (IS_PINEVIEW(dev_priv))
1769                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1770         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1771                 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1772
1773         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1774         intel_runtime_pm_put(dev_priv);
1775
1776         seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1777
1778         return 0;
1779 }
1780
1781 static int i915_emon_status(struct seq_file *m, void *unused)
1782 {
1783         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784         struct drm_device *dev = &dev_priv->drm;
1785         unsigned long temp, chipset, gfx;
1786         int ret;
1787
1788         if (!IS_GEN5(dev_priv))
1789                 return -ENODEV;
1790
1791         intel_runtime_pm_get(dev_priv);
1792
1793         ret = mutex_lock_interruptible(&dev->struct_mutex);
1794         if (ret)
1795                 return ret;
1796
1797         temp = i915_mch_val(dev_priv);
1798         chipset = i915_chipset_val(dev_priv);
1799         gfx = i915_gfx_val(dev_priv);
1800         mutex_unlock(&dev->struct_mutex);
1801
1802         seq_printf(m, "GMCH temp: %ld\n", temp);
1803         seq_printf(m, "Chipset power: %ld\n", chipset);
1804         seq_printf(m, "GFX power: %ld\n", gfx);
1805         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1806
1807         intel_runtime_pm_put(dev_priv);
1808
1809         return 0;
1810 }
1811
/*
 * Dump the GPU-frequency to CPU/ring-frequency mapping programmed into
 * the PCU.  Only meaningful on parts where the GPU shares the LLC with
 * the CPU, hence the HAS_LLC() gate.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* pcu_lock serialises the PCODE mailbox reads in the loop below. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is in/out: PCODE replies with CPU+ring freqs. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1859
1860 static int i915_opregion(struct seq_file *m, void *unused)
1861 {
1862         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1863         struct drm_device *dev = &dev_priv->drm;
1864         struct intel_opregion *opregion = &dev_priv->opregion;
1865         int ret;
1866
1867         ret = mutex_lock_interruptible(&dev->struct_mutex);
1868         if (ret)
1869                 goto out;
1870
1871         if (opregion->header)
1872                 seq_write(m, opregion->header, OPREGION_SIZE);
1873
1874         mutex_unlock(&dev->struct_mutex);
1875
1876 out:
1877         return 0;
1878 }
1879
1880 static int i915_vbt(struct seq_file *m, void *unused)
1881 {
1882         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1883
1884         if (opregion->vbt)
1885                 seq_write(m, opregion->vbt, opregion->vbt_size);
1886
1887         return 0;
1888 }
1889
/*
 * Dump every framebuffer known to the device: the fbdev emulation
 * framebuffer first (when compiled in), then all user framebuffers,
 * each with its geometry, format, refcount and backing object.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* NOTE(review): struct_mutex appears to guard describe_obj() — confirm. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the mode_config framebuffer list walk. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* The fbdev framebuffer was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1939
1940 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1941 {
1942         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1943                    ring->space, ring->head, ring->tail, ring->emit);
1944 }
1945
/*
 * Dump every GEM context: its hw_id and pin count (when assigned), the
 * owning process ("(kernel)" / "(deleted)" otherwise), the remap_slice
 * flag, and the per-engine context state object and ringbuffer.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* Only contexts on the hw_id list have a hw_id to print. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when remap_slice is set, 'r' when it is clear. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2001
2002 static const char *swizzle_string(unsigned swizzle)
2003 {
2004         switch (swizzle) {
2005         case I915_BIT_6_SWIZZLE_NONE:
2006                 return "none";
2007         case I915_BIT_6_SWIZZLE_9:
2008                 return "bit9";
2009         case I915_BIT_6_SWIZZLE_9_10:
2010                 return "bit9/bit10";
2011         case I915_BIT_6_SWIZZLE_9_11:
2012                 return "bit9/bit11";
2013         case I915_BIT_6_SWIZZLE_9_10_11:
2014                 return "bit9/bit10/bit11";
2015         case I915_BIT_6_SWIZZLE_9_17:
2016                 return "bit9/bit17";
2017         case I915_BIT_6_SWIZZLE_9_10_17:
2018                 return "bit9/bit10/bit17";
2019         case I915_BIT_6_SWIZZLE_UNKNOWN:
2020                 return "unknown";
2021         }
2022
2023         return "bug";
2024 }
2025
/*
 * Report the detected bit6 swizzling mode for X/Y tiling together with
 * the raw DRAM/arbitration registers it was derived from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* Gen3/4: DRAM channel configuration registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: per-channel DIMM config plus arbitration control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2072
/*
 * idr_for_each() callback used by i915_ppgtt_info(): dump the ppgtt of
 * one file-private context.  @data is the target seq_file.
 */
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	/* Context without its own ppgtt — nothing to dump for it. */
	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
2093
/*
 * Print the four page-directory pointers programmed into each engine
 * for the aliasing ppgtt (gen8+).  No-op when no aliasing ppgtt exists.
 */
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			/* Reassemble the 64-bit PDP from its two 32-bit halves. */
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
2115
/*
 * Print the gen6/7 per-engine page-directory registers and, if present,
 * the aliasing ppgtt's page-directory offset and contents.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Gen6 has a single global GFX_MODE; gen7 has one per ring below. */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2148
2149 static int i915_ppgtt_info(struct seq_file *m, void *data)
2150 {
2151         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2152         struct drm_device *dev = &dev_priv->drm;
2153         struct drm_file *file;
2154         int ret;
2155
2156         mutex_lock(&dev->filelist_mutex);
2157         ret = mutex_lock_interruptible(&dev->struct_mutex);
2158         if (ret)
2159                 goto out_unlock;
2160
2161         intel_runtime_pm_get(dev_priv);
2162
2163         if (INTEL_GEN(dev_priv) >= 8)
2164                 gen8_ppgtt_info(m, dev_priv);
2165         else if (INTEL_GEN(dev_priv) >= 6)
2166                 gen6_ppgtt_info(m, dev_priv);
2167
2168         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2169                 struct drm_i915_file_private *file_priv = file->driver_priv;
2170                 struct task_struct *task;
2171
2172                 task = get_pid_task(file->pid, PIDTYPE_PID);
2173                 if (!task) {
2174                         ret = -ESRCH;
2175                         goto out_rpm;
2176                 }
2177                 seq_printf(m, "\nproc: %s\n", task->comm);
2178                 put_task_struct(task);
2179                 idr_for_each(&file_priv->context_idr, per_file_ctx,
2180                              (void *)(unsigned long)m);
2181         }
2182
2183 out_rpm:
2184         intel_runtime_pm_put(dev_priv);
2185         mutex_unlock(&dev->struct_mutex);
2186 out_unlock:
2187         mutex_unlock(&dev->filelist_mutex);
2188         return ret;
2189 }
2190
2191 static int count_irq_waiters(struct drm_i915_private *i915)
2192 {
2193         struct intel_engine_cs *engine;
2194         enum intel_engine_id id;
2195         int count = 0;
2196
2197         for_each_engine(engine, i915, id)
2198                 count += intel_engine_has_waiter(engine);
2199
2200         return count;
2201 }
2202
2203 static const char *rps_power_to_str(unsigned int power)
2204 {
2205         static const char * const strings[] = {
2206                 [LOW_POWER] = "low power",
2207                 [BETWEEN] = "mixed",
2208                 [HIGH_POWER] = "high power",
2209         };
2210
2211         if (power >= ARRAY_SIZE(strings) || !strings[power])
2212                 return "unknown";
2213
2214         return strings[power];
2215 }
2216
/*
 * debugfs: summarise RPS (Render P-state) frequency state, the per-client
 * boost counters and the autotuning window statistics.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	struct drm_file *file;

	/*
	 * Only read the actual frequency if the device is already awake;
	 * debugfs should not wake the hardware just to report it.
	 */
	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* Punit access on VLV/CHV is serialised by pcu_lock */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
		intel_runtime_pm_put(dev_priv);
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Walk every open DRM client and report its boost count. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* RCU protects the pid -> task_struct lookup */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Sample the up/down evaluation-interval counters together */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2303
2304 static int i915_llc(struct seq_file *m, void *data)
2305 {
2306         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2307         const bool edram = INTEL_GEN(dev_priv) > 8;
2308
2309         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2310         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2311                    intel_uncore_edram_size(dev_priv)/1024/1024);
2312
2313         return 0;
2314 }
2315
2316 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2317 {
2318         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2319         struct drm_printer p;
2320
2321         if (!HAS_HUC(dev_priv))
2322                 return -ENODEV;
2323
2324         p = drm_seq_file_printer(m);
2325         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2326
2327         intel_runtime_pm_get(dev_priv);
2328         seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2329         intel_runtime_pm_put(dev_priv);
2330
2331         return 0;
2332 }
2333
/*
 * debugfs: dump GuC firmware state plus the live GUC_STATUS fields and
 * the 16 SOFT_SCRATCH registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Register reads below require the device to be awake */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2365
2366 static const char *
2367 stringify_guc_log_type(enum guc_log_buffer_type type)
2368 {
2369         switch (type) {
2370         case GUC_ISR_LOG_BUFFER:
2371                 return "ISR";
2372         case GUC_DPC_LOG_BUFFER:
2373                 return "DPC";
2374         case GUC_CRASH_DUMP_LOG_BUFFER:
2375                 return "CRASH";
2376         default:
2377                 MISSING_CASE(type);
2378         }
2379
2380         return "";
2381 }
2382
/*
 * Print GuC log-relay statistics: the relay "full" count and, for each
 * log buffer type, its flush and sampled-overflow counters.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	/* Stats are only collected while the relay is running */
	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2406
2407 static void i915_guc_client_info(struct seq_file *m,
2408                                  struct drm_i915_private *dev_priv,
2409                                  struct intel_guc_client *client)
2410 {
2411         struct intel_engine_cs *engine;
2412         enum intel_engine_id id;
2413         uint64_t tot = 0;
2414
2415         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2416                 client->priority, client->stage_id, client->proc_desc_offset);
2417         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2418                 client->doorbell_id, client->doorbell_offset);
2419
2420         for_each_engine(engine, dev_priv, id) {
2421                 u64 submissions = client->submissions[id];
2422                 tot += submissions;
2423                 seq_printf(m, "\tSubmissions: %llu %s\n",
2424                                 submissions, engine->name);
2425         }
2426         seq_printf(m, "\tTotal: %llu\n", tot);
2427 }
2428
2429 static int i915_guc_info(struct seq_file *m, void *data)
2430 {
2431         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2432         const struct intel_guc *guc = &dev_priv->guc;
2433
2434         if (!USES_GUC(dev_priv))
2435                 return -ENODEV;
2436
2437         i915_guc_log_info(m, dev_priv);
2438
2439         if (!USES_GUC_SUBMISSION(dev_priv))
2440                 return 0;
2441
2442         GEM_BUG_ON(!guc->execbuf_client);
2443
2444         seq_printf(m, "\nDoorbell map:\n");
2445         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2446         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2447
2448         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2449         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2450         if (guc->preempt_client) {
2451                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2452                            guc->preempt_client);
2453                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2454         }
2455
2456         /* Add more as required ... */
2457
2458         return 0;
2459 }
2460
/*
 * debugfs: walk the GuC stage-descriptor pool and print every active
 * descriptor along with its per-engine execlist context state.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only active descriptors carry meaningful state */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Dump the execlist context for each engine the client uses */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2514
/*
 * debugfs: hexdump a GuC log buffer — either the regular log, or the
 * load-error capture when the debugfs node's data pointer is set.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* Nothing captured yet: succeed with empty output */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/*
	 * Print four u32 words per line.  NOTE(review): i+1..i+3 are read
	 * without re-checking the bound, so this assumes the object size is
	 * a multiple of 16 bytes — confirm (log buffers are page-sized).
	 */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2553
2554 static int i915_guc_log_level_get(void *data, u64 *val)
2555 {
2556         struct drm_i915_private *dev_priv = data;
2557
2558         if (!USES_GUC(dev_priv))
2559                 return -ENODEV;
2560
2561         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2562
2563         return 0;
2564 }
2565
2566 static int i915_guc_log_level_set(void *data, u64 val)
2567 {
2568         struct drm_i915_private *dev_priv = data;
2569
2570         if (!USES_GUC(dev_priv))
2571                 return -ENODEV;
2572
2573         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2574 }
2575
/* Expose the GuC log level get/set pair as a single debugfs attribute. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2579
2580 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2581 {
2582         struct drm_i915_private *dev_priv = inode->i_private;
2583
2584         if (!USES_GUC(dev_priv))
2585                 return -ENODEV;
2586
2587         file->private_data = &dev_priv->guc.log;
2588
2589         return intel_guc_log_relay_open(&dev_priv->guc.log);
2590 }
2591
/*
 * debugfs write: any write to the relay file forces a flush of the GuC
 * log into the relay channel; the written bytes themselves are ignored.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	/* Claim the whole write so userspace does not retry */
	return cnt;
}
2604
2605 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2606 {
2607         struct drm_i915_private *dev_priv = inode->i_private;
2608
2609         intel_guc_log_relay_close(&dev_priv->guc.log);
2610
2611         return 0;
2612 }
2613
/*
 * debugfs file wiring for the GuC log relay: open starts the relay,
 * write flushes it, release closes it.
 */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2620
2621 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2622 {
2623         u8 val;
2624         static const char * const sink_status[] = {
2625                 "inactive",
2626                 "transition to active, capture and display",
2627                 "active, display from RFB",
2628                 "active, capture and display on sink device timings",
2629                 "transition to inactive, capture and display, timing re-sync",
2630                 "reserved",
2631                 "reserved",
2632                 "sink internal error",
2633         };
2634         struct drm_connector *connector = m->private;
2635         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2636         struct intel_dp *intel_dp =
2637                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2638         int ret;
2639
2640         if (!CAN_PSR(dev_priv)) {
2641                 seq_puts(m, "PSR Unsupported\n");
2642                 return -ENODEV;
2643         }
2644
2645         if (connector->status != connector_status_connected)
2646                 return -ENODEV;
2647
2648         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2649
2650         if (ret == 1) {
2651                 const char *str = "unknown";
2652
2653                 val &= DP_PSR_SINK_STATE_MASK;
2654                 if (val < ARRAY_SIZE(sink_status))
2655                         str = sink_status[val];
2656                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2657         } else {
2658                 return ret;
2659         }
2660
2661         return 0;
2662 }
/* Generate i915_psr_sink_status_fops from the show callback above. */
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2664
2665 static void
2666 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2667 {
2668         u32 val, psr_status;
2669
2670         if (dev_priv->psr.psr2_enabled) {
2671                 static const char * const live_status[] = {
2672                         "IDLE",
2673                         "CAPTURE",
2674                         "CAPTURE_FS",
2675                         "SLEEP",
2676                         "BUFON_FW",
2677                         "ML_UP",
2678                         "SU_STANDBY",
2679                         "FAST_SLEEP",
2680                         "DEEP_SLEEP",
2681                         "BUF_ON",
2682                         "TG_ON"
2683                 };
2684                 psr_status = I915_READ(EDP_PSR2_STATUS);
2685                 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2686                         EDP_PSR2_STATUS_STATE_SHIFT;
2687                 if (val < ARRAY_SIZE(live_status)) {
2688                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2689                                    psr_status, live_status[val]);
2690                         return;
2691                 }
2692         } else {
2693                 static const char * const live_status[] = {
2694                         "IDLE",
2695                         "SRDONACK",
2696                         "SRDENT",
2697                         "BUFOFF",
2698                         "BUFON",
2699                         "AUXACK",
2700                         "SRDOFFACK",
2701                         "SRDENT_ON",
2702                 };
2703                 psr_status = I915_READ(EDP_PSR_STATUS);
2704                 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2705                         EDP_PSR_STATUS_STATE_SHIFT;
2706                 if (val < ARRAY_SIZE(live_status)) {
2707                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2708                                    psr_status, live_status[val]);
2709                         return;
2710                 }
2711         }
2712
2713         seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2714 }
2715
/*
 * debugfs: summarise eDP PSR (Panel Self Refresh) software state and
 * cross-check it against the hardware enable bits.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* psr.lock serialises against the PSR work/irq handlers */
	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* Read back the enable bit from the mode-appropriate control reg */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* Entry/exit timestamps are only recorded in IRQ debug mode */
	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2773
/*
 * debugfs set: change the PSR debug mode.  Needs modeset locks, so it
 * follows the standard drm_modeset acquire/backoff/retry dance to
 * resolve lock contention with concurrent modesets.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Drop all held locks, wait for the contender, then retry */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2805
2806 static int
2807 i915_edp_psr_debug_get(void *data, u64 *val)
2808 {
2809         struct drm_i915_private *dev_priv = data;
2810
2811         if (!CAN_PSR(dev_priv))
2812                 return -ENODEV;
2813
2814         *val = READ_ONCE(dev_priv->psr.debug);
2815         return 0;
2816 }
2817
/* Expose the PSR debug get/set pair as a single debugfs attribute. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2821
/*
 * debugfs: report GPU energy consumption in microjoules, scaling the
 * MCH_SECP_NRG_STTS counter by the RAPL energy-unit exponent.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 of the MSR hold the energy-status-unit exponent */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	/* NOTE(review): no trailing newline — userspace may depend on it */
	seq_printf(m, "%llu", power);

	return 0;
}
2848
2849 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2850 {
2851         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2852         struct pci_dev *pdev = dev_priv->drm.pdev;
2853
2854         if (!HAS_RUNTIME_PM(dev_priv))
2855                 seq_puts(m, "Runtime power management not supported\n");
2856
2857         seq_printf(m, "GPU idle: %s (epoch %u)\n",
2858                    yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2859         seq_printf(m, "IRQs disabled: %s\n",
2860                    yesno(!intel_irqs_enabled(dev_priv)));
2861 #ifdef CONFIG_PM
2862         seq_printf(m, "Usage count: %d\n",
2863                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2864 #else
2865         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2866 #endif
2867         seq_printf(m, "PCI device power state: %s [%d]\n",
2868                    pci_power_name(pdev->current_state),
2869                    pdev->current_state);
2870
2871         return 0;
2872 }
2873
2874 static int i915_power_domain_info(struct seq_file *m, void *unused)
2875 {
2876         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2877         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2878         int i;
2879
2880         mutex_lock(&power_domains->lock);
2881
2882         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2883         for (i = 0; i < power_domains->power_well_count; i++) {
2884                 struct i915_power_well *power_well;
2885                 enum intel_display_power_domain power_domain;
2886
2887                 power_well = &power_domains->power_wells[i];
2888                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2889                            power_well->count);
2890
2891                 for_each_power_domain(power_domain, power_well->desc->domains)
2892                         seq_printf(m, "  %-23s %d\n",
2893                                  intel_display_power_domain_str(power_domain),
2894                                  power_domains->domain_use_count[power_domain]);
2895         }
2896
2897         mutex_unlock(&power_domains->lock);
2898
2899         return 0;
2900 }
2901
/* debugfs: report DMC/CSR firmware state and the DC-state entry counters. */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers below are valid */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The DC count registers below are only known up to gen11 */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2942
2943 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2944                                  struct drm_display_mode *mode)
2945 {
2946         int i;
2947
2948         for (i = 0; i < tabs; i++)
2949                 seq_putc(m, '\t');
2950
2951         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2952 }
2953
2954 static void intel_encoder_info(struct seq_file *m,
2955                                struct intel_crtc *intel_crtc,
2956                                struct intel_encoder *intel_encoder)
2957 {
2958         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2959         struct drm_device *dev = &dev_priv->drm;
2960         struct drm_crtc *crtc = &intel_crtc->base;
2961         struct intel_connector *intel_connector;
2962         struct drm_encoder *encoder;
2963
2964         encoder = &intel_encoder->base;
2965         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2966                    encoder->base.id, encoder->name);
2967         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2968                 struct drm_connector *connector = &intel_connector->base;
2969                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2970                            connector->base.id,
2971                            connector->name,
2972                            drm_get_connector_status_name(connector->status));
2973                 if (connector->status == connector_status_connected) {
2974                         struct drm_display_mode *mode = &crtc->mode;
2975                         seq_printf(m, ", mode:\n");
2976                         intel_seq_print_mode(m, 2, mode);
2977                 } else {
2978                         seq_putc(m, '\n');
2979                 }
2980         }
2981 }
2982
2983 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2984 {
2985         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2986         struct drm_device *dev = &dev_priv->drm;
2987         struct drm_crtc *crtc = &intel_crtc->base;
2988         struct intel_encoder *intel_encoder;
2989         struct drm_plane_state *plane_state = crtc->primary->state;
2990         struct drm_framebuffer *fb = plane_state->fb;
2991
2992         if (fb)
2993                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2994                            fb->base.id, plane_state->src_x >> 16,
2995                            plane_state->src_y >> 16, fb->width, fb->height);
2996         else
2997                 seq_puts(m, "\tprimary plane disabled\n");
2998         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2999                 intel_encoder_info(m, intel_crtc, intel_encoder);
3000 }
3001
3002 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3003 {
3004         struct drm_display_mode *mode = panel->fixed_mode;
3005
3006         seq_printf(m, "\tfixed mode:\n");
3007         intel_seq_print_mode(m, 2, mode);
3008 }
3009
/*
 * Print DP-specific connector details: DPCD revision, audio support,
 * the fixed panel mode for eDP, and any DP branch-device info.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3024
3025 static void intel_dp_mst_info(struct seq_file *m,
3026                           struct intel_connector *intel_connector)
3027 {
3028         struct intel_encoder *intel_encoder = intel_connector->encoder;
3029         struct intel_dp_mst_encoder *intel_mst =
3030                 enc_to_mst(&intel_encoder->base);
3031         struct intel_digital_port *intel_dig_port = intel_mst->primary;
3032         struct intel_dp *intel_dp = &intel_dig_port->dp;
3033         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3034                                         intel_connector->port);
3035
3036         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3037 }
3038
3039 static void intel_hdmi_info(struct seq_file *m,
3040                             struct intel_connector *intel_connector)
3041 {
3042         struct intel_encoder *intel_encoder = intel_connector->encoder;
3043         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3044
3045         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3046 }
3047
3048 static void intel_lvds_info(struct seq_file *m,
3049                             struct intel_connector *intel_connector)
3050 {
3051         intel_panel_info(m, &intel_connector->panel);
3052 }
3053
/*
 * Dump one connector: identity and status, basic display_info, the
 * encoder-type specific details (DP/eDP, LVDS, HDMI), and finally the
 * probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
                                 struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_connector->encoder;
        struct drm_display_mode *mode;

        seq_printf(m, "connector %d: type %s, status: %s\n",
                   connector->base.id, connector->name,
                   drm_get_connector_status_name(connector->status));

        /* Nothing further to report for a disconnected connector. */
        if (connector->status == connector_status_disconnected)
                return;

        seq_printf(m, "\tname: %s\n", connector->display_info.name);
        seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
                   connector->display_info.width_mm,
                   connector->display_info.height_mm);
        seq_printf(m, "\tsubpixel order: %s\n",
                   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
        seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

        /* No attached encoder: skip the encoder-specific sections. */
        if (!intel_encoder)
                return;

        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DisplayPort:
        case DRM_MODE_CONNECTOR_eDP:
                if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
                        intel_dp_mst_info(m, intel_connector);
                else
                        intel_dp_info(m, intel_connector);
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                if (intel_encoder->type == INTEL_OUTPUT_LVDS)
                        intel_lvds_info(m, intel_connector);
                break;
        case DRM_MODE_CONNECTOR_HDMIA:
                /* DDI encoders can also drive HDMI, hence the second check. */
                if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
                    intel_encoder->type == INTEL_OUTPUT_DDI)
                        intel_hdmi_info(m, intel_connector);
                break;
        default:
                break;
        }

        seq_printf(m, "\tmodes:\n");
        list_for_each_entry(mode, &connector->modes, head)
                intel_seq_print_mode(m, 2, mode);
}
3104
/* Map a drm_plane_type to a short fixed label for the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
        switch (type) {
        case DRM_PLANE_TYPE_OVERLAY:
                return "OVL";
        case DRM_PLANE_TYPE_PRIMARY:
                return "PRI";
        case DRM_PLANE_TYPE_CURSOR:
                return "CUR";
        /*
         * Deliberately omitting default: to generate compiler warnings
         * when a new drm_plane_type gets added.
         */
        }

        /* Reached only if the enum grew without this switch being updated. */
        return "unknown";
}
3122
/*
 * Format a plane rotation/reflection bitmask as a human-readable string.
 *
 * NOTE(review): the returned string lives in a static buffer, so this
 * helper is not reentrant; it relies on debugfs readers being serialized.
 * A caller-provided buffer would be safer — confirm before reusing this
 * from any other context.
 */
static const char *plane_rotation(unsigned int rotation)
{
        static char buf[48];
        /*
         * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
        snprintf(buf, sizeof(buf),
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
                 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
                 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);

        return buf;
}
3142
3143 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3144 {
3145         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3146         struct drm_device *dev = &dev_priv->drm;
3147         struct intel_plane *intel_plane;
3148
3149         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3150                 struct drm_plane_state *state;
3151                 struct drm_plane *plane = &intel_plane->base;
3152                 struct drm_format_name_buf format_name;
3153
3154                 if (!plane->state) {
3155                         seq_puts(m, "plane->state is NULL!\n");
3156                         continue;
3157                 }
3158
3159                 state = plane->state;
3160
3161                 if (state->fb) {
3162                         drm_get_format_name(state->fb->format->format,
3163                                             &format_name);
3164                 } else {
3165                         sprintf(format_name.str, "N/A");
3166                 }
3167
3168                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3169                            plane->base.id,
3170                            plane_type(intel_plane->base.type),
3171                            state->crtc_x, state->crtc_y,
3172                            state->crtc_w, state->crtc_h,
3173                            (state->src_x >> 16),
3174                            ((state->src_x & 0xffff) * 15625) >> 10,
3175                            (state->src_y >> 16),
3176                            ((state->src_y & 0xffff) * 15625) >> 10,
3177                            (state->src_w >> 16),
3178                            ((state->src_w & 0xffff) * 15625) >> 10,
3179                            (state->src_h >> 16),
3180                            ((state->src_h & 0xffff) * 15625) >> 10,
3181                            format_name.str,
3182                            plane_rotation(state->rotation));
3183         }
3184 }
3185
/* Dump the pipe's scaler state: user mask, active id and per-scaler mode. */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct intel_crtc_state *pipe_config;
        int num_scalers = intel_crtc->num_scalers;
        int i;

        pipe_config = to_intel_crtc_state(intel_crtc->base.state);

        /* Not all platforms have a scaler */
        if (num_scalers) {
                seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
                           num_scalers,
                           pipe_config->scaler_state.scaler_users,
                           pipe_config->scaler_state.scaler_id);

                for (i = 0; i < num_scalers; i++) {
                        struct intel_scaler *sc =
                                        &pipe_config->scaler_state.scalers[i];

                        seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
                                   i, yesno(sc->in_use), sc->mode);
                }
                seq_puts(m, "\n");
        } else {
                seq_puts(m, "\tNo scalers available on this platform\n");
        }
}
3213
/*
 * debugfs i915_display_info: dump all CRTCs (with cursor, scaler and
 * plane details for active ones) followed by all connectors.
 *
 * Holds a runtime-pm reference for the duration; takes each CRTC's
 * modeset lock around the per-CRTC dump and mode_config.mutex around
 * the connector walk.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        intel_runtime_pm_get(dev_priv);
        seq_printf(m, "CRTC info\n");
        seq_printf(m, "---------\n");
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *pipe_config;

                drm_modeset_lock(&crtc->base.mutex, NULL);
                pipe_config = to_intel_crtc_state(crtc->base.state);

                seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
                           crtc->base.base.id, pipe_name(crtc->pipe),
                           yesno(pipe_config->base.active),
                           pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                           yesno(pipe_config->dither), pipe_config->pipe_bpp);

                /* Cursor/scaler/plane state is only meaningful when active. */
                if (pipe_config->base.active) {
                        struct intel_plane *cursor =
                                to_intel_plane(crtc->base.cursor);

                        intel_crtc_info(m, crtc);

                        seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
                                   yesno(cursor->base.state->visible),
                                   cursor->base.state->crtc_x,
                                   cursor->base.state->crtc_y,
                                   cursor->base.state->crtc_w,
                                   cursor->base.state->crtc_h,
                                   cursor->cursor.base);
                        intel_scaler_info(m, crtc);
                        intel_plane_info(m, crtc);
                }

                seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
                           yesno(!crtc->cpu_fifo_underrun_disabled),
                           yesno(!crtc->pch_fifo_underrun_disabled));
                drm_modeset_unlock(&crtc->base.mutex);
        }

        seq_printf(m, "\n");
        seq_printf(m, "Connector info\n");
        seq_printf(m, "--------------\n");
        mutex_lock(&dev->mode_config.mutex);
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter)
                intel_connector_info(m, connector);
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        intel_runtime_pm_put(dev_priv);

        return 0;
}
3274
3275 static int i915_engine_info(struct seq_file *m, void *unused)
3276 {
3277         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3278         struct intel_engine_cs *engine;
3279         enum intel_engine_id id;
3280         struct drm_printer p;
3281
3282         intel_runtime_pm_get(dev_priv);
3283
3284         seq_printf(m, "GT awake? %s (epoch %u)\n",
3285                    yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3286         seq_printf(m, "Global active requests: %d\n",
3287                    dev_priv->gt.active_requests);
3288         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3289                    dev_priv->info.cs_timestamp_frequency_khz);
3290
3291         p = drm_seq_file_printer(m);
3292         for_each_engine(engine, dev_priv, id)
3293                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3294
3295         intel_runtime_pm_put(dev_priv);
3296
3297         return 0;
3298 }
3299
3300 static int i915_rcs_topology(struct seq_file *m, void *unused)
3301 {
3302         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3303         struct drm_printer p = drm_seq_file_printer(m);
3304
3305         intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3306
3307         return 0;
3308 }
3309
3310 static int i915_shrinker_info(struct seq_file *m, void *unused)
3311 {
3312         struct drm_i915_private *i915 = node_to_i915(m->private);
3313
3314         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3315         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3316
3317         return 0;
3318 }
3319
/*
 * debugfs i915_shared_dplls: dump every shared DPLL's software state
 * (crtc/active masks, on/off) plus its tracked hardware register state.
 * The mg_* registers apply to ICL+ MG PLLs; on other platforms those
 * fields are simply printed as stored (zero when unused).
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        /* Hold all modeset locks so the DPLL state can't change under us. */
        drm_modeset_lock_all(dev);
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
                           pll->info->id);
                seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
                           pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
                seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
                seq_printf(m, " dpll_md: 0x%08x\n",
                           pll->state.hw_state.dpll_md);
                seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
                seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
                seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
                seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
                seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
                seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
                           pll->state.hw_state.mg_refclkin_ctl);
                seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_coreclkctl1);
                seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
                           pll->state.hw_state.mg_clktop2_hsclkctl);
                seq_printf(m, " mg_pll_div0:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div0);
                seq_printf(m, " mg_pll_div1:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div1);
                seq_printf(m, " mg_pll_lf:    0x%08x\n",
                           pll->state.hw_state.mg_pll_lf);
                seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
                           pll->state.hw_state.mg_pll_frac_lock);
                seq_printf(m, " mg_pll_ssc:   0x%08x\n",
                           pll->state.hw_state.mg_pll_ssc);
                seq_printf(m, " mg_pll_bias:  0x%08x\n",
                           pll->state.hw_state.mg_pll_bias);
                seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_tdc_coldst_bias);
        }
        drm_modeset_unlock_all(dev);

        return 0;
}
3368
3369 static int i915_wa_registers(struct seq_file *m, void *unused)
3370 {
3371         struct drm_i915_private *i915 = node_to_i915(m->private);
3372         const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3373         struct i915_wa *wa;
3374         unsigned int i;
3375
3376         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3377         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3378                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3379                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3380
3381         return 0;
3382 }
3383
3384 static int i915_ipc_status_show(struct seq_file *m, void *data)
3385 {
3386         struct drm_i915_private *dev_priv = m->private;
3387
3388         seq_printf(m, "Isochronous Priority Control: %s\n",
3389                         yesno(dev_priv->ipc_enabled));
3390         return 0;
3391 }
3392
3393 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3394 {
3395         struct drm_i915_private *dev_priv = inode->i_private;
3396
3397         if (!HAS_IPC(dev_priv))
3398                 return -ENODEV;
3399
3400         return single_open(file, i915_ipc_status_show, dev_priv);
3401 }
3402
3403 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3404                                      size_t len, loff_t *offp)
3405 {
3406         struct seq_file *m = file->private_data;
3407         struct drm_i915_private *dev_priv = m->private;
3408         int ret;
3409         bool enable;
3410
3411         ret = kstrtobool_from_user(ubuf, len, &enable);
3412         if (ret < 0)
3413                 return ret;
3414
3415         intel_runtime_pm_get(dev_priv);
3416         if (!dev_priv->ipc_enabled && enable)
3417                 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3418         dev_priv->wm.distrust_bios_wm = true;
3419         dev_priv->ipc_enabled = enable;
3420         intel_enable_ipc(dev_priv);
3421         intel_runtime_pm_put(dev_priv);
3422
3423         return len;
3424 }
3425
/* debugfs i915_ipc_status: read current IPC state, write a bool to change it. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3434
/*
 * debugfs i915_ddb_info: dump the display data buffer (DDB) allocation
 * per pipe and plane (start, end, size). Gen9+ only — earlier hardware
 * has no DDB.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_entry *entry;
        struct intel_crtc *crtc;

        if (INTEL_GEN(dev_priv) < 9)
                return -ENODEV;

        /* Lock out modesets so the DDB allocation stays stable. */
        drm_modeset_lock_all(dev);

        seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                enum pipe pipe = crtc->pipe;
                enum plane_id plane_id;

                seq_printf(m, "Pipe %c\n", pipe_name(pipe));

                for_each_plane_id_on_crtc(crtc, plane_id) {
                        entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
                        seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
                                   entry->start, entry->end,
                                   skl_ddb_entry_size(entry));
                }

                /* Cursor gets its own row after the regular planes. */
                entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
                seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }

        drm_modeset_unlock_all(dev);

        return 0;
}
3473
/*
 * Dump DRRS (Dynamic Refresh Rate Switching) state for one CRTC: the
 * connector(s) driving it, the VBT-reported DRRS type, and — when the
 * current crtc state has DRRS — the runtime state under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Name the connector(s) currently attached to this CRTC. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                                        drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                /* Report the refresh rate matching the current DRRS state. */
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                                drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3548
3549 static int i915_drrs_status(struct seq_file *m, void *unused)
3550 {
3551         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3552         struct drm_device *dev = &dev_priv->drm;
3553         struct intel_crtc *intel_crtc;
3554         int active_crtc_cnt = 0;
3555
3556         drm_modeset_lock_all(dev);
3557         for_each_intel_crtc(dev, intel_crtc) {
3558                 if (intel_crtc->base.state->active) {
3559                         active_crtc_cnt++;
3560                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3561
3562                         drrs_status_per_crtc(m, dev, intel_crtc);
3563                 }
3564         }
3565         drm_modeset_unlock_all(dev);
3566
3567         if (!active_crtc_cnt)
3568                 seq_puts(m, "No active crtc found\n");
3569
3570         return 0;
3571 }
3572
3573 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3574 {
3575         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3576         struct drm_device *dev = &dev_priv->drm;
3577         struct intel_encoder *intel_encoder;
3578         struct intel_digital_port *intel_dig_port;
3579         struct drm_connector *connector;
3580         struct drm_connector_list_iter conn_iter;
3581
3582         drm_connector_list_iter_begin(dev, &conn_iter);
3583         drm_for_each_connector_iter(connector, &conn_iter) {
3584                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3585                         continue;
3586
3587                 intel_encoder = intel_attached_encoder(connector);
3588                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3589                         continue;
3590
3591                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3592                 if (!intel_dig_port->dp.can_mst)
3593                         continue;
3594
3595                 seq_printf(m, "MST Source Port %c\n",
3596                            port_name(intel_dig_port->base.port));
3597                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3598         }
3599         drm_connector_list_iter_end(&conn_iter);
3600
3601         return 0;
3602 }
3603
/*
 * Write handler for the DP compliance "test active" knob: parse an
 * integer from userspace and arm (value 1) or disarm compliance test
 * mode on every connected non-MST DP connector.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
                                                  const char __user *ubuf,
                                                  size_t len, loff_t *offp)
{
        char *input_buffer;
        int status = 0;
        struct drm_device *dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;
        int val = 0;

        dev = ((struct seq_file *)file->private_data)->private;

        if (len == 0)
                return 0;

        /* Copy the user string, NUL-terminated, for kstrtoint(). */
        input_buffer = memdup_user_nul(ubuf, len);
        if (IS_ERR(input_buffer))
                return PTR_ERR(input_buffer);

        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        status = kstrtoint(input_buffer, 10, &val);
                        if (status < 0)
                                break;
                        DRM_DEBUG_DRIVER("Got %d for test active\n", val);
                        /* To prevent erroneous activation of the compliance
                         * testing code, only accept an actual value of 1 here
                         */
                        if (val == 1)
                                intel_dp->compliance.test_active = 1;
                        else
                                intel_dp->compliance.test_active = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        kfree(input_buffer);
        /* Propagate a parse failure from inside the loop. */
        if (status < 0)
                return status;

        *offp += len;
        return len;
}
3662
3663 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3664 {
3665         struct drm_i915_private *dev_priv = m->private;
3666         struct drm_device *dev = &dev_priv->drm;
3667         struct drm_connector *connector;
3668         struct drm_connector_list_iter conn_iter;
3669         struct intel_dp *intel_dp;
3670
3671         drm_connector_list_iter_begin(dev, &conn_iter);
3672         drm_for_each_connector_iter(connector, &conn_iter) {
3673                 struct intel_encoder *encoder;
3674
3675                 if (connector->connector_type !=
3676                     DRM_MODE_CONNECTOR_DisplayPort)
3677                         continue;
3678
3679                 encoder = to_intel_encoder(connector->encoder);
3680                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3681                         continue;
3682
3683                 if (encoder && connector->status == connector_status_connected) {
3684                         intel_dp = enc_to_intel_dp(&encoder->base);
3685                         if (intel_dp->compliance.test_active)
3686                                 seq_puts(m, "1");
3687                         else
3688                                 seq_puts(m, "0");
3689                 } else
3690                         seq_puts(m, "0");
3691         }
3692         drm_connector_list_iter_end(&conn_iter);
3693
3694         return 0;
3695 }
3696
3697 static int i915_displayport_test_active_open(struct inode *inode,
3698                                              struct file *file)
3699 {
3700         return single_open(file, i915_displayport_test_active_show,
3701                            inode->i_private);
3702 }
3703
/* debugfs i915_dp_test_active: read/arm DP compliance test mode. */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3712
/*
 * Show the pending DP compliance test payload per connected non-MST DP
 * connector: the EDID checksum for EDID-read tests, or the requested
 * h/v resolution and bpc for video-pattern tests. Other connectors
 * print "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (intel_dp->compliance.test_type ==
                            DP_TEST_LINK_EDID_READ)
                                seq_printf(m, "%lx",
                                           intel_dp->compliance.test_data.edid);
                        else if (intel_dp->compliance.test_type ==
                                 DP_TEST_LINK_VIDEO_PATTERN) {
                                seq_printf(m, "hdisplay: %d\n",
                                           intel_dp->compliance.test_data.hdisplay);
                                seq_printf(m, "vdisplay: %d\n",
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
                        }
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3756
/*
 * Report the pending DP compliance test type for every DisplayPort
 * connector as a hex value, printing "0" for outputs that have no
 * encoder or are not connected. MST outputs are skipped entirely.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		/* Only DisplayPort connectors carry compliance state. */
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST streams are skipped here. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3788
3789 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3790 {
3791         struct drm_i915_private *dev_priv = m->private;
3792         struct drm_device *dev = &dev_priv->drm;
3793         int level;
3794         int num_levels;
3795
3796         if (IS_CHERRYVIEW(dev_priv))
3797                 num_levels = 3;
3798         else if (IS_VALLEYVIEW(dev_priv))
3799                 num_levels = 1;
3800         else if (IS_G4X(dev_priv))
3801                 num_levels = 3;
3802         else
3803                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3804
3805         drm_modeset_lock_all(dev);
3806
3807         for (level = 0; level < num_levels; level++) {
3808                 unsigned int latency = wm[level];
3809
3810                 /*
3811                  * - WM1+ latency values in 0.5us units
3812                  * - latencies are in us on gen9/vlv/chv
3813                  */
3814                 if (INTEL_GEN(dev_priv) >= 9 ||
3815                     IS_VALLEYVIEW(dev_priv) ||
3816                     IS_CHERRYVIEW(dev_priv) ||
3817                     IS_G4X(dev_priv))
3818                         latency *= 10;
3819                 else if (level > 0)
3820                         latency *= 5;
3821
3822                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3823                            level, wm[level], latency / 10, latency % 10);
3824         }
3825
3826         drm_modeset_unlock_all(dev);
3827 }
3828
3829 static int pri_wm_latency_show(struct seq_file *m, void *data)
3830 {
3831         struct drm_i915_private *dev_priv = m->private;
3832         const uint16_t *latencies;
3833
3834         if (INTEL_GEN(dev_priv) >= 9)
3835                 latencies = dev_priv->wm.skl_latency;
3836         else
3837                 latencies = dev_priv->wm.pri_latency;
3838
3839         wm_latency_show(m, latencies);
3840
3841         return 0;
3842 }
3843
3844 static int spr_wm_latency_show(struct seq_file *m, void *data)
3845 {
3846         struct drm_i915_private *dev_priv = m->private;
3847         const uint16_t *latencies;
3848
3849         if (INTEL_GEN(dev_priv) >= 9)
3850                 latencies = dev_priv->wm.skl_latency;
3851         else
3852                 latencies = dev_priv->wm.spr_latency;
3853
3854         wm_latency_show(m, latencies);
3855
3856         return 0;
3857 }
3858
3859 static int cur_wm_latency_show(struct seq_file *m, void *data)
3860 {
3861         struct drm_i915_private *dev_priv = m->private;
3862         const uint16_t *latencies;
3863
3864         if (INTEL_GEN(dev_priv) >= 9)
3865                 latencies = dev_priv->wm.skl_latency;
3866         else
3867                 latencies = dev_priv->wm.cur_latency;
3868
3869         wm_latency_show(m, latencies);
3870
3871         return 0;
3872 }
3873
3874 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3875 {
3876         struct drm_i915_private *dev_priv = inode->i_private;
3877
3878         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3879                 return -ENODEV;
3880
3881         return single_open(file, pri_wm_latency_show, dev_priv);
3882 }
3883
3884 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3885 {
3886         struct drm_i915_private *dev_priv = inode->i_private;
3887
3888         if (HAS_GMCH_DISPLAY(dev_priv))
3889                 return -ENODEV;
3890
3891         return single_open(file, spr_wm_latency_show, dev_priv);
3892 }
3893
3894 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3895 {
3896         struct drm_i915_private *dev_priv = inode->i_private;
3897
3898         if (HAS_GMCH_DISPLAY(dev_priv))
3899                 return -ENODEV;
3900
3901         return single_open(file, cur_wm_latency_show, dev_priv);
3902 }
3903
/*
 * Parse a space-separated list of raw watermark latency values from
 * userspace and store them into @wm under the modeset locks. The write
 * must supply exactly as many values as the platform has levels.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must match the per-platform level count in wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Leave room for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Reject input with too few or too many values for this platform. */
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3948
3949
3950 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3951                                     size_t len, loff_t *offp)
3952 {
3953         struct seq_file *m = file->private_data;
3954         struct drm_i915_private *dev_priv = m->private;
3955         uint16_t *latencies;
3956
3957         if (INTEL_GEN(dev_priv) >= 9)
3958                 latencies = dev_priv->wm.skl_latency;
3959         else
3960                 latencies = dev_priv->wm.pri_latency;
3961
3962         return wm_latency_write(file, ubuf, len, offp, latencies);
3963 }
3964
3965 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3966                                     size_t len, loff_t *offp)
3967 {
3968         struct seq_file *m = file->private_data;
3969         struct drm_i915_private *dev_priv = m->private;
3970         uint16_t *latencies;
3971
3972         if (INTEL_GEN(dev_priv) >= 9)
3973                 latencies = dev_priv->wm.skl_latency;
3974         else
3975                 latencies = dev_priv->wm.spr_latency;
3976
3977         return wm_latency_write(file, ubuf, len, offp, latencies);
3978 }
3979
3980 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3981                                     size_t len, loff_t *offp)
3982 {
3983         struct seq_file *m = file->private_data;
3984         struct drm_i915_private *dev_priv = m->private;
3985         uint16_t *latencies;
3986
3987         if (INTEL_GEN(dev_priv) >= 9)
3988                 latencies = dev_priv->wm.skl_latency;
3989         else
3990                 latencies = dev_priv->wm.cur_latency;
3991
3992         return wm_latency_write(file, ubuf, len, offp, latencies);
3993 }
3994
/* debugfs fops for the primary plane watermark latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* debugfs fops for the sprite plane watermark latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* debugfs fops for the cursor watermark latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4021
/* i915_wedged read: report whether the GPU is terminally wedged. */
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

/*
 * i915_wedged write: inject a GPU hang. @val is an engine mask; the
 * selected engines are marked as stalled for hangcheck, error handling
 * is kicked off, and we wait for the reset handoff before returning.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Make hangcheck see the selected engines as stuck right now. */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Don't return until the reset has actually been handed off. */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
4068
/*
 * Store @val into the fault-injection mask @irq, but only after the
 * GPU has gone idle; afterwards drain the idle worker so the irq
 * state is disarmed before we return.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	/* Wait for the GPU to idle before touching the mask. */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4099
4100 static int
4101 i915_ring_missed_irq_get(void *data, u64 *val)
4102 {
4103         struct drm_i915_private *dev_priv = data;
4104
4105         *val = dev_priv->gpu_error.missed_irq_rings;
4106         return 0;
4107 }
4108
4109 static int
4110 i915_ring_missed_irq_set(void *data, u64 val)
4111 {
4112         struct drm_i915_private *i915 = data;
4113
4114         return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4115 }
4116
4117 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4118                         i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4119                         "0x%08llx\n");
4120
/* i915_ring_test_irq read: report the current interrupt-mask value. */
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

/*
 * i915_ring_test_irq write: mask user interrupt generation on the
 * rings selected by @val. Unsupported with GuC submission and on
 * gen11+.
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* GuC keeps the user interrupt permanently enabled for submission */
	if (USES_GUC_SUBMISSION(i915))
		return -ENODEV;

	/*
	 * From icl, we can no longer individually mask interrupt generation
	 * from each engine.
	 */
	if (INTEL_GEN(i915) >= 11)
		return -ENODEV;

	/* Restrict the mask to rings that actually exist on this device. */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4156
/*
 * Action bits accepted by the i915_drop_caches debugfs file; see
 * i915_drop_caches_set() for what each bit does.
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
/* Union of all supported flags; also what a read of the file reports. */
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4182
/*
 * i915_drop_caches write: perform the GEM cache-dropping actions
 * selected by the DROP_* bits in @val, in a fixed order (wedge if
 * requested, wait/retire under struct_mutex, reset, shrink, idle,
 * drain freed objects).
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	intel_runtime_pm_get(i915);

	/* Force a wedge if a reset was requested while engines are busy. */
	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (ret == 0 && val & DROP_RESET_SEQNO)
			ret = i915_gem_set_global_seqno(&i915->drm, 1);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Kick off a full reset to recover from the wedge forced above. */
	if (val & DROP_RESET_ACTIVE &&
	    i915_terminally_wedged(&i915->gpu_error)) {
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
		wait_on_bit(&i915->gpu_error.flags,
			    I915_RESET_HANDOFF,
			    TASK_UNINTERRUPTIBLE);
	}

	/* Run the shrinker as if under memory pressure. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/* Keep flushing the workers until the GT reports !awake. */
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4257
/*
 * i915_cache_sharing read: extract the cache sharing policy field
 * from GEN6_MBCUNIT_SNPCR (gen6/gen7 only).
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

/*
 * i915_cache_sharing write: set the cache sharing policy (0-3) via a
 * read-modify-write of GEN6_MBCUNIT_SNPCR (gen6/gen7 only).
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4306
/*
 * Fill in @sseu with the currently-powered slice/subslice/EU counts on
 * CHV, read back from the power-gating signal registers.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV only ever reports a single slice here. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each PG_ENABLE bit covers a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4339
/*
 * Fill in @sseu with the currently-powered slice/subslice/EU counts on
 * gen10, read back from the power-gating ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* EU-pair ACK bits for even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit accounts for a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4395
/*
 * Fill in @sseu with the currently-powered slice/subslice/EU counts on
 * gen9, read back from the power-gating ACK registers.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* EU-pair ACK bits for even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/*
		 * On gen9 big core the subslice mask comes straight from
		 * the static device info; only gen9 LP derives it from
		 * the per-subslice ACK bits below.
		 */
		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each ACK bit accounts for a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4451
/*
 * Fill in @sseu on BDW: only the enabled slice mask is read from
 * hardware (GEN8_GT_SLICE_INFO); subslice and EU counts come from the
 * static device info, minus one EU per fused-off 7-EU subslice.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4479
4480 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4481                                  const struct sseu_dev_info *sseu)
4482 {
4483         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4484         const char *type = is_available_info ? "Available" : "Enabled";
4485         int s;
4486
4487         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4488                    sseu->slice_mask);
4489         seq_printf(m, "  %s Slice Total: %u\n", type,
4490                    hweight8(sseu->slice_mask));
4491         seq_printf(m, "  %s Subslice Total: %u\n", type,
4492                    sseu_subslice_total(sseu));
4493         for (s = 0; s < fls(sseu->slice_mask); s++) {
4494                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4495                            s, hweight8(sseu->subslice_mask[s]));
4496         }
4497         seq_printf(m, "  %s EU Total: %u\n", type,
4498                    sseu->eu_total);
4499         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4500                    sseu->eu_per_subslice);
4501
4502         if (!is_available_info)
4503                 return;
4504
4505         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4506         if (HAS_POOLED_EU(dev_priv))
4507                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4508
4509         seq_printf(m, "  Has Slice Power Gating: %s\n",
4510                    yesno(sseu->has_slice_pg));
4511         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4512                    yesno(sseu->has_subslice_pg));
4513         seq_printf(m, "  Has EU Power Gating: %s\n",
4514                    yesno(sseu->has_eu_pg));
4515 }
4516
/*
 * debugfs entry printing both the static SSEU topology from the device
 * info and the current runtime (power-gated) status. gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Copy the topology limits; the status helpers fill in the rest. */
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	/* Hold a wakeref while reading the power-gating registers. */
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4553
/*
 * Holding the i915_forcewake file open keeps a runtime-pm wakeref and
 * user forcewake for the lifetime of the file descriptor.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* Nothing to hold before gen6. */
	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	/* Drop the references taken in i915_forcewake_open(). */
	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4585
4586 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4587 {
4588         struct drm_i915_private *dev_priv = m->private;
4589         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4590
4591         /* Synchronize with everything first in case there's been an HPD
4592          * storm, but we haven't finished handling it in the kernel yet
4593          */
4594         synchronize_irq(dev_priv->drm.irq);
4595         flush_work(&dev_priv->hotplug.dig_port_work);
4596         flush_work(&dev_priv->hotplug.hotplug_work);
4597
4598         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4599         seq_printf(m, "Detected: %s\n",
4600                    yesno(delayed_work_pending(&hotplug->reenable_work)));
4601
4602         return 0;
4603 }
4604
/*
 * Set the HPD storm detection threshold: accepts a decimal count, or
 * the literal string "reset" to restore the default. Writing also
 * clears the per-pin HPD stats and re-enables HPD if a storm was
 * being handled.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Leave room for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4653
/* seq_file boilerplate: bind i915_hpd_storm_ctl_show() to this file */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4658
4659 static const struct file_operations i915_hpd_storm_ctl_fops = {
4660         .owner = THIS_MODULE,
4661         .open = i915_hpd_storm_ctl_open,
4662         .read = seq_read,
4663         .llseek = seq_lseek,
4664         .release = single_release,
4665         .write = i915_hpd_storm_ctl_write
4666 };
4667
4668 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4669 {
4670         struct drm_i915_private *dev_priv = m->private;
4671
4672         seq_printf(m, "Enabled: %s\n",
4673                    yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4674
4675         return 0;
4676 }
4677
/* seq_file boilerplate: bind i915_hpd_short_storm_ctl_show() to this file */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4684
4685 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4686                                               const char __user *ubuf,
4687                                               size_t len, loff_t *offp)
4688 {
4689         struct seq_file *m = file->private_data;
4690         struct drm_i915_private *dev_priv = m->private;
4691         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4692         char *newline;
4693         char tmp[16];
4694         int i;
4695         bool new_state;
4696
4697         if (len >= sizeof(tmp))
4698                 return -EINVAL;
4699
4700         if (copy_from_user(tmp, ubuf, len))
4701                 return -EFAULT;
4702
4703         tmp[len] = '\0';
4704
4705         /* Strip newline, if any */
4706         newline = strchr(tmp, '\n');
4707         if (newline)
4708                 *newline = '\0';
4709
4710         /* Reset to the "default" state for this system */
4711         if (strcmp(tmp, "reset") == 0)
4712                 new_state = !HAS_DP_MST(dev_priv);
4713         else if (kstrtobool(tmp, &new_state) != 0)
4714                 return -EINVAL;
4715
4716         DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4717                       new_state ? "En" : "Dis");
4718
4719         spin_lock_irq(&dev_priv->irq_lock);
4720         hotplug->hpd_short_storm_enabled = new_state;
4721         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4722         for_each_hpd_pin(i)
4723                 hotplug->stats[i].count = 0;
4724         spin_unlock_irq(&dev_priv->irq_lock);
4725
4726         /* Re-enable hpd immediately if we were in an irq storm */
4727         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4728
4729         return len;
4730 }
4731
/* File operations for the i915_hpd_short_storm_ctl debugfs entry */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4740
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * CRTC that supports it, for each attached eDP connector.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* DRRS control here is only implemented for gen7+ */
	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		/* Hold the CRTC lock so its state can't change under us */
		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any pending commit on this CRTC to hit the HW */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Skip connectors not driven by this CRTC */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			/* DRRS is only toggled on eDP outputs */
			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

/* Write-only u64 debugfs attribute backed by i915_drrs_ctl_set() */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4810
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * pipe (via intel_crtc_arm_fifo_underrun()) and resets FBC underrun state.
 * Writing a falsy value is a no-op.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* Only a truthy write triggers the reset */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Lock the CRTC so its state can't change while we re-arm */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for any in-flight commit to fully complete */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4864
/* File operations for the (write-only) i915_fifo_underrun_reset entry */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4871
4872 static const struct drm_info_list i915_debugfs_list[] = {
4873         {"i915_capabilities", i915_capabilities, 0},
4874         {"i915_gem_objects", i915_gem_object_info, 0},
4875         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4876         {"i915_gem_stolen", i915_gem_stolen_list_info },
4877         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4878         {"i915_gem_interrupt", i915_interrupt_info, 0},
4879         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4880         {"i915_guc_info", i915_guc_info, 0},
4881         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4882         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4883         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4884         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4885         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4886         {"i915_frequency_info", i915_frequency_info, 0},
4887         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4888         {"i915_reset_info", i915_reset_info, 0},
4889         {"i915_drpc_info", i915_drpc_info, 0},
4890         {"i915_emon_status", i915_emon_status, 0},
4891         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4892         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4893         {"i915_fbc_status", i915_fbc_status, 0},
4894         {"i915_ips_status", i915_ips_status, 0},
4895         {"i915_sr_status", i915_sr_status, 0},
4896         {"i915_opregion", i915_opregion, 0},
4897         {"i915_vbt", i915_vbt, 0},
4898         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4899         {"i915_context_status", i915_context_status, 0},
4900         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4901         {"i915_swizzle_info", i915_swizzle_info, 0},
4902         {"i915_ppgtt_info", i915_ppgtt_info, 0},
4903         {"i915_llc", i915_llc, 0},
4904         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4905         {"i915_energy_uJ", i915_energy_uJ, 0},
4906         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4907         {"i915_power_domain_info", i915_power_domain_info, 0},
4908         {"i915_dmc_info", i915_dmc_info, 0},
4909         {"i915_display_info", i915_display_info, 0},
4910         {"i915_engine_info", i915_engine_info, 0},
4911         {"i915_rcs_topology", i915_rcs_topology, 0},
4912         {"i915_shrinker_info", i915_shrinker_info, 0},
4913         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4914         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4915         {"i915_wa_registers", i915_wa_registers, 0},
4916         {"i915_ddb_info", i915_ddb_info, 0},
4917         {"i915_sseu_status", i915_sseu_status, 0},
4918         {"i915_drrs_status", i915_drrs_status, 0},
4919         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4920 };
4921 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4922
/*
 * Writable debugfs files, each with dedicated file_operations; created
 * one by one in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4953
4954 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4955 {
4956         struct drm_minor *minor = dev_priv->drm.primary;
4957         struct dentry *ent;
4958         int i;
4959
4960         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4961                                   minor->debugfs_root, to_i915(minor->dev),
4962                                   &i915_forcewake_fops);
4963         if (!ent)
4964                 return -ENOMEM;
4965
4966         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4967                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4968                                           S_IRUGO | S_IWUSR,
4969                                           minor->debugfs_root,
4970                                           to_i915(minor->dev),
4971                                           i915_debugfs_files[i].fops);
4972                 if (!ent)
4973                         return -ENOMEM;
4974         }
4975
4976         return drm_debugfs_create_files(i915_debugfs_list,
4977                                         I915_DEBUGFS_ENTRIES,
4978                                         minor->debugfs_root, minor);
4979 }
4980
/* Describes one DPCD register range dumped by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4991
/* DPCD register ranges dumped by the per-connector i915_dpcd file */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
5004
5005 static int i915_dpcd_show(struct seq_file *m, void *data)
5006 {
5007         struct drm_connector *connector = m->private;
5008         struct intel_dp *intel_dp =
5009                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5010         uint8_t buf[16];
5011         ssize_t err;
5012         int i;
5013
5014         if (connector->status != connector_status_connected)
5015                 return -ENODEV;
5016
5017         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5018                 const struct dpcd_block *b = &i915_dpcd_debug[i];
5019                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5020
5021                 if (b->edp &&
5022                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5023                         continue;
5024
5025                 /* low tech for now */
5026                 if (WARN_ON(size > sizeof(buf)))
5027                         continue;
5028
5029                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5030                 if (err < 0)
5031                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
5032                 else
5033                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
5034         }
5035
5036         return 0;
5037 }
5038 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
5039
5040 static int i915_panel_show(struct seq_file *m, void *data)
5041 {
5042         struct drm_connector *connector = m->private;
5043         struct intel_dp *intel_dp =
5044                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5045
5046         if (connector->status != connector_status_connected)
5047                 return -ENODEV;
5048
5049         seq_printf(m, "Panel power up delay: %d\n",
5050                    intel_dp->panel_power_up_delay);
5051         seq_printf(m, "Panel power down delay: %d\n",
5052                    intel_dp->panel_power_down_delay);
5053         seq_printf(m, "Backlight on delay: %d\n",
5054                    intel_dp->backlight_on_delay);
5055         seq_printf(m, "Backlight off delay: %d\n",
5056                    intel_dp->backlight_off_delay);
5057
5058         return 0;
5059 }
5060 DEFINE_SHOW_ATTRIBUTE(i915_panel);
5061
5062 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
5063 {
5064         struct drm_connector *connector = m->private;
5065         struct intel_connector *intel_connector = to_intel_connector(connector);
5066
5067         if (connector->status != connector_status_connected)
5068                 return -ENODEV;
5069
5070         /* HDCP is supported by connector */
5071         if (!intel_connector->hdcp.shim)
5072                 return -EINVAL;
5073
5074         seq_printf(m, "%s:%d HDCP version: ", connector->name,
5075                    connector->base.id);
5076         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
5077                    "None" : "HDCP1.4");
5078         seq_puts(m, "\n");
5079
5080         return 0;
5081 }
5082 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5083
5084 /**
5085  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5086  * @connector: pointer to a registered drm_connector
5087  *
5088  * Cleanup will be done by drm_connector_unregister() through a call to
5089  * drm_debugfs_connector_remove().
5090  *
5091  * Returns 0 on success, negative error codes on error.
5092  */
5093 int i915_debugfs_connector_add(struct drm_connector *connector)
5094 {
5095         struct dentry *root = connector->debugfs_entry;
5096
5097         /* The connector must have been registered beforehands. */
5098         if (!root)
5099                 return -ENODEV;
5100
5101         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5102             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5103                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5104                                     connector, &i915_dpcd_fops);
5105
5106         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
5107                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5108                                     connector, &i915_panel_fops);
5109                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5110                                     connector, &i915_psr_sink_status_fops);
5111         }
5112
5113         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5114             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5115             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5116                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5117                                     connector, &i915_hdcp_sink_capability_fops);
5118         }
5119
5120         return 0;
5121 }