drm/i915: Replace global breadcrumbs with per-context interrupt tracking
[linux-2.6-block.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
35 #include "i915_reset.h"
36
37 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
38 {
39         return to_i915(node->minor->dev);
40 }
41
42 static int i915_capabilities(struct seq_file *m, void *data)
43 {
44         struct drm_i915_private *dev_priv = node_to_i915(m->private);
45         const struct intel_device_info *info = INTEL_INFO(dev_priv);
46         struct drm_printer p = drm_seq_file_printer(m);
47
48         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
49         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
50         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
51
52         intel_device_info_dump_flags(info, &p);
53         intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
54         intel_driver_caps_print(&dev_priv->caps, &p);
55
56         kernel_param_lock(THIS_MODULE);
57         i915_params_dump(&i915_modparams, &p);
58         kernel_param_unlock(THIS_MODULE);
59
60         return 0;
61 }
62
/* '*' when the object is still referenced by in-flight GPU work. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
67
68 static char get_pin_flag(struct drm_i915_gem_object *obj)
69 {
70         return obj->pin_global ? 'p' : ' ';
71 }
72
73 static char get_tiling_flag(struct drm_i915_gem_object *obj)
74 {
75         switch (i915_gem_object_get_tiling(obj)) {
76         default:
77         case I915_TILING_NONE: return ' ';
78         case I915_TILING_X: return 'X';
79         case I915_TILING_Y: return 'Y';
80         }
81 }
82
83 static char get_global_flag(struct drm_i915_gem_object *obj)
84 {
85         return obj->userfault_count ? 'g' : ' ';
86 }
87
88 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
89 {
90         return obj->mm.mapping ? 'M' : ' ';
91 }
92
93 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
94 {
95         u64 size = 0;
96         struct i915_vma *vma;
97
98         for_each_ggtt_vma(vma, obj) {
99                 if (drm_mm_node_allocated(&vma->node))
100                         size += vma->node.size;
101         }
102
103         return size;
104 }
105
106 static const char *
107 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
108 {
109         size_t x = 0;
110
111         switch (page_sizes) {
112         case 0:
113                 return "";
114         case I915_GTT_PAGE_SIZE_4K:
115                 return "4K";
116         case I915_GTT_PAGE_SIZE_64K:
117                 return "64K";
118         case I915_GTT_PAGE_SIZE_2M:
119                 return "2M";
120         default:
121                 if (!buf)
122                         return "M";
123
124                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
125                         x += snprintf(buf + x, len - x, "2M, ");
126                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
127                         x += snprintf(buf + x, len - x, "64K, ");
128                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
129                         x += snprintf(buf + x, len - x, "4K, ");
130                 buf[x-2] = '\0';
131
132                 return buf;
133         }
134 }
135
/*
 * Print a one-line description of a GEM object to the seq_file: status
 * flags, size, read/write domains, cache level, flink name, pin count,
 * every allocated vma (with GGTT view details and fence state), stolen
 * offset, the last engine to write it, and any frontbuffer bits.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	/* The vma list and the object state below are guarded by struct_mutex. */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned vma before printing, so the total precedes the list. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Describe the special GGTT view, if any. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
223
224 static int obj_rank_by_stolen(const void *A, const void *B)
225 {
226         const struct drm_i915_gem_object *a =
227                 *(const struct drm_i915_gem_object **)A;
228         const struct drm_i915_gem_object *b =
229                 *(const struct drm_i915_gem_object **)B;
230
231         if (a->stolen->start < b->stolen->start)
232                 return -1;
233         if (a->stolen->start > b->stolen->start)
234                 return 1;
235         return 0;
236 }
237
/*
 * debugfs entry: list every GEM object backed by stolen memory, sorted by
 * its offset within the stolen region, followed by aggregate totals.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/*
	 * Snapshot the object count without locking; if more objects appear
	 * afterwards we simply stop collecting once the array is full.
	 */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	/* Walk both object lists under the obj_lock spinlock. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
300
/*
 * Per-client (or kernel-context) GEM object accounting, accumulated by
 * per_file_stats() and reported through print_file_stats().
 */
struct file_stats {
	struct i915_address_space *vm; /* ppGTT to filter non-GGTT vma against */
	unsigned long count;           /* number of objects visited */
	u64 total, unbound;            /* total size; size with no binding at all */
	u64 global, shared;            /* GGTT-bound size; flink/dma-buf shared size */
	u64 active, inactive;          /* bound size split by GPU activity */
	u64 closed;                    /* size of closed-but-not-yet-freed vma */
};
309
/*
 * Accumulate one object's sizes into the struct file_stats passed via
 * @data. Used both as an idr_for_each() callback (@id is the per-file
 * handle) and called directly with id = 0 for kernel-owned objects.
 * Always returns 0 so the IDR walk continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Only count vma bound into this client's ppGTT. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
347
/*
 * Emit one summary line for @stats (a struct file_stats taken by value),
 * prefixed with @name — but only when at least one object was counted.
 * Macro rather than a function so it can accept the aggregate by value
 * from both stack and compound-literal callers.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
361
/*
 * Aggregate every object sitting in each engine's batch-buffer pool into
 * a single "[k]batch pool" summary line. per_file_stats() asserts that
 * struct_mutex is held, so the caller must hold it.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
382
/*
 * Walk every GEM context and print per-client object statistics, plus a
 * final "[k]contexts" line for kernel-owned context state and rings.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		/* Kernel-owned allocations: context image and ringbuffer. */
		for_each_engine(engine, i915, id) {
			struct intel_context *ce = to_intel_context(ctx, engine);

			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			/* table_lock guards the per-file object handle IDR. */
			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			/* Prefer the context's own pid; fall back to the drm_file's. */
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
425
/*
 * debugfs entry: overall GEM memory usage — object counts/sizes split by
 * bound/unbound/purgeable/mapped/huge-paged/display, GTT totals, then
 * per-batch-pool and per-context breakdowns.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* Both object lists are walked under the obj_lock spinlock. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reset the shared counters; purgeable/mapped/huge keep accumulating. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	/* The context/batch-pool walks below require struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
529
530 static int i915_gem_gtt_info(struct seq_file *m, void *data)
531 {
532         struct drm_info_node *node = m->private;
533         struct drm_i915_private *dev_priv = node_to_i915(node);
534         struct drm_device *dev = &dev_priv->drm;
535         struct drm_i915_gem_object **objects;
536         struct drm_i915_gem_object *obj;
537         u64 total_obj_size, total_gtt_size;
538         unsigned long nobject, n;
539         int count, ret;
540
541         nobject = READ_ONCE(dev_priv->mm.object_count);
542         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
543         if (!objects)
544                 return -ENOMEM;
545
546         ret = mutex_lock_interruptible(&dev->struct_mutex);
547         if (ret)
548                 return ret;
549
550         count = 0;
551         spin_lock(&dev_priv->mm.obj_lock);
552         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
553                 objects[count++] = obj;
554                 if (count == nobject)
555                         break;
556         }
557         spin_unlock(&dev_priv->mm.obj_lock);
558
559         total_obj_size = total_gtt_size = 0;
560         for (n = 0;  n < count; n++) {
561                 obj = objects[n];
562
563                 seq_puts(m, "   ");
564                 describe_obj(m, obj);
565                 seq_putc(m, '\n');
566                 total_obj_size += obj->base.size;
567                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
568         }
569
570         mutex_unlock(&dev->struct_mutex);
571
572         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
573                    count, total_obj_size, total_gtt_size);
574         kvfree(objects);
575
576         return 0;
577 }
578
/*
 * debugfs entry: for each engine and each batch-pool size bucket, print
 * the number of cached batch objects followed by a description of each,
 * then an overall total.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	/* describe_obj() below asserts struct_mutex is held. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: count, so the header precedes the list. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
623
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down), plus the port, misc and PCU
 * interrupt registers.
 *
 * NOTE: I915_READ() implicitly references the local named "dev_priv".
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/*
		 * Reading registers in a powered-down domain would fault, so
		 * only take a wakeref if the pipe's power well is already up.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
675
676 static int i915_interrupt_info(struct seq_file *m, void *data)
677 {
678         struct drm_i915_private *dev_priv = node_to_i915(m->private);
679         struct intel_engine_cs *engine;
680         enum intel_engine_id id;
681         intel_wakeref_t wakeref;
682         int i, pipe;
683
684         wakeref = intel_runtime_pm_get(dev_priv);
685
686         if (IS_CHERRYVIEW(dev_priv)) {
687                 intel_wakeref_t pref;
688
689                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
690                            I915_READ(GEN8_MASTER_IRQ));
691
692                 seq_printf(m, "Display IER:\t%08x\n",
693                            I915_READ(VLV_IER));
694                 seq_printf(m, "Display IIR:\t%08x\n",
695                            I915_READ(VLV_IIR));
696                 seq_printf(m, "Display IIR_RW:\t%08x\n",
697                            I915_READ(VLV_IIR_RW));
698                 seq_printf(m, "Display IMR:\t%08x\n",
699                            I915_READ(VLV_IMR));
700                 for_each_pipe(dev_priv, pipe) {
701                         enum intel_display_power_domain power_domain;
702
703                         power_domain = POWER_DOMAIN_PIPE(pipe);
704                         pref = intel_display_power_get_if_enabled(dev_priv,
705                                                                   power_domain);
706                         if (!pref) {
707                                 seq_printf(m, "Pipe %c power disabled\n",
708                                            pipe_name(pipe));
709                                 continue;
710                         }
711
712                         seq_printf(m, "Pipe %c stat:\t%08x\n",
713                                    pipe_name(pipe),
714                                    I915_READ(PIPESTAT(pipe)));
715
716                         intel_display_power_put(dev_priv, power_domain, pref);
717                 }
718
719                 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
720                 seq_printf(m, "Port hotplug:\t%08x\n",
721                            I915_READ(PORT_HOTPLUG_EN));
722                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
723                            I915_READ(VLV_DPFLIPSTAT));
724                 seq_printf(m, "DPINVGTT:\t%08x\n",
725                            I915_READ(DPINVGTT));
726                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
727
728                 for (i = 0; i < 4; i++) {
729                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
730                                    i, I915_READ(GEN8_GT_IMR(i)));
731                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
732                                    i, I915_READ(GEN8_GT_IIR(i)));
733                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
734                                    i, I915_READ(GEN8_GT_IER(i)));
735                 }
736
737                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
738                            I915_READ(GEN8_PCU_IMR));
739                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
740                            I915_READ(GEN8_PCU_IIR));
741                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
742                            I915_READ(GEN8_PCU_IER));
743         } else if (INTEL_GEN(dev_priv) >= 11) {
744                 seq_printf(m, "Master Interrupt Control:  %08x\n",
745                            I915_READ(GEN11_GFX_MSTR_IRQ));
746
747                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
748                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
749                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
750                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
751                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
752                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
753                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
754                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
755                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
756                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
757                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
758                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
759
760                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
761                            I915_READ(GEN11_DISPLAY_INT_CTL));
762
763                 gen8_display_interrupt_info(m);
764         } else if (INTEL_GEN(dev_priv) >= 8) {
765                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
766                            I915_READ(GEN8_MASTER_IRQ));
767
768                 for (i = 0; i < 4; i++) {
769                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
770                                    i, I915_READ(GEN8_GT_IMR(i)));
771                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
772                                    i, I915_READ(GEN8_GT_IIR(i)));
773                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
774                                    i, I915_READ(GEN8_GT_IER(i)));
775                 }
776
777                 gen8_display_interrupt_info(m);
778         } else if (IS_VALLEYVIEW(dev_priv)) {
779                 seq_printf(m, "Display IER:\t%08x\n",
780                            I915_READ(VLV_IER));
781                 seq_printf(m, "Display IIR:\t%08x\n",
782                            I915_READ(VLV_IIR));
783                 seq_printf(m, "Display IIR_RW:\t%08x\n",
784                            I915_READ(VLV_IIR_RW));
785                 seq_printf(m, "Display IMR:\t%08x\n",
786                            I915_READ(VLV_IMR));
787                 for_each_pipe(dev_priv, pipe) {
788                         enum intel_display_power_domain power_domain;
789                         intel_wakeref_t pref;
790
791                         power_domain = POWER_DOMAIN_PIPE(pipe);
792                         pref = intel_display_power_get_if_enabled(dev_priv,
793                                                                   power_domain);
794                         if (!pref) {
795                                 seq_printf(m, "Pipe %c power disabled\n",
796                                            pipe_name(pipe));
797                                 continue;
798                         }
799
800                         seq_printf(m, "Pipe %c stat:\t%08x\n",
801                                    pipe_name(pipe),
802                                    I915_READ(PIPESTAT(pipe)));
803                         intel_display_power_put(dev_priv, power_domain, pref);
804                 }
805
806                 seq_printf(m, "Master IER:\t%08x\n",
807                            I915_READ(VLV_MASTER_IER));
808
809                 seq_printf(m, "Render IER:\t%08x\n",
810                            I915_READ(GTIER));
811                 seq_printf(m, "Render IIR:\t%08x\n",
812                            I915_READ(GTIIR));
813                 seq_printf(m, "Render IMR:\t%08x\n",
814                            I915_READ(GTIMR));
815
816                 seq_printf(m, "PM IER:\t\t%08x\n",
817                            I915_READ(GEN6_PMIER));
818                 seq_printf(m, "PM IIR:\t\t%08x\n",
819                            I915_READ(GEN6_PMIIR));
820                 seq_printf(m, "PM IMR:\t\t%08x\n",
821                            I915_READ(GEN6_PMIMR));
822
823                 seq_printf(m, "Port hotplug:\t%08x\n",
824                            I915_READ(PORT_HOTPLUG_EN));
825                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
826                            I915_READ(VLV_DPFLIPSTAT));
827                 seq_printf(m, "DPINVGTT:\t%08x\n",
828                            I915_READ(DPINVGTT));
829
830         } else if (!HAS_PCH_SPLIT(dev_priv)) {
831                 seq_printf(m, "Interrupt enable:    %08x\n",
832                            I915_READ(IER));
833                 seq_printf(m, "Interrupt identity:  %08x\n",
834                            I915_READ(IIR));
835                 seq_printf(m, "Interrupt mask:      %08x\n",
836                            I915_READ(IMR));
837                 for_each_pipe(dev_priv, pipe)
838                         seq_printf(m, "Pipe %c stat:         %08x\n",
839                                    pipe_name(pipe),
840                                    I915_READ(PIPESTAT(pipe)));
841         } else {
842                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
843                            I915_READ(DEIER));
844                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
845                            I915_READ(DEIIR));
846                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
847                            I915_READ(DEIMR));
848                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
849                            I915_READ(SDEIER));
850                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
851                            I915_READ(SDEIIR));
852                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
853                            I915_READ(SDEIMR));
854                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
855                            I915_READ(GTIER));
856                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
857                            I915_READ(GTIIR));
858                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
859                            I915_READ(GTIMR));
860         }
861
862         if (INTEL_GEN(dev_priv) >= 11) {
863                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
864                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
865                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
866                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
867                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
868                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
869                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
870                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
871                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
872                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
873                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
874                            I915_READ(GEN11_GUC_SG_INTR_MASK));
875                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
876                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
877                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
878                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
879                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
880                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
881
882         } else if (INTEL_GEN(dev_priv) >= 6) {
883                 for_each_engine(engine, dev_priv, id) {
884                         seq_printf(m,
885                                    "Graphics Interrupt mask (%s):       %08x\n",
886                                    engine->name, I915_READ_IMR(engine));
887                 }
888         }
889
890         intel_runtime_pm_put(dev_priv, wakeref);
891
892         return 0;
893 }
894
895 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
896 {
897         struct drm_i915_private *dev_priv = node_to_i915(m->private);
898         struct drm_device *dev = &dev_priv->drm;
899         int i, ret;
900
901         ret = mutex_lock_interruptible(&dev->struct_mutex);
902         if (ret)
903                 return ret;
904
905         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
906         for (i = 0; i < dev_priv->num_fence_regs; i++) {
907                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
908
909                 seq_printf(m, "Fence %d, pin count = %d, object = ",
910                            i, dev_priv->fence_regs[i].pin_count);
911                 if (!vma)
912                         seq_puts(m, "unused");
913                 else
914                         describe_obj(m, vma->obj);
915                 seq_putc(m, '\n');
916         }
917
918         mutex_unlock(&dev->struct_mutex);
919         return 0;
920 }
921
922 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
923 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
924                               size_t count, loff_t *pos)
925 {
926         struct i915_gpu_state *error;
927         ssize_t ret;
928         void *buf;
929
930         error = file->private_data;
931         if (!error)
932                 return 0;
933
934         /* Bounce buffer required because of kernfs __user API convenience. */
935         buf = kmalloc(count, GFP_KERNEL);
936         if (!buf)
937                 return -ENOMEM;
938
939         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
940         if (ret <= 0)
941                 goto out;
942
943         if (!copy_to_user(ubuf, buf, ret))
944                 *pos += ret;
945         else
946                 ret = -EFAULT;
947
948 out:
949         kfree(buf);
950         return ret;
951 }
952
/*
 * release() for the gpu_info and error_state debugfs files: drop the
 * reference on the GPU state snapshot taken at open() time.
 */
static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_state_put(file->private_data);
        return 0;
}
958
959 static int i915_gpu_info_open(struct inode *inode, struct file *file)
960 {
961         struct drm_i915_private *i915 = inode->i_private;
962         struct i915_gpu_state *gpu;
963         intel_wakeref_t wakeref;
964
965         gpu = NULL;
966         with_intel_runtime_pm(i915, wakeref)
967                 gpu = i915_capture_gpu_state(i915);
968         if (IS_ERR(gpu))
969                 return PTR_ERR(gpu);
970
971         file->private_data = gpu;
972         return 0;
973 }
974
/*
 * debugfs "i915_gpu_info": each open() captures a live GPU state
 * snapshot, which reads serialise out and release() frees.
 */
static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
982
/*
 * Writing anything to the error_state file discards the error state
 * currently held by this open file, allowing a new hang to be captured.
 * The written bytes themselves are ignored.
 */
static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        /* No error state was captured at open(): nothing to reset. */
        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        /* Report the whole write as consumed. */
        return cnt;
}
999
1000 static int i915_error_state_open(struct inode *inode, struct file *file)
1001 {
1002         struct i915_gpu_state *error;
1003
1004         error = i915_first_error_state(inode->i_private);
1005         if (IS_ERR(error))
1006                 return PTR_ERR(error);
1007
1008         file->private_data  = error;
1009         return 0;
1010 }
1011
/*
 * debugfs "i915_error_state": reads dump the first captured error
 * state; any write clears it so a new hang can be recorded.
 */
static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
1020 #endif
1021
/*
 * i915_frequency_info - debugfs dump of GPU frequency / RPS state.
 *
 * Chooses a decode path by platform: Ironlake (gen5) MEMSWCTL/MEMSTAT,
 * Valleyview/Cherryview punit status, or the gen6+ RPS register block.
 * Always finishes with the CD clock and dot clock limits.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        intel_wakeref_t wakeref;
        int ret = 0;

        /* Device must be awake for the MMIO reads below. */
        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_GEN(dev_priv, 5)) {
                /* Ironlake: requested/current P-state in MEMSWCTL/MEMSTAT. */
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                /* The punit read below is serialised by pcu_lock. */
                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                /* Actual frequency comes from the punit, not MMIO. */
                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                /* Broxton-class parts keep the RP caps in BXT_* registers. */
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                /* Requested frequency field position varies by generation. */
                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                /* The PM interrupt registers moved across generations. */
                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                /* pm_isr/pm_iir were not read on gen11+, see above. */
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                /* gen9+ widened the ratio field to 9 bits. */
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                /* RPN/RP0 field positions are swapped on GEN9_LP parts. */
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv, wakeref);
        return ret;
}
1244
1245 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1246                                struct seq_file *m,
1247                                struct intel_instdone *instdone)
1248 {
1249         int slice;
1250         int subslice;
1251
1252         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1253                    instdone->instdone);
1254
1255         if (INTEL_GEN(dev_priv) <= 3)
1256                 return;
1257
1258         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1259                    instdone->slice_common);
1260
1261         if (INTEL_GEN(dev_priv) <= 6)
1262                 return;
1263
1264         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1265                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1266                            slice, subslice, instdone->sampler[slice][subslice]);
1267
1268         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1269                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1270                            slice, subslice, instdone->row[slice][subslice]);
1271 }
1272
/*
 * i915_hangcheck_info - debugfs summary of the hangcheck machinery.
 *
 * Reports wedged/reset-backoff state, whether the hangcheck delayed
 * work is armed, and per-engine progress (seqno and ACTHD) compared
 * against the values recorded by the last hangcheck pass.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
        struct intel_instdone instdone;
        intel_wakeref_t wakeref;
        enum intel_engine_id id;

        if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
                seq_puts(m, "Wedged\n");
        if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: struct_mutex backoff\n");
        if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
                seq_puts(m, "Waiter holding struct mutex\n");
        if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
                seq_puts(m, "struct_mutex blocked for reset\n");

        if (!i915_modparams.enable_hangcheck) {
                seq_puts(m, "Hangcheck disabled\n");
                return 0;
        }

        /* Sample the live hardware state with the device awake. */
        with_intel_runtime_pm(dev_priv, wakeref) {
                for_each_engine(engine, dev_priv, id) {
                        acthd[id] = intel_engine_get_active_head(engine);
                        seqno[id] = intel_engine_get_seqno(engine);
                }

                /* INSTDONE is only sampled for the render engine. */
                intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
        }

        if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
                seq_printf(m, "Hangcheck active, timer fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
                                            jiffies))<
        else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
                seq_puts(m, "Hangcheck active, work pending\n");
        else
                seq_puts(m, "Hangcheck inactive\n");

        seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

        for_each_engine(engine, dev_priv, id) {
                seq_printf(m, "%s:\n", engine->name);
                /* Compare hangcheck's cached seqno against the live one. */
                seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
                           engine->hangcheck.seqno, seqno[id],
                           intel_engine_last_submit(engine),
                           jiffies_to_msecs(jiffies -
                                            engine->hangcheck.action_timestamp));
                seq_printf(m, "\tfake irq active? %s\n",
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)));

                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);

                if (engine->id == RCS) {
                        seq_puts(m, "\tinstdone read =\n");

                        i915_instdone_info(dev_priv, m, &instdone);

                        seq_puts(m, "\tinstdone accu =\n");

                        i915_instdone_info(dev_priv, m,
                                           &engine->hangcheck.instdone);
                }
        }

        return 0;
}
1346
1347 static int i915_reset_info(struct seq_file *m, void *unused)
1348 {
1349         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1350         struct i915_gpu_error *error = &dev_priv->gpu_error;
1351         struct intel_engine_cs *engine;
1352         enum intel_engine_id id;
1353
1354         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1355
1356         for_each_engine(engine, dev_priv, id) {
1357                 seq_printf(m, "%s = %u\n", engine->name,
1358                            i915_reset_engine_count(error, engine));
1359         }
1360
1361         return 0;
1362 }
1363
/*
 * ironlake_drpc_info - decode Ironlake render standby (DRPC) state.
 *
 * Dumps the memory self-refresh mode control (MEMMODECTL), the render
 * standby control (RSTDBYCTL) and the RS1/RS2 VIDs, then names the
 * current render standby state from the RSX_STATUS field.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
        seq_printf(m, "Gated voltage change: %s\n",
                   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        /* FMIN occupies the low bits, no shift needed. */
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        /* RCX_SW_EXIT set means software is holding the GPU out of standby. */
        seq_printf(m, "Render standby enabled: %s\n",
                   yesno(!(rstdbyctl & RCX_SW_EXIT)));
        seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}
1420
1421 static int i915_forcewake_domains(struct seq_file *m, void *data)
1422 {
1423         struct drm_i915_private *i915 = node_to_i915(m->private);
1424         struct intel_uncore_forcewake_domain *fw_domain;
1425         unsigned int tmp;
1426
1427         seq_printf(m, "user.bypass_count = %u\n",
1428                    i915->uncore.user_forcewake.count);
1429
1430         for_each_fw_domain(fw_domain, i915, tmp)
1431                 seq_printf(m, "%s.wake_count = %u\n",
1432                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1433                            READ_ONCE(fw_domain->wake_count));
1434
1435         return 0;
1436 }
1437
/*
 * Print one RC6 residency counter: the raw register value followed by
 * its conversion to microseconds.
 */
static void print_rc6_res(struct seq_file *m,
                          const char *title,
                          const i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        seq_printf(m, "%s %u (%llu us)\n",
                   title, I915_READ(reg),
                   intel_rc6_residency_us(dev_priv, reg));
}
1448
1449 static int vlv_drpc_info(struct seq_file *m)
1450 {
1451         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1452         u32 rcctl1, pw_status;
1453
1454         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1455         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1456
1457         seq_printf(m, "RC6 Enabled: %s\n",
1458                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1459                                         GEN6_RC_CTL_EI_MODE(1))));
1460         seq_printf(m, "Render Power Well: %s\n",
1461                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1462         seq_printf(m, "Media Power Well: %s\n",
1463                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1464
1465         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1466         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1467
1468         return i915_forcewake_domains(m, NULL);
1469 }
1470
/*
 * Report the gen6+ render C-state (RC6) configuration and current status.
 *
 * Called via i915_drpc_info(), which already holds a runtime-pm wakeref
 * around this function, so the register reads below are safe.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/*
	 * The raw (_FW) read bypasses the uncore mmio path, so emit the
	 * register trace event by hand to keep i915_reg_rw tracing complete.
	 */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/*
		 * RC6 voltage IDs are only queried on gen6/gen7; pcode
		 * access is serialised by pcu_lock.
		 */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC state sampled into gt_core_status above. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain state for convenience. */
	return i915_forcewake_domains(m, NULL);
}
1558
1559 static int i915_drpc_info(struct seq_file *m, void *unused)
1560 {
1561         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1562         intel_wakeref_t wakeref;
1563         int err = -ENODEV;
1564
1565         with_intel_runtime_pm(dev_priv, wakeref) {
1566                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1567                         err = vlv_drpc_info(m);
1568                 else if (INTEL_GEN(dev_priv) >= 6)
1569                         err = gen6_drpc_info(m);
1570                 else
1571                         err = ironlake_drpc_info(m);
1572         }
1573
1574         return err;
1575 }
1576
1577 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1578 {
1579         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1580
1581         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1582                    dev_priv->fb_tracking.busy_bits);
1583
1584         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1585                    dev_priv->fb_tracking.flip_bits);
1586
1587         return 0;
1588 }
1589
1590 static int i915_fbc_status(struct seq_file *m, void *unused)
1591 {
1592         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1593         struct intel_fbc *fbc = &dev_priv->fbc;
1594         intel_wakeref_t wakeref;
1595
1596         if (!HAS_FBC(dev_priv))
1597                 return -ENODEV;
1598
1599         wakeref = intel_runtime_pm_get(dev_priv);
1600         mutex_lock(&fbc->lock);
1601
1602         if (intel_fbc_is_active(dev_priv))
1603                 seq_puts(m, "FBC enabled\n");
1604         else
1605                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1606
1607         if (intel_fbc_is_active(dev_priv)) {
1608                 u32 mask;
1609
1610                 if (INTEL_GEN(dev_priv) >= 8)
1611                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1612                 else if (INTEL_GEN(dev_priv) >= 7)
1613                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1614                 else if (INTEL_GEN(dev_priv) >= 5)
1615                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1616                 else if (IS_G4X(dev_priv))
1617                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1618                 else
1619                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1620                                                         FBC_STAT_COMPRESSED);
1621
1622                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1623         }
1624
1625         mutex_unlock(&fbc->lock);
1626         intel_runtime_pm_put(dev_priv, wakeref);
1627
1628         return 0;
1629 }
1630
1631 static int i915_fbc_false_color_get(void *data, u64 *val)
1632 {
1633         struct drm_i915_private *dev_priv = data;
1634
1635         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1636                 return -ENODEV;
1637
1638         *val = dev_priv->fbc.false_color;
1639
1640         return 0;
1641 }
1642
1643 static int i915_fbc_false_color_set(void *data, u64 val)
1644 {
1645         struct drm_i915_private *dev_priv = data;
1646         u32 reg;
1647
1648         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1649                 return -ENODEV;
1650
1651         mutex_lock(&dev_priv->fbc.lock);
1652
1653         reg = I915_READ(ILK_DPFC_CONTROL);
1654         dev_priv->fbc.false_color = val;
1655
1656         I915_WRITE(ILK_DPFC_CONTROL, val ?
1657                    (reg | FBC_CTL_FALSE_COLOR) :
1658                    (reg & ~FBC_CTL_FALSE_COLOR));
1659
1660         mutex_unlock(&dev_priv->fbc.lock);
1661         return 0;
1662 }
1663
/* debugfs file ops for i915_fbc_false_color, wiring up the get/set pair. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1667
1668 static int i915_ips_status(struct seq_file *m, void *unused)
1669 {
1670         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1671         intel_wakeref_t wakeref;
1672
1673         if (!HAS_IPS(dev_priv))
1674                 return -ENODEV;
1675
1676         wakeref = intel_runtime_pm_get(dev_priv);
1677
1678         seq_printf(m, "Enabled by kernel parameter: %s\n",
1679                    yesno(i915_modparams.enable_ips));
1680
1681         if (INTEL_GEN(dev_priv) >= 8) {
1682                 seq_puts(m, "Currently: unknown\n");
1683         } else {
1684                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1685                         seq_puts(m, "Currently: enabled\n");
1686                 else
1687                         seq_puts(m, "Currently: disabled\n");
1688         }
1689
1690         intel_runtime_pm_put(dev_priv, wakeref);
1691
1692         return 0;
1693 }
1694
/*
 * Report whether panel self-refresh is currently enabled, reading the
 * platform-specific enable bit while holding a display power reference.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	/* Platforms not matched above report "disabled" by default. */
	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1723
1724 static int i915_emon_status(struct seq_file *m, void *unused)
1725 {
1726         struct drm_i915_private *i915 = node_to_i915(m->private);
1727         intel_wakeref_t wakeref;
1728
1729         if (!IS_GEN(i915, 5))
1730                 return -ENODEV;
1731
1732         with_intel_runtime_pm(i915, wakeref) {
1733                 unsigned long temp, chipset, gfx;
1734
1735                 temp = i915_mch_val(i915);
1736                 chipset = i915_chipset_val(i915);
1737                 gfx = i915_gfx_val(i915);
1738
1739                 seq_printf(m, "GMCH temp: %ld\n", temp);
1740                 seq_printf(m, "Chipset power: %ld\n", chipset);
1741                 seq_printf(m, "GFX power: %ld\n", gfx);
1742                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1743         }
1744
1745         return 0;
1746 }
1747
/*
 * Print pcode's GPU-frequency to effective CPU/ring frequency table.
 * Only available on LLC platforms (hence the HAS_LLC gate).
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	/* pcu_lock serialises all pcode mailbox traffic. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out; /* still drops the wakeref below */

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is in/out: pcode replies in the same variable. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* Reply packs CPU freq in bits 0-7, ring freq in bits 8-15,
		 * both in 100 MHz units. */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1796
1797 static int i915_opregion(struct seq_file *m, void *unused)
1798 {
1799         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1800         struct drm_device *dev = &dev_priv->drm;
1801         struct intel_opregion *opregion = &dev_priv->opregion;
1802         int ret;
1803
1804         ret = mutex_lock_interruptible(&dev->struct_mutex);
1805         if (ret)
1806                 goto out;
1807
1808         if (opregion->header)
1809                 seq_write(m, opregion->header, OPREGION_SIZE);
1810
1811         mutex_unlock(&dev->struct_mutex);
1812
1813 out:
1814         return 0;
1815 }
1816
1817 static int i915_vbt(struct seq_file *m, void *unused)
1818 {
1819         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1820
1821         if (opregion->vbt)
1822                 seq_write(m, opregion->vbt, opregion->vbt_size);
1823
1824         return 0;
1825 }
1826
/*
 * List every framebuffer: the fbdev/fbcon one first (when fbdev emulation
 * is built in), then all user-created ones, skipping the fbdev fb so it
 * is not printed twice.
 *
 * Lock order: struct_mutex (for describe_obj) outside mode_config.fb_lock
 * (for walking the fb list).
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as the fbcon framebuffer. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1876
/* Print a one-line summary of a context ring's space/head/tail/emit state. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1882
/*
 * Dump every GEM context: hw_id and pin count, owning task (or whether it
 * was deleted / belongs to the kernel), and the per-engine context state
 * object and ringbuffer.
 *
 * The context list and per-context state are stabilised by struct_mutex.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* Only contexts with an assigned hw_id are on hw_id_link. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* Takes a task ref; may return NULL if it exited. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if this context still needs L3 slice remapping. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1938
1939 static const char *swizzle_string(unsigned swizzle)
1940 {
1941         switch (swizzle) {
1942         case I915_BIT_6_SWIZZLE_NONE:
1943                 return "none";
1944         case I915_BIT_6_SWIZZLE_9:
1945                 return "bit9";
1946         case I915_BIT_6_SWIZZLE_9_10:
1947                 return "bit9/bit10";
1948         case I915_BIT_6_SWIZZLE_9_11:
1949                 return "bit9/bit11";
1950         case I915_BIT_6_SWIZZLE_9_10_11:
1951                 return "bit9/bit10/bit11";
1952         case I915_BIT_6_SWIZZLE_9_17:
1953                 return "bit9/bit17";
1954         case I915_BIT_6_SWIZZLE_9_10_17:
1955                 return "bit9/bit10/bit17";
1956         case I915_BIT_6_SWIZZLE_UNKNOWN:
1957                 return "unknown";
1958         }
1959
1960         return "bug";
1961 }
1962
/*
 * Report the detected bit6 swizzle modes for X/Y tiling, plus the raw
 * memory-channel/arbitration registers they were derived from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* Raw registers differ per generation; dump the relevant set. */
	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2010
2011 static const char *rps_power_to_str(unsigned int power)
2012 {
2013         static const char * const strings[] = {
2014                 [LOW_POWER] = "low power",
2015                 [BETWEEN] = "mixed",
2016                 [HIGH_POWER] = "high power",
2017         };
2018
2019         if (power >= ARRAY_SIZE(strings) || !strings[power])
2020                 return "unknown";
2021
2022         return strings[power];
2023 }
2024
/*
 * Dump the RPS (GPU frequency scaling) state: current/requested frequency,
 * the soft/hard limits, per-client boost counts, and - when the GPU is
 * busy - the up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;
	struct drm_file *file;

	/*
	 * Only read the actual frequency if the device is already awake;
	 * otherwise fall back to the cached cur_freq initialised above.
	 */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-file boost counts; filelist is protected by filelist_mutex. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* pid_task() requires RCU; task may already have exited. */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	/* Autotuning counters are only meaningful while requests are live. */
	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2110
2111 static int i915_llc(struct seq_file *m, void *data)
2112 {
2113         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2114         const bool edram = INTEL_GEN(dev_priv) > 8;
2115
2116         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2117         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2118                    intel_uncore_edram_size(dev_priv)/1024/1024);
2119
2120         return 0;
2121 }
2122
2123 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2124 {
2125         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2126         intel_wakeref_t wakeref;
2127         struct drm_printer p;
2128
2129         if (!HAS_HUC(dev_priv))
2130                 return -ENODEV;
2131
2132         p = drm_seq_file_printer(m);
2133         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2134
2135         with_intel_runtime_pm(dev_priv, wakeref)
2136                 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2137
2138         return 0;
2139 }
2140
2141 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2142 {
2143         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2144         intel_wakeref_t wakeref;
2145         struct drm_printer p;
2146
2147         if (!HAS_GUC(dev_priv))
2148                 return -ENODEV;
2149
2150         p = drm_seq_file_printer(m);
2151         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2152
2153         with_intel_runtime_pm(dev_priv, wakeref) {
2154                 u32 tmp = I915_READ(GUC_STATUS);
2155                 u32 i;
2156
2157                 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2158                 seq_printf(m, "\tBootrom status = 0x%x\n",
2159                            (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2160                 seq_printf(m, "\tuKernel status = 0x%x\n",
2161                            (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2162                 seq_printf(m, "\tMIA Core status = 0x%x\n",
2163                            (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2164                 seq_puts(m, "\nScratch registers:\n");
2165                 for (i = 0; i < 16; i++) {
2166                         seq_printf(m, "\t%2d: \t0x%x\n",
2167                                    i, I915_READ(SOFT_SCRATCH(i)));
2168                 }
2169         }
2170
2171         return 0;
2172 }
2173
2174 static const char *
2175 stringify_guc_log_type(enum guc_log_buffer_type type)
2176 {
2177         switch (type) {
2178         case GUC_ISR_LOG_BUFFER:
2179                 return "ISR";
2180         case GUC_DPC_LOG_BUFFER:
2181                 return "DPC";
2182         case GUC_CRASH_DUMP_LOG_BUFFER:
2183                 return "CRASH";
2184         default:
2185                 MISSING_CASE(type);
2186         }
2187
2188         return "";
2189 }
2190
2191 static void i915_guc_log_info(struct seq_file *m,
2192                               struct drm_i915_private *dev_priv)
2193 {
2194         struct intel_guc_log *log = &dev_priv->guc.log;
2195         enum guc_log_buffer_type type;
2196
2197         if (!intel_guc_log_relay_enabled(log)) {
2198                 seq_puts(m, "GuC log relay disabled\n");
2199                 return;
2200         }
2201
2202         seq_puts(m, "GuC logging stats:\n");
2203
2204         seq_printf(m, "\tRelay full count: %u\n",
2205                    log->relay.full_count);
2206
2207         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2208                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2209                            stringify_guc_log_type(type),
2210                            log->stats[type].flush,
2211                            log->stats[type].sampled_overflow);
2212         }
2213 }
2214
2215 static void i915_guc_client_info(struct seq_file *m,
2216                                  struct drm_i915_private *dev_priv,
2217                                  struct intel_guc_client *client)
2218 {
2219         struct intel_engine_cs *engine;
2220         enum intel_engine_id id;
2221         u64 tot = 0;
2222
2223         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2224                 client->priority, client->stage_id, client->proc_desc_offset);
2225         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2226                 client->doorbell_id, client->doorbell_offset);
2227
2228         for_each_engine(engine, dev_priv, id) {
2229                 u64 submissions = client->submissions[id];
2230                 tot += submissions;
2231                 seq_printf(m, "\tSubmissions: %llu %s\n",
2232                                 submissions, engine->name);
2233         }
2234         seq_printf(m, "\tTotal: %llu\n", tot);
2235 }
2236
2237 static int i915_guc_info(struct seq_file *m, void *data)
2238 {
2239         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2240         const struct intel_guc *guc = &dev_priv->guc;
2241
2242         if (!USES_GUC(dev_priv))
2243                 return -ENODEV;
2244
2245         i915_guc_log_info(m, dev_priv);
2246
2247         if (!USES_GUC_SUBMISSION(dev_priv))
2248                 return 0;
2249
2250         GEM_BUG_ON(!guc->execbuf_client);
2251
2252         seq_printf(m, "\nDoorbell map:\n");
2253         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2254         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2255
2256         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2257         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2258         if (guc->preempt_client) {
2259                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2260                            guc->preempt_client);
2261                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2262         }
2263
2264         /* Add more as required ... */
2265
2266         return 0;
2267 }
2268
/*
 * Dump every active descriptor in the GuC stage-descriptor pool: the
 * descriptor's own fields, then the execlist context (LRC) state for each
 * engine used by the execbuf client.  Requires GuC submission.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only slots the GuC has marked active are interesting. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine LRC state, indexed by the GuC's engine id. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2322
2323 static int i915_guc_log_dump(struct seq_file *m, void *data)
2324 {
2325         struct drm_info_node *node = m->private;
2326         struct drm_i915_private *dev_priv = node_to_i915(node);
2327         bool dump_load_err = !!node->info_ent->data;
2328         struct drm_i915_gem_object *obj = NULL;
2329         u32 *log;
2330         int i = 0;
2331
2332         if (!HAS_GUC(dev_priv))
2333                 return -ENODEV;
2334
2335         if (dump_load_err)
2336                 obj = dev_priv->guc.load_err_log;
2337         else if (dev_priv->guc.log.vma)
2338                 obj = dev_priv->guc.log.vma->obj;
2339
2340         if (!obj)
2341                 return 0;
2342
2343         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2344         if (IS_ERR(log)) {
2345                 DRM_DEBUG("Failed to pin object\n");
2346                 seq_puts(m, "(log data unaccessible)\n");
2347                 return PTR_ERR(log);
2348         }
2349
2350         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2351                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2352                            *(log + i), *(log + i + 1),
2353                            *(log + i + 2), *(log + i + 3));
2354
2355         seq_putc(m, '\n');
2356
2357         i915_gem_object_unpin_map(obj);
2358
2359         return 0;
2360 }
2361
2362 static int i915_guc_log_level_get(void *data, u64 *val)
2363 {
2364         struct drm_i915_private *dev_priv = data;
2365
2366         if (!USES_GUC(dev_priv))
2367                 return -ENODEV;
2368
2369         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2370
2371         return 0;
2372 }
2373
2374 static int i915_guc_log_level_set(void *data, u64 val)
2375 {
2376         struct drm_i915_private *dev_priv = data;
2377
2378         if (!USES_GUC(dev_priv))
2379                 return -ENODEV;
2380
2381         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2382 }
2383
2384 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2385                         i915_guc_log_level_get, i915_guc_log_level_set,
2386                         "%lld\n");
2387
2388 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2389 {
2390         struct drm_i915_private *dev_priv = inode->i_private;
2391
2392         if (!USES_GUC(dev_priv))
2393                 return -ENODEV;
2394
2395         file->private_data = &dev_priv->guc.log;
2396
2397         return intel_guc_log_relay_open(&dev_priv->guc.log);
2398 }
2399
2400 static ssize_t
2401 i915_guc_log_relay_write(struct file *filp,
2402                          const char __user *ubuf,
2403                          size_t cnt,
2404                          loff_t *ppos)
2405 {
2406         struct intel_guc_log *log = filp->private_data;
2407
2408         intel_guc_log_relay_flush(log);
2409
2410         return cnt;
2411 }
2412
2413 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2414 {
2415         struct drm_i915_private *dev_priv = inode->i_private;
2416
2417         intel_guc_log_relay_close(&dev_priv->guc.log);
2418
2419         return 0;
2420 }
2421
/* debugfs hooks for the GuC log relay: open, flush-on-write, close. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2428
2429 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2430 {
2431         u8 val;
2432         static const char * const sink_status[] = {
2433                 "inactive",
2434                 "transition to active, capture and display",
2435                 "active, display from RFB",
2436                 "active, capture and display on sink device timings",
2437                 "transition to inactive, capture and display, timing re-sync",
2438                 "reserved",
2439                 "reserved",
2440                 "sink internal error",
2441         };
2442         struct drm_connector *connector = m->private;
2443         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2444         struct intel_dp *intel_dp =
2445                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2446         int ret;
2447
2448         if (!CAN_PSR(dev_priv)) {
2449                 seq_puts(m, "PSR Unsupported\n");
2450                 return -ENODEV;
2451         }
2452
2453         if (connector->status != connector_status_connected)
2454                 return -ENODEV;
2455
2456         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2457
2458         if (ret == 1) {
2459                 const char *str = "unknown";
2460
2461                 val &= DP_PSR_SINK_STATE_MASK;
2462                 if (val < ARRAY_SIZE(sink_status))
2463                         str = sink_status[val];
2464                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2465         } else {
2466                 return ret;
2467         }
2468
2469         return 0;
2470 }
2471 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2472
/*
 * Print the source-side PSR hardware state, decoding the state field of
 * EDP_PSR2_STATUS or EDP_PSR_STATUS (depending on which PSR version is
 * enabled) into a human-readable name alongside the raw register value.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		/* Out-of-range state fields are reported as "unknown". */
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2518
/*
 * Full eDP PSR status report: sink capability, current PSR mode, source
 * control/state registers, busy frontbuffer bits and (where available)
 * performance counters and PSR2 selective-update block counts.
 * Takes a runtime-pm wakeref and psr->lock around the register reads.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Without sink support there is no source state worth dumping. */
	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* Pick the control register matching the enabled PSR version. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			/* Each register packs the SU count for 3 frames. */
			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2607
/*
 * debugfs write hook: set the PSR debug mode.  The mode change needs
 * modeset locks, so acquire them with the standard drm_modeset_acquire
 * backoff-and-retry dance under a runtime-pm wakeref.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	intel_wakeref_t wakeref;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	wakeref = intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	/* -EDEADLK means lock contention: back off and retry the whole set. */
	if (ret == -EDEADLK) {
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv, wakeref);

	return ret;
}
2640
2641 static int
2642 i915_edp_psr_debug_get(void *data, u64 *val)
2643 {
2644         struct drm_i915_private *dev_priv = data;
2645
2646         if (!CAN_PSR(dev_priv))
2647                 return -ENODEV;
2648
2649         *val = READ_ONCE(dev_priv->psr.debug);
2650         return 0;
2651 }
2652
2653 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2654                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2655                         "%llu\n");
2656
/*
 * Report GPU package energy consumption in microjoules: read the RAPL
 * energy-unit exponent from MSR_RAPL_POWER_UNIT (bits 12:8, i.e. energy
 * granularity of 1/2^units J as implied by the shift below) and scale the
 * hardware energy counter accordingly.  Gen6+ with a readable MSR only.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* 'power' first holds the raw power-unit MSR value ... */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	units = (power & 0x1f00) >> 8;
	/* ... and is then reused for the energy-status register. */
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2679
/*
 * Report runtime power-management state: device wakeref status, GT
 * idleness, IRQ state, the PM usage count (when CONFIG_PM) and the PCI
 * power state.  Deliberately prints even when runtime PM is unsupported,
 * after noting that fact.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* With wakeref debugging enabled, dump the outstanding wakerefs. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2713
/*
 * List every power well with its use count, and under each well the use
 * count of every display power domain it serves.  Holds the power-domains
 * lock so the counts are read consistently.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2741
/*
 * Report CSR/DMC firmware state: load status, firmware path and version,
 * DC-state transition counters (platform dependent) and the raw program
 * base / SSP base / HTP registers.  Takes a runtime-pm wakeref around the
 * register reads.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers are dumped. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counter register layout is only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2783
2784 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2785                                  struct drm_display_mode *mode)
2786 {
2787         int i;
2788
2789         for (i = 0; i < tabs; i++)
2790                 seq_putc(m, '\t');
2791
2792         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2793 }
2794
/*
 * Print one encoder on a CRTC together with every connector attached to
 * it; for connected connectors also print the CRTC's current mode.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* The connector line continues with the CRTC mode. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2823
/*
 * Print a CRTC's primary-plane framebuffer (id, position, size) if one is
 * bound, then recurse into every encoder attached to the CRTC.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; >> 16 gives pixels. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2842
2843 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2844 {
2845         struct drm_display_mode *mode = panel->fixed_mode;
2846
2847         seq_printf(m, "\tfixed mode:\n");
2848         intel_seq_print_mode(m, 2, mode);
2849 }
2850
2851 static void intel_dp_info(struct seq_file *m,
2852                           struct intel_connector *intel_connector)
2853 {
2854         struct intel_encoder *intel_encoder = intel_connector->encoder;
2855         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2856
2857         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2858         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2859         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2860                 intel_panel_info(m, &intel_connector->panel);
2861
2862         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2863                                 &intel_dp->aux);
2864 }
2865
2866 static void intel_dp_mst_info(struct seq_file *m,
2867                           struct intel_connector *intel_connector)
2868 {
2869         struct intel_encoder *intel_encoder = intel_connector->encoder;
2870         struct intel_dp_mst_encoder *intel_mst =
2871                 enc_to_mst(&intel_encoder->base);
2872         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2873         struct intel_dp *intel_dp = &intel_dig_port->dp;
2874         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2875                                         intel_connector->port);
2876
2877         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2878 }
2879
2880 static void intel_hdmi_info(struct seq_file *m,
2881                             struct intel_connector *intel_connector)
2882 {
2883         struct intel_encoder *intel_encoder = intel_connector->encoder;
2884         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2885
2886         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2887 }
2888
/* LVDS connectors only have their fixed panel mode to report. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2894
/*
 * Print a connector's identity and status; for connected connectors also
 * print display info, type-specific details (DP/MST/LVDS/HDMI) and the
 * full mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	/* Nothing more to say about a disconnected connector. */
	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tname: %s\n", connector->display_info.name);
	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Type-specific details need an attached encoder. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2945
2946 static const char *plane_type(enum drm_plane_type type)
2947 {
2948         switch (type) {
2949         case DRM_PLANE_TYPE_OVERLAY:
2950                 return "OVL";
2951         case DRM_PLANE_TYPE_PRIMARY:
2952                 return "PRI";
2953         case DRM_PLANE_TYPE_CURSOR:
2954                 return "CUR";
2955         /*
2956          * Deliberately omitting default: to generate compiler warnings
2957          * when a new drm_plane_type gets added.
2958          */
2959         }
2960
2961         return "unknown";
2962 }
2963
/*
 * Format a plane rotation bitmask into buf: every set ROTATE_/REFLECT_
 * flag as text, followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2980
/*
 * intel_plane_info - dump the committed state of every plane on a CRTC.
 *
 * Prints plane id, type, CRTC-space position/size, 16.16 fixed-point
 * source window, pixel format and rotation for each plane attached to
 * @intel_crtc.  The caller holds the CRTC's modeset lock (see
 * i915_display_info()) so plane->state cannot change underneath us.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_plane *intel_plane;

        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
                struct drm_plane_state *state;
                struct drm_plane *plane = &intel_plane->base;
                struct drm_format_name_buf format_name;
                char rot_str[48];

                if (!plane->state) {
                        seq_puts(m, "plane->state is NULL!\n");
                        continue;
                }

                state = plane->state;

                if (state->fb) {
                        drm_get_format_name(state->fb->format->format,
                                            &format_name);
                } else {
                        /* No framebuffer bound, so no format to decode. */
                        sprintf(format_name.str, "N/A");
                }

                plane_rotation(rot_str, sizeof(rot_str), state->rotation);

                /*
                 * src_* are 16.16 fixed point; "(frac & 0xffff) * 15625 >> 10"
                 * equals frac * 1000000 / 65536, i.e. the fractional part
                 * converted to decimal (millionths) for printing.
                 */
                seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
                           plane->base.id,
                           plane_type(intel_plane->base.type),
                           state->crtc_x, state->crtc_y,
                           state->crtc_w, state->crtc_h,
                           (state->src_x >> 16),
                           ((state->src_x & 0xffff) * 15625) >> 10,
                           (state->src_y >> 16),
                           ((state->src_y & 0xffff) * 15625) >> 10,
                           (state->src_w >> 16),
                           ((state->src_w & 0xffff) * 15625) >> 10,
                           (state->src_h >> 16),
                           ((state->src_h & 0xffff) * 15625) >> 10,
                           format_name.str,
                           rot_str);
        }
}
3026
/*
 * intel_scaler_info - dump the pipe scaler state of a CRTC.
 *
 * Prints the number of scalers, the scaler_users bitmask and scaler_id
 * from the CRTC's software state, then per-scaler in-use/mode info.
 * Platforms without pipe scalers get a one-line notice instead.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct intel_crtc_state *pipe_config;
        int num_scalers = intel_crtc->num_scalers;
        int i;

        pipe_config = to_intel_crtc_state(intel_crtc->base.state);

        /* Not all platforms have a scaler */
        if (num_scalers) {
                seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
                           num_scalers,
                           pipe_config->scaler_state.scaler_users,
                           pipe_config->scaler_state.scaler_id);

                for (i = 0; i < num_scalers; i++) {
                        struct intel_scaler *sc =
                                        &pipe_config->scaler_state.scalers[i];

                        seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
                                   i, yesno(sc->in_use), sc->mode);
                }
                seq_puts(m, "\n");
        } else {
                seq_puts(m, "\tNo scalers available on this platform\n");
        }
}
3054
3055 static int i915_display_info(struct seq_file *m, void *unused)
3056 {
3057         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3058         struct drm_device *dev = &dev_priv->drm;
3059         struct intel_crtc *crtc;
3060         struct drm_connector *connector;
3061         struct drm_connector_list_iter conn_iter;
3062         intel_wakeref_t wakeref;
3063
3064         wakeref = intel_runtime_pm_get(dev_priv);
3065
3066         seq_printf(m, "CRTC info\n");
3067         seq_printf(m, "---------\n");
3068         for_each_intel_crtc(dev, crtc) {
3069                 struct intel_crtc_state *pipe_config;
3070
3071                 drm_modeset_lock(&crtc->base.mutex, NULL);
3072                 pipe_config = to_intel_crtc_state(crtc->base.state);
3073
3074                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3075                            crtc->base.base.id, pipe_name(crtc->pipe),
3076                            yesno(pipe_config->base.active),
3077                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3078                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3079
3080                 if (pipe_config->base.active) {
3081                         struct intel_plane *cursor =
3082                                 to_intel_plane(crtc->base.cursor);
3083
3084                         intel_crtc_info(m, crtc);
3085
3086                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3087                                    yesno(cursor->base.state->visible),
3088                                    cursor->base.state->crtc_x,
3089                                    cursor->base.state->crtc_y,
3090                                    cursor->base.state->crtc_w,
3091                                    cursor->base.state->crtc_h,
3092                                    cursor->cursor.base);
3093                         intel_scaler_info(m, crtc);
3094                         intel_plane_info(m, crtc);
3095                 }
3096
3097                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3098                            yesno(!crtc->cpu_fifo_underrun_disabled),
3099                            yesno(!crtc->pch_fifo_underrun_disabled));
3100                 drm_modeset_unlock(&crtc->base.mutex);
3101         }
3102
3103         seq_printf(m, "\n");
3104         seq_printf(m, "Connector info\n");
3105         seq_printf(m, "--------------\n");
3106         mutex_lock(&dev->mode_config.mutex);
3107         drm_connector_list_iter_begin(dev, &conn_iter);
3108         drm_for_each_connector_iter(connector, &conn_iter)
3109                 intel_connector_info(m, connector);
3110         drm_connector_list_iter_end(&conn_iter);
3111         mutex_unlock(&dev->mode_config.mutex);
3112
3113         intel_runtime_pm_put(dev_priv, wakeref);
3114
3115         return 0;
3116 }
3117
3118 static int i915_engine_info(struct seq_file *m, void *unused)
3119 {
3120         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3121         struct intel_engine_cs *engine;
3122         intel_wakeref_t wakeref;
3123         enum intel_engine_id id;
3124         struct drm_printer p;
3125
3126         wakeref = intel_runtime_pm_get(dev_priv);
3127
3128         seq_printf(m, "GT awake? %s (epoch %u)\n",
3129                    yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3130         seq_printf(m, "Global active requests: %d\n",
3131                    dev_priv->gt.active_requests);
3132         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3133                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3134
3135         p = drm_seq_file_printer(m);
3136         for_each_engine(engine, dev_priv, id)
3137                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3138
3139         intel_runtime_pm_put(dev_priv, wakeref);
3140
3141         return 0;
3142 }
3143
3144 static int i915_rcs_topology(struct seq_file *m, void *unused)
3145 {
3146         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3147         struct drm_printer p = drm_seq_file_printer(m);
3148
3149         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3150
3151         return 0;
3152 }
3153
3154 static int i915_shrinker_info(struct seq_file *m, void *unused)
3155 {
3156         struct drm_i915_private *i915 = node_to_i915(m->private);
3157
3158         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3159         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3160
3161         return 0;
3162 }
3163
/*
 * i915_shared_dplls_info - dump the software-tracked state of all shared
 * display PLLs.
 *
 * For each shared DPLL this prints the crtc_mask/active_mask/on status and
 * the cached hardware register state.  The mg_* fields are only meaningful
 * on platforms with MG/Type-C PLLs; on others they read as the cached
 * (unused) zero state -- NOTE(review): presumed from the field names,
 * confirm against intel_dpll_mgr.c.  Runs under the global modeset lock so
 * the PLL state cannot change while printing.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        drm_modeset_lock_all(dev);
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
                           pll->info->id);
                seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
                           pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
                seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
                seq_printf(m, " dpll_md: 0x%08x\n",
                           pll->state.hw_state.dpll_md);
                seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
                seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
                seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
                seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
                seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
                seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
                           pll->state.hw_state.mg_refclkin_ctl);
                seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_coreclkctl1);
                seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
                           pll->state.hw_state.mg_clktop2_hsclkctl);
                seq_printf(m, " mg_pll_div0:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div0);
                seq_printf(m, " mg_pll_div1:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div1);
                seq_printf(m, " mg_pll_lf:    0x%08x\n",
                           pll->state.hw_state.mg_pll_lf);
                seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
                           pll->state.hw_state.mg_pll_frac_lock);
                seq_printf(m, " mg_pll_ssc:   0x%08x\n",
                           pll->state.hw_state.mg_pll_ssc);
                seq_printf(m, " mg_pll_bias:  0x%08x\n",
                           pll->state.hw_state.mg_pll_bias);
                seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_tdc_coldst_bias);
        }
        drm_modeset_unlock_all(dev);

        return 0;
}
3212
3213 static int i915_wa_registers(struct seq_file *m, void *unused)
3214 {
3215         struct drm_i915_private *i915 = node_to_i915(m->private);
3216         const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3217         struct i915_wa *wa;
3218         unsigned int i;
3219
3220         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3221         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3222                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3223                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3224
3225         return 0;
3226 }
3227
3228 static int i915_ipc_status_show(struct seq_file *m, void *data)
3229 {
3230         struct drm_i915_private *dev_priv = m->private;
3231
3232         seq_printf(m, "Isochronous Priority Control: %s\n",
3233                         yesno(dev_priv->ipc_enabled));
3234         return 0;
3235 }
3236
3237 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3238 {
3239         struct drm_i915_private *dev_priv = inode->i_private;
3240
3241         if (!HAS_IPC(dev_priv))
3242                 return -ENODEV;
3243
3244         return single_open(file, i915_ipc_status_show, dev_priv);
3245 }
3246
/*
 * i915_ipc_status_write - toggle IPC from userspace.
 *
 * Parses a boolean ("0"/"1"/"y"/"n"...) from @ubuf and applies it while
 * holding a runtime PM wakeref.  Returns @len on success or the negative
 * kstrtobool_from_user() error.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        intel_wakeref_t wakeref;
        bool enable;
        int ret;

        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;

        with_intel_runtime_pm(dev_priv, wakeref) {
                if (!dev_priv->ipc_enabled && enable)
                        DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
                /*
                 * Force watermark recomputation on the next commit, since
                 * the currently-programmed WMs were derived with the old
                 * IPC setting.
                 */
                dev_priv->wm.distrust_bios_wm = true;
                dev_priv->ipc_enabled = enable;
                intel_enable_ipc(dev_priv);
        }

        return len;
}
3270
/* debugfs i915_ipc_status: read the current IPC state, write a bool to toggle it. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3279
/*
 * i915_ddb_info - dump the display data buffer (DDB) allocations per pipe.
 *
 * Gen9+ only (returns -ENODEV otherwise).  For every CRTC prints each
 * plane's DDB start/end/size followed by the cursor's, taken from the
 * committed software state under the global modeset lock.
 *
 * NOTE(review): only the plane_ddb_y allocations are printed; the
 * plane_ddb_uv (planar/NV12 chroma) entries are not dumped -- confirm
 * whether that is intentional.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_entry *entry;
        struct intel_crtc *crtc;

        if (INTEL_GEN(dev_priv) < 9)
                return -ENODEV;

        drm_modeset_lock_all(dev);

        seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                enum pipe pipe = crtc->pipe;
                enum plane_id plane_id;

                seq_printf(m, "Pipe %c\n", pipe_name(pipe));

                for_each_plane_id_on_crtc(crtc, plane_id) {
                        entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
                        /* plane_id is zero-based; print as 1-based for humans. */
                        seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
                                   entry->start, entry->end,
                                   skl_ddb_entry_size(entry));
                }

                entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
                seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }

        drm_modeset_unlock_all(dev);

        return 0;
}
3318
/*
 * drrs_status_per_crtc - print DRRS (Dynamic Refresh Rate Switching)
 * status for one CRTC.
 *
 * Lists the connectors driven by @intel_crtc, the VBT-declared DRRS type,
 * and -- if the committed CRTC state has DRRS -- the live state under
 * drrs->mutex: busy frontbuffer bits, current high/low refresh-rate mode
 * and the corresponding vrefresh.  Caller holds the modeset locks (see
 * i915_drrs_status()).
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Name every connector currently routed to this CRTC. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                /* drrs->mutex guards drrs->dp and refresh_rate_type. */
                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                                        drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                                drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3393
3394 static int i915_drrs_status(struct seq_file *m, void *unused)
3395 {
3396         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3397         struct drm_device *dev = &dev_priv->drm;
3398         struct intel_crtc *intel_crtc;
3399         int active_crtc_cnt = 0;
3400
3401         drm_modeset_lock_all(dev);
3402         for_each_intel_crtc(dev, intel_crtc) {
3403                 if (intel_crtc->base.state->active) {
3404                         active_crtc_cnt++;
3405                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3406
3407                         drrs_status_per_crtc(m, dev, intel_crtc);
3408                 }
3409         }
3410         drm_modeset_unlock_all(dev);
3411
3412         if (!active_crtc_cnt)
3413                 seq_puts(m, "No active crtc found\n");
3414
3415         return 0;
3416 }
3417
3418 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3419 {
3420         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3421         struct drm_device *dev = &dev_priv->drm;
3422         struct intel_encoder *intel_encoder;
3423         struct intel_digital_port *intel_dig_port;
3424         struct drm_connector *connector;
3425         struct drm_connector_list_iter conn_iter;
3426
3427         drm_connector_list_iter_begin(dev, &conn_iter);
3428         drm_for_each_connector_iter(connector, &conn_iter) {
3429                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3430                         continue;
3431
3432                 intel_encoder = intel_attached_encoder(connector);
3433                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3434                         continue;
3435
3436                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3437                 if (!intel_dig_port->dp.can_mst)
3438                         continue;
3439
3440                 seq_printf(m, "MST Source Port %c\n",
3441                            port_name(intel_dig_port->base.port));
3442                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3443         }
3444         drm_connector_list_iter_end(&conn_iter);
3445
3446         return 0;
3447 }
3448
/*
 * i915_displayport_test_active_write - arm/disarm DP compliance testing.
 *
 * Copies an integer from userspace and, for every connected (non-MST)
 * DisplayPort connector, sets compliance.test_active iff the value is
 * exactly 1.  Returns @len on success, 0 for an empty write, or a
 * negative errno from the copy/parse.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
                                                  const char __user *ubuf,
                                                  size_t len, loff_t *offp)
{
        char *input_buffer;
        int status = 0;
        struct drm_device *dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;
        int val = 0;

        dev = ((struct seq_file *)file->private_data)->private;

        if (len == 0)
                return 0;

        /* NUL-terminated copy of the user buffer; freed below. */
        input_buffer = memdup_user_nul(ubuf, len);
        if (IS_ERR(input_buffer))
                return PTR_ERR(input_buffer);

        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        /* Re-parsed per connector; the input does not change. */
                        status = kstrtoint(input_buffer, 10, &val);
                        if (status < 0)
                                break;
                        DRM_DEBUG_DRIVER("Got %d for test active\n", val);
                        /* To prevent erroneous activation of the compliance
                         * testing code, only accept an actual value of 1 here
                         */
                        if (val == 1)
                                intel_dp->compliance.test_active = 1;
                        else
                                intel_dp->compliance.test_active = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        kfree(input_buffer);
        if (status < 0)
                return status;

        *offp += len;
        return len;
}
3507
3508 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3509 {
3510         struct drm_i915_private *dev_priv = m->private;
3511         struct drm_device *dev = &dev_priv->drm;
3512         struct drm_connector *connector;
3513         struct drm_connector_list_iter conn_iter;
3514         struct intel_dp *intel_dp;
3515
3516         drm_connector_list_iter_begin(dev, &conn_iter);
3517         drm_for_each_connector_iter(connector, &conn_iter) {
3518                 struct intel_encoder *encoder;
3519
3520                 if (connector->connector_type !=
3521                     DRM_MODE_CONNECTOR_DisplayPort)
3522                         continue;
3523
3524                 encoder = to_intel_encoder(connector->encoder);
3525                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3526                         continue;
3527
3528                 if (encoder && connector->status == connector_status_connected) {
3529                         intel_dp = enc_to_intel_dp(&encoder->base);
3530                         if (intel_dp->compliance.test_active)
3531                                 seq_puts(m, "1");
3532                         else
3533                                 seq_puts(m, "0");
3534                 } else
3535                         seq_puts(m, "0");
3536         }
3537         drm_connector_list_iter_end(&conn_iter);
3538
3539         return 0;
3540 }
3541
3542 static int i915_displayport_test_active_open(struct inode *inode,
3543                                              struct file *file)
3544 {
3545         return single_open(file, i915_displayport_test_active_show,
3546                            inode->i_private);
3547 }
3548
/* debugfs i915_dp_test_active: read arm state, write "1" to arm compliance testing. */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3557
/*
 * i915_displayport_test_data_show - dump DP compliance test payload.
 *
 * For each connected (non-MST) DP connector prints the data relevant to
 * the pending compliance test: the read EDID checksum/value for
 * DP_TEST_LINK_EDID_READ, or hdisplay/vdisplay/bpc for
 * DP_TEST_LINK_VIDEO_PATTERN.  Other test types print nothing;
 * disconnected connectors print "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (intel_dp->compliance.test_type ==
                            DP_TEST_LINK_EDID_READ)
                                seq_printf(m, "%lx",
                                           intel_dp->compliance.test_data.edid);
                        else if (intel_dp->compliance.test_type ==
                                 DP_TEST_LINK_VIDEO_PATTERN) {
                                seq_printf(m, "hdisplay: %d\n",
                                           intel_dp->compliance.test_data.hdisplay);
                                seq_printf(m, "vdisplay: %d\n",
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
                        }
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3601
3602 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3603 {
3604         struct drm_i915_private *dev_priv = m->private;
3605         struct drm_device *dev = &dev_priv->drm;
3606         struct drm_connector *connector;
3607         struct drm_connector_list_iter conn_iter;
3608         struct intel_dp *intel_dp;
3609
3610         drm_connector_list_iter_begin(dev, &conn_iter);
3611         drm_for_each_connector_iter(connector, &conn_iter) {
3612                 struct intel_encoder *encoder;
3613
3614                 if (connector->connector_type !=
3615                     DRM_MODE_CONNECTOR_DisplayPort)
3616                         continue;
3617
3618                 encoder = to_intel_encoder(connector->encoder);
3619                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3620                         continue;
3621
3622                 if (encoder && connector->status == connector_status_connected) {
3623                         intel_dp = enc_to_intel_dp(&encoder->base);
3624                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3625                 } else
3626                         seq_puts(m, "0");
3627         }
3628         drm_connector_list_iter_end(&conn_iter);
3629
3630         return 0;
3631 }
3632 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3633
/*
 * wm_latency_show - print a watermark latency table.
 *
 * @wm holds up to 8 per-level latency values; the number of valid levels
 * depends on the platform.  Raw values are stored in 0.5us units for
 * WM1+ on pre-gen9 ILK-class hardware, and in 1us units on gen9+, VLV,
 * CHV and G4X; they are scaled to tenths of a microsecond here so the
 * "%u.%u usec" print is uniform.
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;

        /* Per-platform count of valid watermark levels. */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++) {
                unsigned int latency = wm[level];

                /*
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 */
                if (INTEL_GEN(dev_priv) >= 9 ||
                    IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv) ||
                    IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                seq_printf(m, "WM%d %u (%u.%u usec)\n",
                           level, wm[level], latency / 10, latency % 10);
        }

        drm_modeset_unlock_all(dev);
}
3673
3674 static int pri_wm_latency_show(struct seq_file *m, void *data)
3675 {
3676         struct drm_i915_private *dev_priv = m->private;
3677         const u16 *latencies;
3678
3679         if (INTEL_GEN(dev_priv) >= 9)
3680                 latencies = dev_priv->wm.skl_latency;
3681         else
3682                 latencies = dev_priv->wm.pri_latency;
3683
3684         wm_latency_show(m, latencies);
3685
3686         return 0;
3687 }
3688
3689 static int spr_wm_latency_show(struct seq_file *m, void *data)
3690 {
3691         struct drm_i915_private *dev_priv = m->private;
3692         const u16 *latencies;
3693
3694         if (INTEL_GEN(dev_priv) >= 9)
3695                 latencies = dev_priv->wm.skl_latency;
3696         else
3697                 latencies = dev_priv->wm.spr_latency;
3698
3699         wm_latency_show(m, latencies);
3700
3701         return 0;
3702 }
3703
3704 static int cur_wm_latency_show(struct seq_file *m, void *data)
3705 {
3706         struct drm_i915_private *dev_priv = m->private;
3707         const u16 *latencies;
3708
3709         if (INTEL_GEN(dev_priv) >= 9)
3710                 latencies = dev_priv->wm.skl_latency;
3711         else
3712                 latencies = dev_priv->wm.cur_latency;
3713
3714         wm_latency_show(m, latencies);
3715
3716         return 0;
3717 }
3718
3719 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3720 {
3721         struct drm_i915_private *dev_priv = inode->i_private;
3722
3723         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3724                 return -ENODEV;
3725
3726         return single_open(file, pri_wm_latency_show, dev_priv);
3727 }
3728
3729 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3730 {
3731         struct drm_i915_private *dev_priv = inode->i_private;
3732
3733         if (HAS_GMCH_DISPLAY(dev_priv))
3734                 return -ENODEV;
3735
3736         return single_open(file, spr_wm_latency_show, dev_priv);
3737 }
3738
3739 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3740 {
3741         struct drm_i915_private *dev_priv = inode->i_private;
3742
3743         if (HAS_GMCH_DISPLAY(dev_priv))
3744                 return -ENODEV;
3745
3746         return single_open(file, cur_wm_latency_show, dev_priv);
3747 }
3748
/*
 * wm_latency_write - common write handler for the WM latency debugfs files.
 *
 * Parses exactly num_levels space-separated u16 values from @ubuf and
 * stores them into @wm under the global modeset lock.  Returns @len on
 * success, -EINVAL if the input is too long or does not contain exactly
 * num_levels values, or -EFAULT on a failed copy.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, u16 wm[8])
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        u16 new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
        char tmp[32];

        /* Same per-platform level count as wm_latency_show(). */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        /* Need room for the terminating NUL appended below. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
                     &new[0], &new[1], &new[2], &new[3],
                     &new[4], &new[5], &new[6], &new[7]);
        /* The user must supply a value for every valid level, no more. */
        if (ret != num_levels)
                return -EINVAL;

        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++)
                wm[level] = new[level];

        drm_modeset_unlock_all(dev);

        return len;
}
3793
3794
3795 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3796                                     size_t len, loff_t *offp)
3797 {
3798         struct seq_file *m = file->private_data;
3799         struct drm_i915_private *dev_priv = m->private;
3800         u16 *latencies;
3801
3802         if (INTEL_GEN(dev_priv) >= 9)
3803                 latencies = dev_priv->wm.skl_latency;
3804         else
3805                 latencies = dev_priv->wm.pri_latency;
3806
3807         return wm_latency_write(file, ubuf, len, offp, latencies);
3808 }
3809
3810 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3811                                     size_t len, loff_t *offp)
3812 {
3813         struct seq_file *m = file->private_data;
3814         struct drm_i915_private *dev_priv = m->private;
3815         u16 *latencies;
3816
3817         if (INTEL_GEN(dev_priv) >= 9)
3818                 latencies = dev_priv->wm.skl_latency;
3819         else
3820                 latencies = dev_priv->wm.spr_latency;
3821
3822         return wm_latency_write(file, ubuf, len, offp, latencies);
3823 }
3824
3825 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3826                                     size_t len, loff_t *offp)
3827 {
3828         struct seq_file *m = file->private_data;
3829         struct drm_i915_private *dev_priv = m->private;
3830         u16 *latencies;
3831
3832         if (INTEL_GEN(dev_priv) >= 9)
3833                 latencies = dev_priv->wm.skl_latency;
3834         else
3835                 latencies = dev_priv->wm.cur_latency;
3836
3837         return wm_latency_write(file, ubuf, len, offp, latencies);
3838 }
3839
/* debugfs i915_pri_wm_latency: read/write primary-plane WM latencies. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3848
/* debugfs i915_spr_wm_latency: read/write sprite-plane WM latencies. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3857
/* debugfs i915_cur_wm_latency: read/write cursor WM latencies. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3866
3867 static int
3868 i915_wedged_get(void *data, u64 *val)
3869 {
3870         struct drm_i915_private *dev_priv = data;
3871
3872         *val = i915_terminally_wedged(&dev_priv->gpu_error);
3873
3874         return 0;
3875 }
3876
/*
 * Writing to i915_wedged injects a GPU hang on the engine mask given by
 * @val (with error capture), unless a reset is already in progress.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	/* Refuse to pile a manual hang on top of an ongoing reset. */
	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}
3897
/* debugfs i915_wedged: read terminal-wedge state, write to inject a hang. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
3901
/*
 * Helper for the i915_ring_{missed,test}_irq controls: wait for the GPU to
 * idle, update the given gpu_error mask, then flush the idle worker so the
 * interrupt is actually disarmed (see comment below).
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	/* Only change the mask once the GPU has gone idle. */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
3932
3933 static int
3934 i915_ring_missed_irq_get(void *data, u64 *val)
3935 {
3936         struct drm_i915_private *dev_priv = data;
3937
3938         *val = dev_priv->gpu_error.missed_irq_rings;
3939         return 0;
3940 }
3941
3942 static int
3943 i915_ring_missed_irq_set(void *data, u64 val)
3944 {
3945         struct drm_i915_private *i915 = data;
3946
3947         return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
3948 }
3949
/* debugfs i915_ring_missed_irq: engine mask for missed-interrupt injection. */
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
3953
3954 static int
3955 i915_ring_test_irq_get(void *data, u64 *val)
3956 {
3957         struct drm_i915_private *dev_priv = data;
3958
3959         *val = dev_priv->gpu_error.test_irq_rings;
3960
3961         return 0;
3962 }
3963
3964 static int
3965 i915_ring_test_irq_set(void *data, u64 val)
3966 {
3967         struct drm_i915_private *i915 = data;
3968
3969         /* GuC keeps the user interrupt permanently enabled for submission */
3970         if (USES_GUC_SUBMISSION(i915))
3971                 return -ENODEV;
3972
3973         /*
3974          * From icl, we can no longer individually mask interrupt generation
3975          * from each engine.
3976          */
3977         if (INTEL_GEN(i915) >= 11)
3978                 return -ENODEV;
3979
3980         val &= INTEL_INFO(i915)->ring_mask;
3981         DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
3982
3983         return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
3984 }
3985
/* debugfs i915_ring_test_irq: engine mask for interrupt masking tests. */
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
3989
/*
 * Flags accepted by the i915_drop_caches debugfs control; see
 * i915_drop_caches_set() for the action each flag triggers.
 */
#define DROP_UNBOUND    BIT(0)	/* shrink objects not bound into the GTT */
#define DROP_BOUND      BIT(1)	/* shrink objects bound into the GTT */
#define DROP_RETIRE     BIT(2)	/* retire completed requests */
#define DROP_ACTIVE     BIT(3)	/* wait for the GPU to idle first */
#define DROP_FREED      BIT(4)	/* drain the deferred-free object list */
#define DROP_SHRINK_ALL BIT(5)	/* invoke the full shrinker */
#define DROP_IDLE       BIT(6)	/* flush workers until the GT sleeps */
#define DROP_RESET_ACTIVE       BIT(7)	/* wedge if engines refuse to idle, then reset */
#define DROP_RESET_SEQNO        BIT(8)	/* only forces the struct_mutex cycle below */
#define DROP_ALL (DROP_UNBOUND  | \
		  DROP_BOUND    | \
		  DROP_RETIRE   | \
		  DROP_ACTIVE   | \
		  DROP_FREED    | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE     | \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
4008 static int
4009 i915_drop_caches_get(void *data, u64 *val)
4010 {
4011         *val = DROP_ALL;
4012
4013         return 0;
4014 }
4015
/*
 * Writing a mask of DROP_* flags forces the corresponding GPU state and
 * caches to be dropped, so tests can start from a known-clean state.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	intel_wakeref_t wakeref;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	/* Keep the device awake for the duration of the operation. */
	wakeref = intel_runtime_pm_get(i915);

	/* If the engines never settle, declare the device wedged. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Recover a terminally wedged device with a full GPU reset. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Enter a simulated memory-reclaim context for the shrinker calls. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/* Flush retirement and the idle worker until the GT sleeps. */
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915, wakeref);

	return ret;
}
4080
/* debugfs i915_gem_drop_caches: read supported flags, write to drop caches. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4084
4085 static int
4086 i915_cache_sharing_get(void *data, u64 *val)
4087 {
4088         struct drm_i915_private *dev_priv = data;
4089         intel_wakeref_t wakeref;
4090         u32 snpcr = 0;
4091
4092         if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
4093                 return -ENODEV;
4094
4095         with_intel_runtime_pm(dev_priv, wakeref)
4096                 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4097
4098         *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4099
4100         return 0;
4101 }
4102
4103 static int
4104 i915_cache_sharing_set(void *data, u64 val)
4105 {
4106         struct drm_i915_private *dev_priv = data;
4107         intel_wakeref_t wakeref;
4108
4109         if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
4110                 return -ENODEV;
4111
4112         if (val > 3)
4113                 return -EINVAL;
4114
4115         DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4116         with_intel_runtime_pm(dev_priv, wakeref) {
4117                 u32 snpcr;
4118
4119                 /* Update the cache sharing policy here as well */
4120                 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4121                 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4122                 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4123                 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4124         }
4125
4126         return 0;
4127 }
4128
/* debugfs i915_cache_sharing: read/write the MBC snoop policy (0-3). */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4132
4133 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4134                                           struct sseu_dev_info *sseu)
4135 {
4136 #define SS_MAX 2
4137         const int ss_max = SS_MAX;
4138         u32 sig1[SS_MAX], sig2[SS_MAX];
4139         int ss;
4140
4141         sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4142         sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4143         sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4144         sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4145
4146         for (ss = 0; ss < ss_max; ss++) {
4147                 unsigned int eu_cnt;
4148
4149                 if (sig1[ss] & CHV_SS_PG_ENABLE)
4150                         /* skip disabled subslice */
4151                         continue;
4152
4153                 sseu->slice_mask = BIT(0);
4154                 sseu->subslice_mask[0] |= BIT(ss);
4155                 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4156                          ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4157                          ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4158                          ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4159                 sseu->eu_total += eu_cnt;
4160                 sseu->eu_per_subslice = max_t(unsigned int,
4161                                               sseu->eu_per_subslice, eu_cnt);
4162         }
4163 #undef SS_MAX
4164 }
4165
/*
 * Read the gen10+ power-gating ACK registers and fill @sseu with the
 * slice/subslice/EU configuration that is currently powered up.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the EU pairs in each half (A/B) of a subslice pair. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each set ACK bit represents a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4221
/*
 * Read the gen9 power-gating ACK registers and fill @sseu with the
 * slice/subslice/EU configuration that is currently powered up.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the EU pairs in each half (A/B) of a subslice pair. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/* gen9 BC copies the static subslice mask from runtime info */
		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			/* gen9 LP derives the subslice mask from the ACKs */
			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* each set ACK bit represents a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4277
4278 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4279                                          struct sseu_dev_info *sseu)
4280 {
4281         u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4282         int s;
4283
4284         sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4285
4286         if (sseu->slice_mask) {
4287                 sseu->eu_per_subslice =
4288                         RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
4289                 for (s = 0; s < fls(sseu->slice_mask); s++) {
4290                         sseu->subslice_mask[s] =
4291                                 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
4292                 }
4293                 sseu->eu_total = sseu->eu_per_subslice *
4294                                  sseu_subslice_total(sseu);
4295
4296                 /* subtract fused off EU(s) from enabled slice(s) */
4297                 for (s = 0; s < fls(sseu->slice_mask); s++) {
4298                         u8 subslice_7eu =
4299                                 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
4300
4301                         sseu->eu_total -= hweight8(subslice_7eu);
4302                 }
4303         }
4304 }
4305
4306 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4307                                  const struct sseu_dev_info *sseu)
4308 {
4309         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4310         const char *type = is_available_info ? "Available" : "Enabled";
4311         int s;
4312
4313         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4314                    sseu->slice_mask);
4315         seq_printf(m, "  %s Slice Total: %u\n", type,
4316                    hweight8(sseu->slice_mask));
4317         seq_printf(m, "  %s Subslice Total: %u\n", type,
4318                    sseu_subslice_total(sseu));
4319         for (s = 0; s < fls(sseu->slice_mask); s++) {
4320                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4321                            s, hweight8(sseu->subslice_mask[s]));
4322         }
4323         seq_printf(m, "  %s EU Total: %u\n", type,
4324                    sseu->eu_total);
4325         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4326                    sseu->eu_per_subslice);
4327
4328         if (!is_available_info)
4329                 return;
4330
4331         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4332         if (HAS_POOLED_EU(dev_priv))
4333                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4334
4335         seq_printf(m, "  Has Slice Power Gating: %s\n",
4336                    yesno(sseu->has_slice_pg));
4337         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4338                    yesno(sseu->has_subslice_pg));
4339         seq_printf(m, "  Has EU Power Gating: %s\n",
4340                    yesno(sseu->has_eu_pg));
4341 }
4342
4343 static int i915_sseu_status(struct seq_file *m, void *unused)
4344 {
4345         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4346         struct sseu_dev_info sseu;
4347         intel_wakeref_t wakeref;
4348
4349         if (INTEL_GEN(dev_priv) < 8)
4350                 return -ENODEV;
4351
4352         seq_puts(m, "SSEU Device Info\n");
4353         i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
4354
4355         seq_puts(m, "SSEU Device Status\n");
4356         memset(&sseu, 0, sizeof(sseu));
4357         sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4358         sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
4359         sseu.max_eus_per_subslice =
4360                 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
4361
4362         with_intel_runtime_pm(dev_priv, wakeref) {
4363                 if (IS_CHERRYVIEW(dev_priv))
4364                         cherryview_sseu_device_status(dev_priv, &sseu);
4365                 else if (IS_BROADWELL(dev_priv))
4366                         broadwell_sseu_device_status(dev_priv, &sseu);
4367                 else if (IS_GEN(dev_priv, 9))
4368                         gen9_sseu_device_status(dev_priv, &sseu);
4369                 else if (INTEL_GEN(dev_priv) >= 10)
4370                         gen10_sseu_device_status(dev_priv, &sseu);
4371         }
4372
4373         i915_print_sseu_info(m, false, &sseu);
4374
4375         return 0;
4376 }
4377
4378 static int i915_forcewake_open(struct inode *inode, struct file *file)
4379 {
4380         struct drm_i915_private *i915 = inode->i_private;
4381
4382         if (INTEL_GEN(i915) < 6)
4383                 return 0;
4384
4385         file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
4386         intel_uncore_forcewake_user_get(i915);
4387
4388         return 0;
4389 }
4390
4391 static int i915_forcewake_release(struct inode *inode, struct file *file)
4392 {
4393         struct drm_i915_private *i915 = inode->i_private;
4394
4395         if (INTEL_GEN(i915) < 6)
4396                 return 0;
4397
4398         intel_uncore_forcewake_user_put(i915);
4399         intel_runtime_pm_put(i915,
4400                              (intel_wakeref_t)(uintptr_t)file->private_data);
4401
4402         return 0;
4403 }
4404
/* debugfs i915_forcewake_user: hold forcewake while the file is open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4410
4411 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4412 {
4413         struct drm_i915_private *dev_priv = m->private;
4414         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4415
4416         /* Synchronize with everything first in case there's been an HPD
4417          * storm, but we haven't finished handling it in the kernel yet
4418          */
4419         synchronize_irq(dev_priv->drm.irq);
4420         flush_work(&dev_priv->hotplug.dig_port_work);
4421         flush_work(&dev_priv->hotplug.hotplug_work);
4422
4423         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4424         seq_printf(m, "Detected: %s\n",
4425                    yesno(delayed_work_pending(&hotplug->reenable_work)));
4426
4427         return 0;
4428 }
4429
4430 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4431                                         const char __user *ubuf, size_t len,
4432                                         loff_t *offp)
4433 {
4434         struct seq_file *m = file->private_data;
4435         struct drm_i915_private *dev_priv = m->private;
4436         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4437         unsigned int new_threshold;
4438         int i;
4439         char *newline;
4440         char tmp[16];
4441
4442         if (len >= sizeof(tmp))
4443                 return -EINVAL;
4444
4445         if (copy_from_user(tmp, ubuf, len))
4446                 return -EFAULT;
4447
4448         tmp[len] = '\0';
4449
4450         /* Strip newline, if any */
4451         newline = strchr(tmp, '\n');
4452         if (newline)
4453                 *newline = '\0';
4454
4455         if (strcmp(tmp, "reset") == 0)
4456                 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4457         else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4458                 return -EINVAL;
4459
4460         if (new_threshold > 0)
4461                 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4462                               new_threshold);
4463         else
4464                 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4465
4466         spin_lock_irq(&dev_priv->irq_lock);
4467         hotplug->hpd_storm_threshold = new_threshold;
4468         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4469         for_each_hpd_pin(i)
4470                 hotplug->stats[i].count = 0;
4471         spin_unlock_irq(&dev_priv->irq_lock);
4472
4473         /* Re-enable hpd immediately if we were in an irq storm */
4474         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4475
4476         return len;
4477 }
4478
4479 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4480 {
4481         return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4482 }
4483
/* debugfs i915_hpd_storm_ctl: show/set the HPD storm threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4492
4493 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4494 {
4495         struct drm_i915_private *dev_priv = m->private;
4496
4497         seq_printf(m, "Enabled: %s\n",
4498                    yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4499
4500         return 0;
4501 }
4502
4503 static int
4504 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4505 {
4506         return single_open(file, i915_hpd_short_storm_ctl_show,
4507                            inode->i_private);
4508 }
4509
4510 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4511                                               const char __user *ubuf,
4512                                               size_t len, loff_t *offp)
4513 {
4514         struct seq_file *m = file->private_data;
4515         struct drm_i915_private *dev_priv = m->private;
4516         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4517         char *newline;
4518         char tmp[16];
4519         int i;
4520         bool new_state;
4521
4522         if (len >= sizeof(tmp))
4523                 return -EINVAL;
4524
4525         if (copy_from_user(tmp, ubuf, len))
4526                 return -EFAULT;
4527
4528         tmp[len] = '\0';
4529
4530         /* Strip newline, if any */
4531         newline = strchr(tmp, '\n');
4532         if (newline)
4533                 *newline = '\0';
4534
4535         /* Reset to the "default" state for this system */
4536         if (strcmp(tmp, "reset") == 0)
4537                 new_state = !HAS_DP_MST(dev_priv);
4538         else if (kstrtobool(tmp, &new_state) != 0)
4539                 return -EINVAL;
4540
4541         DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4542                       new_state ? "En" : "Dis");
4543
4544         spin_lock_irq(&dev_priv->irq_lock);
4545         hotplug->hpd_short_storm_enabled = new_state;
4546         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4547         for_each_hpd_pin(i)
4548                 hotplug->stats[i].count = 0;
4549         spin_unlock_irq(&dev_priv->irq_lock);
4550
4551         /* Re-enable hpd immediately if we were in an irq storm */
4552         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4553
4554         return len;
4555 }
4556
/* debugfs i915_hpd_short_storm_ctl: toggle short-pulse storm detection. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4565
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * crtc whose state supports it, waiting for any pending commit on the crtc
 * to reach the hardware first. Only eDP encoders are touched.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Nothing to do unless the active state supports DRRS. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Let any in-flight commit reach the hardware first. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this crtc. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			/* DRRS is only toggled on eDP outputs. */
			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4633
/* Write-only u64 knob; forwards the value to i915_drrs_ctl_set(). */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4635
4636 static ssize_t
4637 i915_fifo_underrun_reset_write(struct file *filp,
4638                                const char __user *ubuf,
4639                                size_t cnt, loff_t *ppos)
4640 {
4641         struct drm_i915_private *dev_priv = filp->private_data;
4642         struct intel_crtc *intel_crtc;
4643         struct drm_device *dev = &dev_priv->drm;
4644         int ret;
4645         bool reset;
4646
4647         ret = kstrtobool_from_user(ubuf, cnt, &reset);
4648         if (ret)
4649                 return ret;
4650
4651         if (!reset)
4652                 return cnt;
4653
4654         for_each_intel_crtc(dev, intel_crtc) {
4655                 struct drm_crtc_commit *commit;
4656                 struct intel_crtc_state *crtc_state;
4657
4658                 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4659                 if (ret)
4660                         return ret;
4661
4662                 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4663                 commit = crtc_state->base.commit;
4664                 if (commit) {
4665                         ret = wait_for_completion_interruptible(&commit->hw_done);
4666                         if (!ret)
4667                                 ret = wait_for_completion_interruptible(&commit->flip_done);
4668                 }
4669
4670                 if (!ret && crtc_state->base.active) {
4671                         DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4672                                       pipe_name(intel_crtc->pipe));
4673
4674                         intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4675                 }
4676
4677                 drm_modeset_unlock(&intel_crtc->base.mutex);
4678
4679                 if (ret)
4680                         return ret;
4681         }
4682
4683         ret = intel_fbc_reset_underrun(dev_priv);
4684         if (ret)
4685                 return ret;
4686
4687         return cnt;
4688 }
4689
/* Write-only debugfs node backing i915_fifo_underrun_reset. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4696
4697 static const struct drm_info_list i915_debugfs_list[] = {
4698         {"i915_capabilities", i915_capabilities, 0},
4699         {"i915_gem_objects", i915_gem_object_info, 0},
4700         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4701         {"i915_gem_stolen", i915_gem_stolen_list_info },
4702         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4703         {"i915_gem_interrupt", i915_interrupt_info, 0},
4704         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4705         {"i915_guc_info", i915_guc_info, 0},
4706         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4707         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4708         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4709         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4710         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4711         {"i915_frequency_info", i915_frequency_info, 0},
4712         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4713         {"i915_reset_info", i915_reset_info, 0},
4714         {"i915_drpc_info", i915_drpc_info, 0},
4715         {"i915_emon_status", i915_emon_status, 0},
4716         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4717         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4718         {"i915_fbc_status", i915_fbc_status, 0},
4719         {"i915_ips_status", i915_ips_status, 0},
4720         {"i915_sr_status", i915_sr_status, 0},
4721         {"i915_opregion", i915_opregion, 0},
4722         {"i915_vbt", i915_vbt, 0},
4723         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4724         {"i915_context_status", i915_context_status, 0},
4725         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4726         {"i915_swizzle_info", i915_swizzle_info, 0},
4727         {"i915_llc", i915_llc, 0},
4728         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4729         {"i915_energy_uJ", i915_energy_uJ, 0},
4730         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4731         {"i915_power_domain_info", i915_power_domain_info, 0},
4732         {"i915_dmc_info", i915_dmc_info, 0},
4733         {"i915_display_info", i915_display_info, 0},
4734         {"i915_engine_info", i915_engine_info, 0},
4735         {"i915_rcs_topology", i915_rcs_topology, 0},
4736         {"i915_shrinker_info", i915_shrinker_info, 0},
4737         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4738         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4739         {"i915_wa_registers", i915_wa_registers, 0},
4740         {"i915_ddb_info", i915_ddb_info, 0},
4741         {"i915_sseu_status", i915_sseu_status, 0},
4742         {"i915_drrs_status", i915_drrs_status, 0},
4743         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4744 };
4745 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4746
4747 static const struct i915_debugfs_files {
4748         const char *name;
4749         const struct file_operations *fops;
4750 } i915_debugfs_files[] = {
4751         {"i915_wedged", &i915_wedged_fops},
4752         {"i915_cache_sharing", &i915_cache_sharing_fops},
4753         {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4754         {"i915_ring_test_irq", &i915_ring_test_irq_fops},
4755         {"i915_gem_drop_caches", &i915_drop_caches_fops},
4756 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4757         {"i915_error_state", &i915_error_state_fops},
4758         {"i915_gpu_info", &i915_gpu_info_fops},
4759 #endif
4760         {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4761         {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4762         {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4763         {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4764         {"i915_fbc_false_color", &i915_fbc_false_color_fops},
4765         {"i915_dp_test_data", &i915_displayport_test_data_fops},
4766         {"i915_dp_test_type", &i915_displayport_test_type_fops},
4767         {"i915_dp_test_active", &i915_displayport_test_active_fops},
4768         {"i915_guc_log_level", &i915_guc_log_level_fops},
4769         {"i915_guc_log_relay", &i915_guc_log_relay_fops},
4770         {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4771         {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4772         {"i915_ipc_status", &i915_ipc_status_fops},
4773         {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4774         {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4775 };
4776
4777 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4778 {
4779         struct drm_minor *minor = dev_priv->drm.primary;
4780         struct dentry *ent;
4781         int i;
4782
4783         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4784                                   minor->debugfs_root, to_i915(minor->dev),
4785                                   &i915_forcewake_fops);
4786         if (!ent)
4787                 return -ENOMEM;
4788
4789         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4790                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4791                                           S_IRUGO | S_IWUSR,
4792                                           minor->debugfs_root,
4793                                           to_i915(minor->dev),
4794                                           i915_debugfs_files[i].fops);
4795                 if (!ent)
4796                         return -ENOMEM;
4797         }
4798
4799         return drm_debugfs_create_files(i915_debugfs_list,
4800                                         I915_DEBUGFS_ENTRIES,
4801                                         minor->debugfs_root, minor);
4802 }
4803
/* Describes one contiguous range of DPCD registers to dump. */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4814
/* DPCD register ranges dumped by i915_dpcd_show(). */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4827
4828 static int i915_dpcd_show(struct seq_file *m, void *data)
4829 {
4830         struct drm_connector *connector = m->private;
4831         struct intel_dp *intel_dp =
4832                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4833         u8 buf[16];
4834         ssize_t err;
4835         int i;
4836
4837         if (connector->status != connector_status_connected)
4838                 return -ENODEV;
4839
4840         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4841                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4842                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4843
4844                 if (b->edp &&
4845                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4846                         continue;
4847
4848                 /* low tech for now */
4849                 if (WARN_ON(size > sizeof(buf)))
4850                         continue;
4851
4852                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4853                 if (err < 0)
4854                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4855                 else
4856                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4857         }
4858
4859         return 0;
4860 }
4861 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4862
4863 static int i915_panel_show(struct seq_file *m, void *data)
4864 {
4865         struct drm_connector *connector = m->private;
4866         struct intel_dp *intel_dp =
4867                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4868
4869         if (connector->status != connector_status_connected)
4870                 return -ENODEV;
4871
4872         seq_printf(m, "Panel power up delay: %d\n",
4873                    intel_dp->panel_power_up_delay);
4874         seq_printf(m, "Panel power down delay: %d\n",
4875                    intel_dp->panel_power_down_delay);
4876         seq_printf(m, "Backlight on delay: %d\n",
4877                    intel_dp->backlight_on_delay);
4878         seq_printf(m, "Backlight off delay: %d\n",
4879                    intel_dp->backlight_off_delay);
4880
4881         return 0;
4882 }
4883 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4884
4885 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4886 {
4887         struct drm_connector *connector = m->private;
4888         struct intel_connector *intel_connector = to_intel_connector(connector);
4889
4890         if (connector->status != connector_status_connected)
4891                 return -ENODEV;
4892
4893         /* HDCP is supported by connector */
4894         if (!intel_connector->hdcp.shim)
4895                 return -EINVAL;
4896
4897         seq_printf(m, "%s:%d HDCP version: ", connector->name,
4898                    connector->base.id);
4899         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4900                    "None" : "HDCP1.4");
4901         seq_puts(m, "\n");
4902
4903         return 0;
4904 }
4905 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4906
/*
 * Report DSC/FEC state for the connector's current CRTC: whether DSC is
 * enabled in the committed state, and whether the sink advertises DSC
 * (and, for non-eDP, FEC) support in its DPCD caps.
 *
 * Takes connection_mutex and the CRTC mutex via an interruptible
 * acquire context; on -EDEADLK it backs off and retries the whole
 * sequence (the try_again loop).
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		/* failure here is reported as interrupted, not deadlock */
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* drop all held locks and retry from the top */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC is only meaningful for external DP sinks */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4960
4961 static ssize_t i915_dsc_fec_support_write(struct file *file,
4962                                           const char __user *ubuf,
4963                                           size_t len, loff_t *offp)
4964 {
4965         bool dsc_enable = false;
4966         int ret;
4967         struct drm_connector *connector =
4968                 ((struct seq_file *)file->private_data)->private;
4969         struct intel_encoder *encoder = intel_attached_encoder(connector);
4970         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4971
4972         if (len == 0)
4973                 return 0;
4974
4975         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4976                          len);
4977
4978         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4979         if (ret < 0)
4980                 return ret;
4981
4982         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4983                          (dsc_enable) ? "true" : "false");
4984         intel_dp->force_dsc_en = dsc_enable;
4985
4986         *offp += len;
4987         return len;
4988 }
4989
/* seq_file open hook; inode private data is the drm_connector. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4996
/* Read/write debugfs node: show DSC/FEC state, write to force DSC. */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
5005
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	/* DPCD dump is meaningful for any DP-class connector. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	/* Panel power/backlight timings and PSR sink state: eDP only. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	/* HDCP capability applies to DP and HDMI connectors. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	/* DSC/FEC is a gen10+ DP/eDP feature. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}