drm/i915: Replace global breadcrumbs with per-context interrupt tracking
[linux-2.6-block.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
f3cd474b 29#include <linux/debugfs.h>
e637d2cb 30#include <linux/sort.h>
d92a8cfc 31#include <linux/sched/mm.h>
4e5359cd 32#include "intel_drv.h"
a2695744 33#include "intel_guc_submission.h"
2017263e 34
9f58892e
CW
35#include "i915_reset.h"
36
36cdd013
DW
37static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
38{
39 return to_i915(node->minor->dev);
40}
41
70d39fe4
CW
/*
 * debugfs entry: dump static device capabilities — gen, platform, PCH type,
 * device-info flags, runtime info, driver caps and the current module
 * parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so modparams cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
2017263e 62
a7363de7 63static char get_active_flag(struct drm_i915_gem_object *obj)
a6172a80 64{
573adb39 65 return i915_gem_object_is_active(obj) ? '*' : ' ';
a6172a80
CW
66}
67
a7363de7 68static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b 69{
bd3d2252 70 return obj->pin_global ? 'p' : ' ';
be12a86b
TU
71}
72
a7363de7 73static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 74{
3e510a8e 75 switch (i915_gem_object_get_tiling(obj)) {
0206e353 76 default:
be12a86b
TU
77 case I915_TILING_NONE: return ' ';
78 case I915_TILING_X: return 'X';
79 case I915_TILING_Y: return 'Y';
0206e353 80 }
a6172a80
CW
81}
82
a7363de7 83static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 84{
a65adaf8 85 return obj->userfault_count ? 'g' : ' ';
be12a86b
TU
86}
87
a7363de7 88static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 89{
a4f5ea64 90 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
91}
92
ca1543be
TU
93static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
94{
95 u64 size = 0;
96 struct i915_vma *vma;
97
e2189dd0
CW
98 for_each_ggtt_vma(vma, obj) {
99 if (drm_mm_node_allocated(&vma->node))
ca1543be
TU
100 size += vma->node.size;
101 }
102
103 return size;
104}
105
7393b7ee
MA
106static const char *
107stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
108{
109 size_t x = 0;
110
111 switch (page_sizes) {
112 case 0:
113 return "";
114 case I915_GTT_PAGE_SIZE_4K:
115 return "4K";
116 case I915_GTT_PAGE_SIZE_64K:
117 return "64K";
118 case I915_GTT_PAGE_SIZE_2M:
119 return "2M";
120 default:
121 if (!buf)
122 return "M";
123
124 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
125 x += snprintf(buf + x, len - x, "2M, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
127 x += snprintf(buf + x, len - x, "64K, ");
128 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
129 x += snprintf(buf + x, len - x, "4K, ");
130 buf[x-2] = '\0';
131
132 return buf;
133 }
134}
135
37811fcc
CW
/*
 * Print a one-line summary of a GEM object: status flags, size, domains,
 * cache level, every bound VMA (with GGTT view details and fence), stolen
 * placement, last-write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex; the VMA list is only stable under it.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned VMAs across all address spaces. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that actually has GTT space. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT bindings may be partial or rotated views. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
223
e637d2cb 224static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 225{
e637d2cb
CW
226 const struct drm_i915_gem_object *a =
227 *(const struct drm_i915_gem_object **)A;
228 const struct drm_i915_gem_object *b =
229 *(const struct drm_i915_gem_object **)B;
6d2b8885 230
2d05fa16
RV
231 if (a->stolen->start < b->stolen->start)
232 return -1;
233 if (a->stolen->start > b->stolen->start)
234 return 1;
235 return 0;
6d2b8885
CW
236}
237
/*
 * debugfs entry: list every object placed in stolen memory, sorted by
 * stolen offset, with per-object detail and a grand total.
 *
 * Object pointers are snapshotted under mm.obj_lock (a spinlock, so no
 * printing there), then described after dropping it — struct_mutex keeps
 * the objects alive meanwhile.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Upper bound only; the lists may shrink before we walk them. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
300
2db8e9d6 301struct file_stats {
f6e8aa38 302 struct i915_address_space *vm;
c44ef60e
MK
303 unsigned long count;
304 u64 total, unbound;
305 u64 global, shared;
306 u64 active, inactive;
f6e8aa38 307 u64 closed;
2db8e9d6
CW
308};
309
/*
 * idr_for_each() callback: fold one object into a struct file_stats.
 *
 * GGTT VMAs are always counted under ->global; ppGTT VMAs are counted
 * only when they belong to stats->vm, so a client's stats exclude other
 * clients' address spaces. Always returns 0 so iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Skip VMAs that live in someone else's ppGTT. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
347
b0da1b79
CW
/*
 * Emit one accounting line for @stats under the label @name; silent when
 * no objects were counted. A macro (not a function) so it can take the
 * struct by value from differently-named locals.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
493018dc
BV
361
/*
 * Account every object in each engine's batch-pool caches and print a
 * single "[k]batch pool" summary line.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
382
f6e8aa38
CW
/*
 * Walk all GEM contexts: kernel-internal allocations (context state and
 * ringbuffers) are folded into one "[k]contexts" line, while contexts
 * owned by a userspace file get a per-client "comm/handle" line covering
 * every object in that client's handle table.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			struct intel_context *ce = to_intel_context(ctx, engine);

			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Filter ppGTT VMAs to this context's address space. */
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* pid_task() requires RCU; the task may be exiting. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
425
36cdd013 426static int i915_gem_object_info(struct seq_file *m, void *data)
73aa808f 427{
36cdd013
DW
428 struct drm_i915_private *dev_priv = node_to_i915(m->private);
429 struct drm_device *dev = &dev_priv->drm;
72e96d64 430 struct i915_ggtt *ggtt = &dev_priv->ggtt;
7393b7ee
MA
431 u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
432 u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
6299f992 433 struct drm_i915_gem_object *obj;
7393b7ee 434 unsigned int page_sizes = 0;
7393b7ee 435 char buf[80];
73aa808f
CW
436 int ret;
437
3ef7f228 438 seq_printf(m, "%u objects, %llu bytes\n",
6299f992
CW
439 dev_priv->mm.object_count,
440 dev_priv->mm.object_memory);
441
1544c42e
CW
442 size = count = 0;
443 mapped_size = mapped_count = 0;
444 purgeable_size = purgeable_count = 0;
7393b7ee 445 huge_size = huge_count = 0;
f2123818
CW
446
447 spin_lock(&dev_priv->mm.obj_lock);
448 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
2bd160a1
CW
449 size += obj->base.size;
450 ++count;
451
a4f5ea64 452 if (obj->mm.madv == I915_MADV_DONTNEED) {
2bd160a1
CW
453 purgeable_size += obj->base.size;
454 ++purgeable_count;
455 }
456
a4f5ea64 457 if (obj->mm.mapping) {
2bd160a1
CW
458 mapped_count++;
459 mapped_size += obj->base.size;
be19b10d 460 }
7393b7ee
MA
461
462 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
463 huge_count++;
464 huge_size += obj->base.size;
465 page_sizes |= obj->mm.page_sizes.sg;
466 }
b7abb714 467 }
c44ef60e 468 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
6c085a72 469
2bd160a1 470 size = count = dpy_size = dpy_count = 0;
f2123818 471 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
2bd160a1
CW
472 size += obj->base.size;
473 ++count;
474
bd3d2252 475 if (obj->pin_global) {
2bd160a1
CW
476 dpy_size += obj->base.size;
477 ++dpy_count;
6299f992 478 }
2bd160a1 479
a4f5ea64 480 if (obj->mm.madv == I915_MADV_DONTNEED) {
b7abb714
CW
481 purgeable_size += obj->base.size;
482 ++purgeable_count;
483 }
2bd160a1 484
a4f5ea64 485 if (obj->mm.mapping) {
2bd160a1
CW
486 mapped_count++;
487 mapped_size += obj->base.size;
be19b10d 488 }
7393b7ee
MA
489
490 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
491 huge_count++;
492 huge_size += obj->base.size;
493 page_sizes |= obj->mm.page_sizes.sg;
494 }
6299f992 495 }
f2123818
CW
496 spin_unlock(&dev_priv->mm.obj_lock);
497
2bd160a1
CW
498 seq_printf(m, "%u bound objects, %llu bytes\n",
499 count, size);
c44ef60e 500 seq_printf(m, "%u purgeable objects, %llu bytes\n",
b7abb714 501 purgeable_count, purgeable_size);
2bd160a1
CW
502 seq_printf(m, "%u mapped objects, %llu bytes\n",
503 mapped_count, mapped_size);
7393b7ee
MA
504 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
505 huge_count,
506 stringify_page_sizes(page_sizes, buf, sizeof(buf)),
507 huge_size);
bd3d2252 508 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
2bd160a1 509 dpy_count, dpy_size);
6299f992 510
b7128ef1 511 seq_printf(m, "%llu [%pa] gtt total\n",
82ad6443 512 ggtt->vm.total, &ggtt->mappable_end);
7393b7ee
MA
513 seq_printf(m, "Supported page sizes: %s\n",
514 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
515 buf, sizeof(buf)));
73aa808f 516
493018dc 517 seq_putc(m, '\n');
1d2ac403 518
f6e8aa38
CW
519 ret = mutex_lock_interruptible(&dev->struct_mutex);
520 if (ret)
521 return ret;
522
523 print_batch_pool_stats(m, dev_priv);
15da9565 524 print_context_stats(m, dev_priv);
f6e8aa38 525 mutex_unlock(&dev->struct_mutex);
73aa808f
CW
526
527 return 0;
528}
529
aee56cff 530static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 531{
9f25d007 532 struct drm_info_node *node = m->private;
36cdd013
DW
533 struct drm_i915_private *dev_priv = node_to_i915(node);
534 struct drm_device *dev = &dev_priv->drm;
f2123818 535 struct drm_i915_gem_object **objects;
08c18323 536 struct drm_i915_gem_object *obj;
c44ef60e 537 u64 total_obj_size, total_gtt_size;
f2123818 538 unsigned long nobject, n;
08c18323
CW
539 int count, ret;
540
f2123818
CW
541 nobject = READ_ONCE(dev_priv->mm.object_count);
542 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
543 if (!objects)
544 return -ENOMEM;
545
08c18323
CW
546 ret = mutex_lock_interruptible(&dev->struct_mutex);
547 if (ret)
548 return ret;
549
f2123818
CW
550 count = 0;
551 spin_lock(&dev_priv->mm.obj_lock);
552 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
553 objects[count++] = obj;
554 if (count == nobject)
555 break;
556 }
557 spin_unlock(&dev_priv->mm.obj_lock);
558
559 total_obj_size = total_gtt_size = 0;
560 for (n = 0; n < count; n++) {
561 obj = objects[n];
562
267f0c90 563 seq_puts(m, " ");
08c18323 564 describe_obj(m, obj);
267f0c90 565 seq_putc(m, '\n');
08c18323 566 total_obj_size += obj->base.size;
ca1543be 567 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
568 }
569
570 mutex_unlock(&dev->struct_mutex);
571
c44ef60e 572 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 573 count, total_obj_size, total_gtt_size);
f2123818 574 kvfree(objects);
08c18323
CW
575
576 return 0;
577}
578
493018dc
BV
/*
 * debugfs entry: for each engine and each batch-pool cache bucket, print
 * the object count followed by a description of every cached object, and
 * finish with a grand total.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk: just count for the header line. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second walk: describe each object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
623
80d89350
TU
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down, since reading them would hang),
 * then the port, misc and PCU interrupt registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/* Only touch pipe registers while their power well is up. */
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
675
2017263e
BG
/*
 * debugfs entry: dump the interrupt registers appropriate for this
 * platform generation (CHV, gen11+, gen8+, VLV, pre-PCH-split, or
 * Ironlake-style), followed by the per-engine interrupt masks.
 *
 * The whole dump runs under a runtime-PM wakeref so the MMIO reads are
 * safe; display pipe reads additionally require their power well to be
 * enabled.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
894
a6172a80
CW
/*
 * debugfs entry: list every hardware fence register, its pin count, and
 * a description of the object it backs (or "unused").
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
921
98a2f411 922#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
/*
 * ->read() for the GPU error-state files: copy a window of the captured
 * state into userspace via a kernel bounce buffer, advancing *pos by the
 * amount copied. Returns 0 at EOF or when no state was captured.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
edc3d884 952
5a4c6f1b
CW
/* Drop the i915_gpu_state reference taken when this debugfs file was opened. */
 953static int gpu_state_release(struct inode *inode, struct file *file)
 954{
 955	i915_gpu_state_put(file->private_data);
edc3d884 956	return 0;
d5442303
DV
 957}
958
5a4c6f1b 959static int i915_gpu_info_open(struct inode *inode, struct file *file)
d5442303 960{
090e5fe3 961 struct drm_i915_private *i915 = inode->i_private;
5a4c6f1b 962 struct i915_gpu_state *gpu;
a037121c 963 intel_wakeref_t wakeref;
d5442303 964
d4225a53
CW
965 gpu = NULL;
966 with_intel_runtime_pm(i915, wakeref)
967 gpu = i915_capture_gpu_state(i915);
e6154e4c
CW
968 if (IS_ERR(gpu))
969 return PTR_ERR(gpu);
d5442303 970
5a4c6f1b 971 file->private_data = gpu;
edc3d884
MK
972 return 0;
973}
974
5a4c6f1b
CW
/*
 * debugfs "i915_gpu_info": snapshots the live GPU state on open and
 * streams it on read; release drops the snapshot reference.
 */
 975static const struct file_operations i915_gpu_info_fops = {
 976	.owner = THIS_MODULE,
 977	.open = i915_gpu_info_open,
 978	.read = gpu_state_read,
 979	.llseek = default_llseek,
 980	.release = gpu_state_release,
 981};
982
983static ssize_t
984i915_error_state_write(struct file *filp,
985 const char __user *ubuf,
986 size_t cnt,
987 loff_t *ppos)
4dc955f7 988{
5a4c6f1b 989 struct i915_gpu_state *error = filp->private_data;
4dc955f7 990
5a4c6f1b
CW
991 if (!error)
992 return 0;
edc3d884 993
5a4c6f1b
CW
994 DRM_DEBUG_DRIVER("Resetting error state\n");
995 i915_reset_error_state(error->i915);
edc3d884 996
5a4c6f1b
CW
997 return cnt;
998}
edc3d884 999
5a4c6f1b
CW
1000static int i915_error_state_open(struct inode *inode, struct file *file)
1001{
e6154e4c
CW
1002 struct i915_gpu_state *error;
1003
1004 error = i915_first_error_state(inode->i_private);
1005 if (IS_ERR(error))
1006 return PTR_ERR(error);
1007
1008 file->private_data = error;
5a4c6f1b 1009 return 0;
d5442303
DV
1010}
1011
/*
 * debugfs "i915_error_state": read streams the first recorded error
 * state; any write clears it (see i915_error_state_write).
 */
 1012static const struct file_operations i915_error_state_fops = {
 1013	.owner = THIS_MODULE,
 1014	.open = i915_error_state_open,
5a4c6f1b 1015	.read = gpu_state_read,
d5442303
DV
 1016	.write = i915_error_state_write,
 1017	.llseek = default_llseek,
5a4c6f1b 1018	.release = gpu_state_release,
d5442303 1019};
98a2f411
CW
1020#endif
1021
adb4bd12 1022static int i915_frequency_info(struct seq_file *m, void *unused)
f97108d1 1023{
36cdd013 1024 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1025 struct intel_rps *rps = &dev_priv->gt_pm.rps;
a037121c 1026 intel_wakeref_t wakeref;
c8c8fb33
PZ
1027 int ret = 0;
1028
a037121c 1029 wakeref = intel_runtime_pm_get(dev_priv);
3b8d8d91 1030
cf819eff 1031 if (IS_GEN(dev_priv, 5)) {
3b8d8d91
JB
1032 u16 rgvswctl = I915_READ16(MEMSWCTL);
1033 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1034
1035 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1036 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1037 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1038 MEMSTAT_VID_SHIFT);
1039 seq_printf(m, "Current P-state: %d\n",
1040 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
36cdd013 1041 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
0d6fc92a 1042 u32 rpmodectl, freq_sts;
666a4537 1043
9f817501 1044 mutex_lock(&dev_priv->pcu_lock);
0d6fc92a
SAK
1045
1046 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1047 seq_printf(m, "Video Turbo Mode: %s\n",
1048 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1049 seq_printf(m, "HW control enabled: %s\n",
1050 yesno(rpmodectl & GEN6_RP_ENABLE));
1051 seq_printf(m, "SW control enabled: %s\n",
1052 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1053 GEN6_RP_MEDIA_SW_MODE));
1054
666a4537
WB
1055 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1056 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1057 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1058
1059 seq_printf(m, "actual GPU freq: %d MHz\n",
1060 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1061
1062 seq_printf(m, "current GPU freq: %d MHz\n",
562d9bae 1063 intel_gpu_freq(dev_priv, rps->cur_freq));
666a4537
WB
1064
1065 seq_printf(m, "max GPU freq: %d MHz\n",
562d9bae 1066 intel_gpu_freq(dev_priv, rps->max_freq));
666a4537
WB
1067
1068 seq_printf(m, "min GPU freq: %d MHz\n",
562d9bae 1069 intel_gpu_freq(dev_priv, rps->min_freq));
666a4537
WB
1070
1071 seq_printf(m, "idle GPU freq: %d MHz\n",
562d9bae 1072 intel_gpu_freq(dev_priv, rps->idle_freq));
666a4537
WB
1073
1074 seq_printf(m,
1075 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1076 intel_gpu_freq(dev_priv, rps->efficient_freq));
9f817501 1077 mutex_unlock(&dev_priv->pcu_lock);
36cdd013 1078 } else if (INTEL_GEN(dev_priv) >= 6) {
35040562
BP
1079 u32 rp_state_limits;
1080 u32 gt_perf_status;
1081 u32 rp_state_cap;
0d8f9491 1082 u32 rpmodectl, rpinclimit, rpdeclimit;
8e8c06cd 1083 u32 rpstat, cagf, reqf;
ccab5c82
JB
1084 u32 rpupei, rpcurup, rpprevup;
1085 u32 rpdownei, rpcurdown, rpprevdown;
9dd3c605 1086 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
3b8d8d91
JB
1087 int max_freq;
1088
35040562 1089 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
cc3f90f0 1090 if (IS_GEN9_LP(dev_priv)) {
35040562
BP
1091 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1092 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1093 } else {
1094 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1095 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1096 }
1097
3b8d8d91 1098 /* RPSTAT1 is in the GT power well */
59bad947 1099 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3b8d8d91 1100
8e8c06cd 1101 reqf = I915_READ(GEN6_RPNSWREQ);
35ceabf3 1102 if (INTEL_GEN(dev_priv) >= 9)
60260a5b
AG
1103 reqf >>= 23;
1104 else {
1105 reqf &= ~GEN6_TURBO_DISABLE;
36cdd013 1106 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
60260a5b
AG
1107 reqf >>= 24;
1108 else
1109 reqf >>= 25;
1110 }
7c59a9c1 1111 reqf = intel_gpu_freq(dev_priv, reqf);
8e8c06cd 1112
0d8f9491
CW
1113 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1114 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1115 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1116
ccab5c82 1117 rpstat = I915_READ(GEN6_RPSTAT1);
d6cda9c7
AG
1118 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1119 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1120 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1121 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1122 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1123 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
c84b2705
TU
1124 cagf = intel_gpu_freq(dev_priv,
1125 intel_get_cagf(dev_priv, rpstat));
ccab5c82 1126
59bad947 1127 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d1ebd816 1128
6b7a6a7b
OM
1129 if (INTEL_GEN(dev_priv) >= 11) {
1130 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1131 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1132 /*
1133 * The equivalent to the PM ISR & IIR cannot be read
1134 * without affecting the current state of the system
1135 */
1136 pm_isr = 0;
1137 pm_iir = 0;
1138 } else if (INTEL_GEN(dev_priv) >= 8) {
9dd3c605
PZ
1139 pm_ier = I915_READ(GEN8_GT_IER(2));
1140 pm_imr = I915_READ(GEN8_GT_IMR(2));
1141 pm_isr = I915_READ(GEN8_GT_ISR(2));
1142 pm_iir = I915_READ(GEN8_GT_IIR(2));
6b7a6a7b
OM
1143 } else {
1144 pm_ier = I915_READ(GEN6_PMIER);
1145 pm_imr = I915_READ(GEN6_PMIMR);
1146 pm_isr = I915_READ(GEN6_PMISR);
1147 pm_iir = I915_READ(GEN6_PMIIR);
9dd3c605 1148 }
6b7a6a7b
OM
1149 pm_mask = I915_READ(GEN6_PMINTRMSK);
1150
960e5465
SAK
1151 seq_printf(m, "Video Turbo Mode: %s\n",
1152 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1153 seq_printf(m, "HW control enabled: %s\n",
1154 yesno(rpmodectl & GEN6_RP_ENABLE));
1155 seq_printf(m, "SW control enabled: %s\n",
1156 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1157 GEN6_RP_MEDIA_SW_MODE));
6b7a6a7b
OM
1158
1159 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1160 pm_ier, pm_imr, pm_mask);
1161 if (INTEL_GEN(dev_priv) <= 10)
1162 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1163 pm_isr, pm_iir);
5dd04556 1164 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
562d9bae 1165 rps->pm_intrmsk_mbz);
3b8d8d91 1166 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
3b8d8d91 1167 seq_printf(m, "Render p-state ratio: %d\n",
35ceabf3 1168 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
3b8d8d91
JB
1169 seq_printf(m, "Render p-state VID: %d\n",
1170 gt_perf_status & 0xff);
1171 seq_printf(m, "Render p-state limit: %d\n",
1172 rp_state_limits & 0xff);
0d8f9491
CW
1173 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1174 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1175 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1176 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
8e8c06cd 1177 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
f82855d3 1178 seq_printf(m, "CAGF: %dMHz\n", cagf);
d6cda9c7
AG
1179 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1180 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1181 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1182 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1183 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1184 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
60548c55
CW
1185 seq_printf(m, "Up threshold: %d%%\n",
1186 rps->power.up_threshold);
d86ed34a 1187
d6cda9c7
AG
1188 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1189 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1190 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1191 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1192 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1193 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
60548c55
CW
1194 seq_printf(m, "Down threshold: %d%%\n",
1195 rps->power.down_threshold);
3b8d8d91 1196
cc3f90f0 1197 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
35040562 1198 rp_state_cap >> 16) & 0xff;
35ceabf3 1199 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1200 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1201 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
7c59a9c1 1202 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91
JB
1203
1204 max_freq = (rp_state_cap & 0xff00) >> 8;
35ceabf3 1205 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1206 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1207 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
7c59a9c1 1208 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91 1209
cc3f90f0 1210 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
35040562 1211 rp_state_cap >> 0) & 0xff;
35ceabf3 1212 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1213 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1214 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
7c59a9c1 1215 intel_gpu_freq(dev_priv, max_freq));
31c77388 1216 seq_printf(m, "Max overclocked frequency: %dMHz\n",
562d9bae 1217 intel_gpu_freq(dev_priv, rps->max_freq));
aed242ff 1218
d86ed34a 1219 seq_printf(m, "Current freq: %d MHz\n",
562d9bae 1220 intel_gpu_freq(dev_priv, rps->cur_freq));
d86ed34a 1221 seq_printf(m, "Actual freq: %d MHz\n", cagf);
aed242ff 1222 seq_printf(m, "Idle freq: %d MHz\n",
562d9bae 1223 intel_gpu_freq(dev_priv, rps->idle_freq));
d86ed34a 1224 seq_printf(m, "Min freq: %d MHz\n",
562d9bae 1225 intel_gpu_freq(dev_priv, rps->min_freq));
29ecd78d 1226 seq_printf(m, "Boost freq: %d MHz\n",
562d9bae 1227 intel_gpu_freq(dev_priv, rps->boost_freq));
d86ed34a 1228 seq_printf(m, "Max freq: %d MHz\n",
562d9bae 1229 intel_gpu_freq(dev_priv, rps->max_freq));
d86ed34a
CW
1230 seq_printf(m,
1231 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1232 intel_gpu_freq(dev_priv, rps->efficient_freq));
3b8d8d91 1233 } else {
267f0c90 1234 seq_puts(m, "no P-state info available\n");
3b8d8d91 1235 }
f97108d1 1236
49cd97a3 1237 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1170f28c
MK
1238 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1239 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1240
a037121c 1241 intel_runtime_pm_put(dev_priv, wakeref);
c8c8fb33 1242 return ret;
f97108d1
JB
1243}
1244
d636951e
BW
1245static void i915_instdone_info(struct drm_i915_private *dev_priv,
1246 struct seq_file *m,
1247 struct intel_instdone *instdone)
1248{
f9e61372
BW
1249 int slice;
1250 int subslice;
1251
d636951e
BW
1252 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1253 instdone->instdone);
1254
1255 if (INTEL_GEN(dev_priv) <= 3)
1256 return;
1257
1258 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1259 instdone->slice_common);
1260
1261 if (INTEL_GEN(dev_priv) <= 6)
1262 return;
1263
f9e61372
BW
1264 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1265 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1266 slice, subslice, instdone->sampler[slice][subslice]);
1267
1268 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1269 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1270 slice, subslice, instdone->row[slice][subslice]);
d636951e
BW
1271}
1272
f654449a
CW
1273static int i915_hangcheck_info(struct seq_file *m, void *unused)
1274{
36cdd013 1275 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 1276 struct intel_engine_cs *engine;
666796da
TU
1277 u64 acthd[I915_NUM_ENGINES];
1278 u32 seqno[I915_NUM_ENGINES];
d636951e 1279 struct intel_instdone instdone;
a037121c 1280 intel_wakeref_t wakeref;
c3232b18 1281 enum intel_engine_id id;
f654449a 1282
8af29b0c 1283 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
8c185eca
CW
1284 seq_puts(m, "Wedged\n");
1285 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1286 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
8af29b0c 1287 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
8c185eca 1288 seq_puts(m, "Waiter holding struct mutex\n");
8af29b0c 1289 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
8c185eca 1290 seq_puts(m, "struct_mutex blocked for reset\n");
8af29b0c 1291
4f044a88 1292 if (!i915_modparams.enable_hangcheck) {
8c185eca 1293 seq_puts(m, "Hangcheck disabled\n");
f654449a
CW
1294 return 0;
1295 }
1296
d4225a53
CW
1297 with_intel_runtime_pm(dev_priv, wakeref) {
1298 for_each_engine(engine, dev_priv, id) {
1299 acthd[id] = intel_engine_get_active_head(engine);
1300 seqno[id] = intel_engine_get_seqno(engine);
1301 }
ebbc7546 1302
d4225a53 1303 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
ebbc7546
MK
1304 }
1305
8352aea3
CW
1306 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1307 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
f654449a
CW
1308 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1309 jiffies));
8352aea3
CW
1310 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1311 seq_puts(m, "Hangcheck active, work pending\n");
1312 else
1313 seq_puts(m, "Hangcheck inactive\n");
f654449a 1314
f73b5674
CW
1315 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1316
3b3f1650 1317 for_each_engine(engine, dev_priv, id) {
e2f80391 1318 seq_printf(m, "%s:\n", engine->name);
eb8d0f5a 1319 seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
cb399eab 1320 engine->hangcheck.seqno, seqno[id],
eb8d0f5a
CW
1321 intel_engine_last_submit(engine),
1322 jiffies_to_msecs(jiffies -
1323 engine->hangcheck.action_timestamp));
52c0fdb2 1324 seq_printf(m, "\tfake irq active? %s\n",
83348ba8 1325 yesno(test_bit(engine->id,
eb8d0f5a 1326 &dev_priv->gpu_error.missed_irq_rings)));
3fe3b030 1327
f654449a 1328 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
e2f80391 1329 (long long)engine->hangcheck.acthd,
c3232b18 1330 (long long)acthd[id]);
61642ff0 1331
e2f80391 1332 if (engine->id == RCS) {
d636951e 1333 seq_puts(m, "\tinstdone read =\n");
61642ff0 1334
d636951e 1335 i915_instdone_info(dev_priv, m, &instdone);
61642ff0 1336
d636951e 1337 seq_puts(m, "\tinstdone accu =\n");
61642ff0 1338
d636951e
BW
1339 i915_instdone_info(dev_priv, m,
1340 &engine->hangcheck.instdone);
61642ff0 1341 }
f654449a
CW
1342 }
1343
1344 return 0;
1345}
1346
061d06a2
MT
1347static int i915_reset_info(struct seq_file *m, void *unused)
1348{
1349 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1350 struct i915_gpu_error *error = &dev_priv->gpu_error;
1351 struct intel_engine_cs *engine;
1352 enum intel_engine_id id;
1353
1354 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1355
1356 for_each_engine(engine, dev_priv, id) {
1357 seq_printf(m, "%s = %u\n", engine->name,
1358 i915_reset_engine_count(error, engine));
1359 }
1360
1361 return 0;
1362}
1363
4d85529d 1364static int ironlake_drpc_info(struct seq_file *m)
f97108d1 1365{
36cdd013 1366 struct drm_i915_private *dev_priv = node_to_i915(m->private);
616fdb5a
BW
1367 u32 rgvmodectl, rstdbyctl;
1368 u16 crstandvid;
616fdb5a 1369
616fdb5a
BW
1370 rgvmodectl = I915_READ(MEMMODECTL);
1371 rstdbyctl = I915_READ(RSTDBYCTL);
1372 crstandvid = I915_READ16(CRSTANDVID);
1373
742f491d 1374 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
f97108d1
JB
1375 seq_printf(m, "Boost freq: %d\n",
1376 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1377 MEMMODE_BOOST_FREQ_SHIFT);
1378 seq_printf(m, "HW control enabled: %s\n",
742f491d 1379 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
f97108d1 1380 seq_printf(m, "SW control enabled: %s\n",
742f491d 1381 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
f97108d1 1382 seq_printf(m, "Gated voltage change: %s\n",
742f491d 1383 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
f97108d1
JB
1384 seq_printf(m, "Starting frequency: P%d\n",
1385 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1386 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1387 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1388 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1389 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1390 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1391 seq_printf(m, "Render standby enabled: %s\n",
742f491d 1392 yesno(!(rstdbyctl & RCX_SW_EXIT)));
267f0c90 1393 seq_puts(m, "Current RS state: ");
88271da3
JB
1394 switch (rstdbyctl & RSX_STATUS_MASK) {
1395 case RSX_STATUS_ON:
267f0c90 1396 seq_puts(m, "on\n");
88271da3
JB
1397 break;
1398 case RSX_STATUS_RC1:
267f0c90 1399 seq_puts(m, "RC1\n");
88271da3
JB
1400 break;
1401 case RSX_STATUS_RC1E:
267f0c90 1402 seq_puts(m, "RC1E\n");
88271da3
JB
1403 break;
1404 case RSX_STATUS_RS1:
267f0c90 1405 seq_puts(m, "RS1\n");
88271da3
JB
1406 break;
1407 case RSX_STATUS_RS2:
267f0c90 1408 seq_puts(m, "RS2 (RC6)\n");
88271da3
JB
1409 break;
1410 case RSX_STATUS_RS3:
267f0c90 1411 seq_puts(m, "RC3 (RC6+)\n");
88271da3
JB
1412 break;
1413 default:
267f0c90 1414 seq_puts(m, "unknown\n");
88271da3
JB
1415 break;
1416 }
f97108d1
JB
1417
1418 return 0;
1419}
1420
f65367b5 1421static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1422{
233ebf57 1423 struct drm_i915_private *i915 = node_to_i915(m->private);
b2cff0db 1424 struct intel_uncore_forcewake_domain *fw_domain;
d2dc94bc 1425 unsigned int tmp;
b2cff0db 1426
d7a133d8
CW
1427 seq_printf(m, "user.bypass_count = %u\n",
1428 i915->uncore.user_forcewake.count);
1429
233ebf57 1430 for_each_fw_domain(fw_domain, i915, tmp)
b2cff0db 1431 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1432 intel_uncore_forcewake_domain_to_str(fw_domain->id),
233ebf57 1433 READ_ONCE(fw_domain->wake_count));
669ab5aa 1434
b2cff0db
CW
1435 return 0;
1436}
1437
1362877e
MK
1438static void print_rc6_res(struct seq_file *m,
1439 const char *title,
1440 const i915_reg_t reg)
1441{
1442 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1443
1444 seq_printf(m, "%s %u (%llu us)\n",
1445 title, I915_READ(reg),
1446 intel_rc6_residency_us(dev_priv, reg));
1447}
1448
b2cff0db
CW
1449static int vlv_drpc_info(struct seq_file *m)
1450{
36cdd013 1451 struct drm_i915_private *dev_priv = node_to_i915(m->private);
0d6fc92a 1452 u32 rcctl1, pw_status;
669ab5aa 1453
6b312cd3 1454 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
669ab5aa
D
1455 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1456
669ab5aa
D
1457 seq_printf(m, "RC6 Enabled: %s\n",
1458 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1459 GEN6_RC_CTL_EI_MODE(1))));
1460 seq_printf(m, "Render Power Well: %s\n",
6b312cd3 1461 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1462 seq_printf(m, "Media Power Well: %s\n",
6b312cd3 1463 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1464
1362877e
MK
1465 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1466 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
9cc19be5 1467
f65367b5 1468 return i915_forcewake_domains(m, NULL);
669ab5aa
D
1469}
1470
4d85529d
BW
1471static int gen6_drpc_info(struct seq_file *m)
1472{
36cdd013 1473 struct drm_i915_private *dev_priv = node_to_i915(m->private);
960e5465 1474 u32 gt_core_status, rcctl1, rc6vids = 0;
f2dd7578 1475 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
4d85529d 1476
75aa3f63 1477 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
ed71f1b4 1478 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
4d85529d 1479
4d85529d 1480 rcctl1 = I915_READ(GEN6_RC_CONTROL);
36cdd013 1481 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1482 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1483 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1484 }
cf632bd6 1485
51cc9ade
ID
1486 if (INTEL_GEN(dev_priv) <= 7) {
1487 mutex_lock(&dev_priv->pcu_lock);
1488 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1489 &rc6vids);
1490 mutex_unlock(&dev_priv->pcu_lock);
1491 }
4d85529d 1492
fff24e21 1493 seq_printf(m, "RC1e Enabled: %s\n",
4d85529d
BW
1494 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1495 seq_printf(m, "RC6 Enabled: %s\n",
1496 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
36cdd013 1497 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1498 seq_printf(m, "Render Well Gating Enabled: %s\n",
1499 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1500 seq_printf(m, "Media Well Gating Enabled: %s\n",
1501 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1502 }
4d85529d
BW
1503 seq_printf(m, "Deep RC6 Enabled: %s\n",
1504 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1505 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1506 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
267f0c90 1507 seq_puts(m, "Current RC state: ");
4d85529d
BW
1508 switch (gt_core_status & GEN6_RCn_MASK) {
1509 case GEN6_RC0:
1510 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
267f0c90 1511 seq_puts(m, "Core Power Down\n");
4d85529d 1512 else
267f0c90 1513 seq_puts(m, "on\n");
4d85529d
BW
1514 break;
1515 case GEN6_RC3:
267f0c90 1516 seq_puts(m, "RC3\n");
4d85529d
BW
1517 break;
1518 case GEN6_RC6:
267f0c90 1519 seq_puts(m, "RC6\n");
4d85529d
BW
1520 break;
1521 case GEN6_RC7:
267f0c90 1522 seq_puts(m, "RC7\n");
4d85529d
BW
1523 break;
1524 default:
267f0c90 1525 seq_puts(m, "Unknown\n");
4d85529d
BW
1526 break;
1527 }
1528
1529 seq_printf(m, "Core Power Down: %s\n",
1530 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
36cdd013 1531 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1532 seq_printf(m, "Render Power Well: %s\n",
1533 (gen9_powergate_status &
1534 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1535 seq_printf(m, "Media Power Well: %s\n",
1536 (gen9_powergate_status &
1537 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1538 }
cce66a28
BW
1539
1540 /* Not exactly sure what this is */
1362877e
MK
1541 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1542 GEN6_GT_GFX_RC6_LOCKED);
1543 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1544 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1545 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
cce66a28 1546
51cc9ade
ID
1547 if (INTEL_GEN(dev_priv) <= 7) {
1548 seq_printf(m, "RC6 voltage: %dmV\n",
1549 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1550 seq_printf(m, "RC6+ voltage: %dmV\n",
1551 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1552 seq_printf(m, "RC6++ voltage: %dmV\n",
1553 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1554 }
1555
f2dd7578 1556 return i915_forcewake_domains(m, NULL);
4d85529d
BW
1557}
1558
1559static int i915_drpc_info(struct seq_file *m, void *unused)
1560{
36cdd013 1561 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 1562 intel_wakeref_t wakeref;
d4225a53 1563 int err = -ENODEV;
cf632bd6 1564
d4225a53
CW
1565 with_intel_runtime_pm(dev_priv, wakeref) {
1566 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1567 err = vlv_drpc_info(m);
1568 else if (INTEL_GEN(dev_priv) >= 6)
1569 err = gen6_drpc_info(m);
1570 else
1571 err = ironlake_drpc_info(m);
1572 }
cf632bd6
CW
1573
1574 return err;
4d85529d
BW
1575}
1576
9a851789
DV
1577static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1578{
36cdd013 1579 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1580
1581 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1582 dev_priv->fb_tracking.busy_bits);
1583
1584 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1585 dev_priv->fb_tracking.flip_bits);
1586
1587 return 0;
1588}
1589
b5e50c3f
JB
1590static int i915_fbc_status(struct seq_file *m, void *unused)
1591{
36cdd013 1592 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3138872c 1593 struct intel_fbc *fbc = &dev_priv->fbc;
a037121c 1594 intel_wakeref_t wakeref;
b5e50c3f 1595
ab309a6a
MW
1596 if (!HAS_FBC(dev_priv))
1597 return -ENODEV;
b5e50c3f 1598
a037121c 1599 wakeref = intel_runtime_pm_get(dev_priv);
3138872c 1600 mutex_lock(&fbc->lock);
36623ef8 1601
0e631adc 1602 if (intel_fbc_is_active(dev_priv))
267f0c90 1603 seq_puts(m, "FBC enabled\n");
2e8144a5 1604 else
3138872c
CW
1605 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1606
3fd5d1ec
VS
1607 if (intel_fbc_is_active(dev_priv)) {
1608 u32 mask;
1609
1610 if (INTEL_GEN(dev_priv) >= 8)
1611 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1612 else if (INTEL_GEN(dev_priv) >= 7)
1613 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1614 else if (INTEL_GEN(dev_priv) >= 5)
1615 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1616 else if (IS_G4X(dev_priv))
1617 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1618 else
1619 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1620 FBC_STAT_COMPRESSED);
1621
1622 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1623 }
31b9df10 1624
3138872c 1625 mutex_unlock(&fbc->lock);
a037121c 1626 intel_runtime_pm_put(dev_priv, wakeref);
36623ef8 1627
b5e50c3f
JB
1628 return 0;
1629}
1630
4127dc43 1631static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1632{
36cdd013 1633 struct drm_i915_private *dev_priv = data;
da46f936 1634
36cdd013 1635 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1636 return -ENODEV;
1637
da46f936 1638 *val = dev_priv->fbc.false_color;
da46f936
RV
1639
1640 return 0;
1641}
1642
4127dc43 1643static int i915_fbc_false_color_set(void *data, u64 val)
da46f936 1644{
36cdd013 1645 struct drm_i915_private *dev_priv = data;
da46f936
RV
1646 u32 reg;
1647
36cdd013 1648 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1649 return -ENODEV;
1650
25ad93fd 1651 mutex_lock(&dev_priv->fbc.lock);
da46f936
RV
1652
1653 reg = I915_READ(ILK_DPFC_CONTROL);
1654 dev_priv->fbc.false_color = val;
1655
1656 I915_WRITE(ILK_DPFC_CONTROL, val ?
1657 (reg | FBC_CTL_FALSE_COLOR) :
1658 (reg & ~FBC_CTL_FALSE_COLOR));
1659
25ad93fd 1660 mutex_unlock(&dev_priv->fbc.lock);
da46f936
RV
1661 return 0;
1662}
1663
4127dc43
VS
/* debugfs attribute wiring the false-color get/set helpers above. */
 1664DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
 1665			i915_fbc_false_color_get, i915_fbc_false_color_set,
da46f936
RV
 1666			"%llu\n");
1667
92d44621
PZ
1668static int i915_ips_status(struct seq_file *m, void *unused)
1669{
36cdd013 1670 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 1671 intel_wakeref_t wakeref;
92d44621 1672
ab309a6a
MW
1673 if (!HAS_IPS(dev_priv))
1674 return -ENODEV;
92d44621 1675
a037121c 1676 wakeref = intel_runtime_pm_get(dev_priv);
36623ef8 1677
0eaa53f0 1678 seq_printf(m, "Enabled by kernel parameter: %s\n",
4f044a88 1679 yesno(i915_modparams.enable_ips));
0eaa53f0 1680
36cdd013 1681 if (INTEL_GEN(dev_priv) >= 8) {
0eaa53f0
RV
1682 seq_puts(m, "Currently: unknown\n");
1683 } else {
1684 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1685 seq_puts(m, "Currently: enabled\n");
1686 else
1687 seq_puts(m, "Currently: disabled\n");
1688 }
92d44621 1689
a037121c 1690 intel_runtime_pm_put(dev_priv, wakeref);
36623ef8 1691
92d44621
PZ
1692 return 0;
1693}
1694
4a9bef37
JB
1695static int i915_sr_status(struct seq_file *m, void *unused)
1696{
36cdd013 1697 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 1698 intel_wakeref_t wakeref;
4a9bef37
JB
1699 bool sr_enabled = false;
1700
0e6e0be4 1701 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
36623ef8 1702
7342a72c
CW
1703 if (INTEL_GEN(dev_priv) >= 9)
1704 /* no global SR status; inspect per-plane WM */;
1705 else if (HAS_PCH_SPLIT(dev_priv))
5ba2aaaa 1706 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
c0f86832 1707 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
36cdd013 1708 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
4a9bef37 1709 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
36cdd013 1710 else if (IS_I915GM(dev_priv))
4a9bef37 1711 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
36cdd013 1712 else if (IS_PINEVIEW(dev_priv))
4a9bef37 1713 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
36cdd013 1714 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
77b64555 1715 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4a9bef37 1716
0e6e0be4 1717 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
36623ef8 1718
08c4d7fc 1719 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
4a9bef37
JB
1720
1721 return 0;
1722}
1723
7648fa99
JB
1724static int i915_emon_status(struct seq_file *m, void *unused)
1725{
4a8ab5ea 1726 struct drm_i915_private *i915 = node_to_i915(m->private);
a037121c 1727 intel_wakeref_t wakeref;
de227ef0 1728
4a8ab5ea 1729 if (!IS_GEN(i915, 5))
582be6b4
CW
1730 return -ENODEV;
1731
4a8ab5ea
CW
1732 with_intel_runtime_pm(i915, wakeref) {
1733 unsigned long temp, chipset, gfx;
7648fa99 1734
4a8ab5ea
CW
1735 temp = i915_mch_val(i915);
1736 chipset = i915_chipset_val(i915);
1737 gfx = i915_gfx_val(i915);
7648fa99 1738
4a8ab5ea
CW
1739 seq_printf(m, "GMCH temp: %ld\n", temp);
1740 seq_printf(m, "Chipset power: %ld\n", chipset);
1741 seq_printf(m, "GFX power: %ld\n", gfx);
1742 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1743 }
7648fa99
JB
1744
1745 return 0;
1746}
1747
23b2f8bb
JB
1748static int i915_ring_freq_table(struct seq_file *m, void *unused)
1749{
36cdd013 1750 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1751 struct intel_rps *rps = &dev_priv->gt_pm.rps;
f936ec34 1752 unsigned int max_gpu_freq, min_gpu_freq;
a037121c 1753 intel_wakeref_t wakeref;
d586b5f4
CW
1754 int gpu_freq, ia_freq;
1755 int ret;
23b2f8bb 1756
ab309a6a
MW
1757 if (!HAS_LLC(dev_priv))
1758 return -ENODEV;
23b2f8bb 1759
a037121c 1760 wakeref = intel_runtime_pm_get(dev_priv);
5bfa0199 1761
9f817501 1762 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
23b2f8bb 1763 if (ret)
5bfa0199 1764 goto out;
23b2f8bb 1765
d586b5f4
CW
1766 min_gpu_freq = rps->min_freq;
1767 max_gpu_freq = rps->max_freq;
2b2874ef 1768 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
f936ec34 1769 /* Convert GT frequency to 50 HZ units */
d586b5f4
CW
1770 min_gpu_freq /= GEN9_FREQ_SCALER;
1771 max_gpu_freq /= GEN9_FREQ_SCALER;
f936ec34
AG
1772 }
1773
267f0c90 1774 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
23b2f8bb 1775
f936ec34 1776 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
42c0526c
BW
1777 ia_freq = gpu_freq;
1778 sandybridge_pcode_read(dev_priv,
1779 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1780 &ia_freq);
3ebecd07 1781 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
f936ec34 1782 intel_gpu_freq(dev_priv, (gpu_freq *
35ceabf3 1783 (IS_GEN9_BC(dev_priv) ||
2b2874ef 1784 INTEL_GEN(dev_priv) >= 10 ?
b976dc53 1785 GEN9_FREQ_SCALER : 1))),
3ebecd07
CW
1786 ((ia_freq >> 0) & 0xff) * 100,
1787 ((ia_freq >> 8) & 0xff) * 100);
23b2f8bb
JB
1788 }
1789
9f817501 1790 mutex_unlock(&dev_priv->pcu_lock);
23b2f8bb 1791
5bfa0199 1792out:
a037121c 1793 intel_runtime_pm_put(dev_priv, wakeref);
5bfa0199 1794 return ret;
23b2f8bb
JB
1795}
1796
44834a67
CW
1797static int i915_opregion(struct seq_file *m, void *unused)
1798{
36cdd013
DW
1799 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1800 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1801 struct intel_opregion *opregion = &dev_priv->opregion;
1802 int ret;
1803
1804 ret = mutex_lock_interruptible(&dev->struct_mutex);
1805 if (ret)
0d38f009 1806 goto out;
44834a67 1807
2455a8e4
JN
1808 if (opregion->header)
1809 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1810
1811 mutex_unlock(&dev->struct_mutex);
1812
0d38f009 1813out:
44834a67
CW
1814 return 0;
1815}
1816
ada8f955
JN
1817static int i915_vbt(struct seq_file *m, void *unused)
1818{
36cdd013 1819 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1820
1821 if (opregion->vbt)
1822 seq_write(m, opregion->vbt, opregion->vbt_size);
1823
1824 return 0;
1825}
1826
37811fcc
CW
/*
 * i915_gem_framebuffer_info - debugfs listing of framebuffers.
 *
 * Prints the fbdev framebuffer (when fbdev emulation is configured in)
 * followed by every user framebuffer: geometry, depth, bpp, tiling
 * modifier, refcount and the backing GEM object.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* struct_mutex protects the object state printed by describe_obj() */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock guards the mode_config framebuffer list iteration */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* already printed above as the fbcon framebuffer */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1876
7e37f889 1877static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
c9fe99bd 1878{
ef5032a0
CW
1879 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1880 ring->space, ring->head, ring->tail, ring->emit);
c9fe99bd
OM
1881}
1882
e76d3630
BW
/*
 * i915_context_status - debugfs listing of every GEM context.
 *
 * For each context on i915->contexts.list, prints its hw_id (when one is
 * assigned), the owning process (if any), the remap_slice flag and, per
 * engine, the backing context-state object and ringbuffer. Holds
 * struct_mutex for the duration of the walk.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* hw_id_link is non-empty only while a hw id is assigned */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* take a task reference so ->comm stays valid */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* the owner closed its fd; context is being reaped */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if slice remapping is pending, 'r' otherwise */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1938
ea16a3cd
DV
1939static const char *swizzle_string(unsigned swizzle)
1940{
aee56cff 1941 switch (swizzle) {
ea16a3cd
DV
1942 case I915_BIT_6_SWIZZLE_NONE:
1943 return "none";
1944 case I915_BIT_6_SWIZZLE_9:
1945 return "bit9";
1946 case I915_BIT_6_SWIZZLE_9_10:
1947 return "bit9/bit10";
1948 case I915_BIT_6_SWIZZLE_9_11:
1949 return "bit9/bit11";
1950 case I915_BIT_6_SWIZZLE_9_10_11:
1951 return "bit9/bit10/bit11";
1952 case I915_BIT_6_SWIZZLE_9_17:
1953 return "bit9/bit17";
1954 case I915_BIT_6_SWIZZLE_9_10_17:
1955 return "bit9/bit10/bit17";
1956 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 1957 return "unknown";
ea16a3cd
DV
1958 }
1959
1960 return "bug";
1961}
1962
/*
 * i915_swizzle_info - debugfs dump of the bit-6 swizzle configuration.
 *
 * Prints the detected X/Y tiling swizzle modes followed by the raw
 * hardware registers they were derived from: DCC/C?DRB3 on gen3/4, the
 * MAD_DIMM and arbitration registers on gen6+. Holds a runtime-pm
 * wakeref across the register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* GAMTARBMODE supersedes ARB_MODE on gen8+ */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2010
7466c291
CW
2011static const char *rps_power_to_str(unsigned int power)
2012{
2013 static const char * const strings[] = {
2014 [LOW_POWER] = "low power",
2015 [BETWEEN] = "mixed",
2016 [HIGH_POWER] = "high power",
2017 };
2018
2019 if (power >= ARRAY_SIZE(strings) || !strings[power])
2020 return "unknown";
2021
2022 return strings[power];
2023}
2024
1854d5ca
CW
/*
 * i915_rps_boost_info - debugfs summary of RPS state and client boosting.
 *
 * Reports the requested vs actual GPU frequency, the soft/hard frequency
 * limits, per-file boost counts and, when the GPU is busy, the RPS
 * autotuning up/down evaluation-interval counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;
	struct drm_file *file;

	/* Only read the live frequency if the device is already awake */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* punit reads are serialized by pcu_lock */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* filelist_mutex protects dev->filelist; pid lookup needs RCU */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _FW reads: forcewake held explicitly around the block */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2110
63573eb7
BW
2111static int i915_llc(struct seq_file *m, void *data)
2112{
36cdd013 2113 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2114 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2115
36cdd013 2116 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2117 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2118 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2119
2120 return 0;
2121}
2122
0509ead1
AS
/*
 * i915_huc_load_status_info - debugfs dump of HuC firmware state.
 *
 * Prints the cached HuC firmware descriptor, then wakes the device to
 * read the live HUC_STATUS2 register.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2140
fdf5d357
AD
/*
 * i915_guc_load_status_info - debugfs dump of GuC firmware state.
 *
 * Prints the cached firmware descriptor, then wakes the device to decode
 * GUC_STATUS (bootrom / uKernel / MIA core fields) and dump the 16 GuC
 * software scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2173
5e24e4a2
MW
2174static const char *
2175stringify_guc_log_type(enum guc_log_buffer_type type)
2176{
2177 switch (type) {
2178 case GUC_ISR_LOG_BUFFER:
2179 return "ISR";
2180 case GUC_DPC_LOG_BUFFER:
2181 return "DPC";
2182 case GUC_CRASH_DUMP_LOG_BUFFER:
2183 return "CRASH";
2184 default:
2185 MISSING_CASE(type);
2186 }
2187
2188 return "";
2189}
2190
5aa1ee4b
AG
/*
 * i915_guc_log_info - print GuC log relay statistics.
 *
 * Shows the relay-full count plus, per log buffer type (ISR/DPC/CRASH),
 * the flush and sampled-overflow counters. Prints a short notice and
 * returns early if the log relay has not been enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2214
8b417c26
DG
2215static void i915_guc_client_info(struct seq_file *m,
2216 struct drm_i915_private *dev_priv,
5afc8b49 2217 struct intel_guc_client *client)
8b417c26 2218{
e2f80391 2219 struct intel_engine_cs *engine;
c18468c4 2220 enum intel_engine_id id;
e5315213 2221 u64 tot = 0;
8b417c26 2222
b09935a6
OM
2223 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2224 client->priority, client->stage_id, client->proc_desc_offset);
59db36cf
MW
2225 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2226 client->doorbell_id, client->doorbell_offset);
8b417c26 2227
3b3f1650 2228 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2229 u64 submissions = client->submissions[id];
2230 tot += submissions;
8b417c26 2231 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2232 submissions, engine->name);
8b417c26
DG
2233 }
2234 seq_printf(m, "\tTotal: %llu\n", tot);
2235}
2236
a8b9370f
OM
/*
 * i915_guc_info - top-level GuC debugfs entry.
 *
 * Always prints logging statistics when the GuC is in use; additionally
 * dumps the doorbell bitmap and the execbuf/preempt submission clients
 * when GuC submission is enabled.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* logging is available even without GuC submission */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* with submission enabled, the execbuf client must exist */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2268
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor.
 *
 * Walks the stage descriptor pool, skipping inactive entries, and prints
 * each descriptor's fields plus the per-engine execlist context state for
 * every engine used by the execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* only active descriptors are meaningful */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2322
4c7e77fc
AD
/*
 * i915_guc_log_dump - hexdump a GuC log buffer object.
 *
 * node->info_ent->data selects between the load-error log and the
 * regular log vma. The object is pinned into a WC mapping and dumped
 * as rows of four 32-bit words. Prints nothing when no log exists.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* no log captured/allocated is not an error for this file */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* four u32 words per output line */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2361
4977a287 2362static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2363{
bcc36d8a 2364 struct drm_i915_private *dev_priv = data;
685534ef 2365
86aa8247 2366 if (!USES_GUC(dev_priv))
ab309a6a
MW
2367 return -ENODEV;
2368
50935ac7 2369 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2370
2371 return 0;
2372}
2373
4977a287 2374static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2375{
bcc36d8a 2376 struct drm_i915_private *dev_priv = data;
685534ef 2377
86aa8247 2378 if (!USES_GUC(dev_priv))
ab309a6a
MW
2379 return -ENODEV;
2380
50935ac7 2381 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2382}
2383
4977a287
MW
2384DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2385 i915_guc_log_level_get, i915_guc_log_level_set,
685534ef
SAK
2386 "%lld\n");
2387
4977a287
MW
2388static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2389{
2390 struct drm_i915_private *dev_priv = inode->i_private;
2391
2392 if (!USES_GUC(dev_priv))
2393 return -ENODEV;
2394
2395 file->private_data = &dev_priv->guc.log;
2396
2397 return intel_guc_log_relay_open(&dev_priv->guc.log);
2398}
2399
2400static ssize_t
2401i915_guc_log_relay_write(struct file *filp,
2402 const char __user *ubuf,
2403 size_t cnt,
2404 loff_t *ppos)
2405{
2406 struct intel_guc_log *log = filp->private_data;
2407
2408 intel_guc_log_relay_flush(log);
2409
2410 return cnt;
2411}
2412
2413static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2414{
2415 struct drm_i915_private *dev_priv = inode->i_private;
2416
2417 intel_guc_log_relay_close(&dev_priv->guc.log);
2418
2419 return 0;
2420}
2421
/* File operations for the GuC log relay debugfs node. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2428
5b7b3086
DP
2429static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2430{
2431 u8 val;
2432 static const char * const sink_status[] = {
2433 "inactive",
2434 "transition to active, capture and display",
2435 "active, display from RFB",
2436 "active, capture and display on sink device timings",
2437 "transition to inactive, capture and display, timing re-sync",
2438 "reserved",
2439 "reserved",
2440 "sink internal error",
2441 };
2442 struct drm_connector *connector = m->private;
7a72c78b 2443 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5b7b3086
DP
2444 struct intel_dp *intel_dp =
2445 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
7a72c78b
RV
2446 int ret;
2447
2448 if (!CAN_PSR(dev_priv)) {
2449 seq_puts(m, "PSR Unsupported\n");
2450 return -ENODEV;
2451 }
5b7b3086
DP
2452
2453 if (connector->status != connector_status_connected)
2454 return -ENODEV;
2455
7a72c78b
RV
2456 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2457
2458 if (ret == 1) {
5b7b3086
DP
2459 const char *str = "unknown";
2460
2461 val &= DP_PSR_SINK_STATE_MASK;
2462 if (val < ARRAY_SIZE(sink_status))
2463 str = sink_status[val];
2464 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2465 } else {
7a72c78b 2466 return ret;
5b7b3086
DP
2467 }
2468
2469 return 0;
2470}
2471DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2472
00b06296
VN
/*
 * psr_source_status - decode and print the source-side PSR state machine.
 *
 * Reads EDP_PSR2_STATUS or EDP_PSR_STATUS depending on which PSR mode is
 * active and maps the hardware state field onto a readable name. States
 * beyond the known tables print as "unknown".
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2518
e91fd8c6
RV
/*
 * i915_edp_psr_status - debugfs summary of eDP PSR state.
 *
 * Reports sink support, the active PSR mode (PSR1/PSR2), the source
 * control and status registers, busy frontbuffer bits, the HSW/BDW
 * performance counter, optional IRQ timestamps and, for PSR2, the
 * selective-update block counts of recent frames.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* nothing further to report without a PSR-capable sink */
	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* the enable bit lives in different registers for PSR1 vs PSR2 */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2607
54fd3149
DP
/*
 * i915_edp_psr_debug_set - debugfs write hook to change the PSR debug mode.
 *
 * Takes a runtime-pm wakeref and the modeset locks (with full -EDEADLK
 * backoff-and-retry) since changing the debug mode may force a PSR mode
 * switch on the active pipe.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	intel_wakeref_t wakeref;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	wakeref = intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* drop the contended locks, then take another pass */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv, wakeref);

	return ret;
}
2640
2641static int
2642i915_edp_psr_debug_get(void *data, u64 *val)
2643{
2644 struct drm_i915_private *dev_priv = data;
2645
2646 if (!CAN_PSR(dev_priv))
2647 return -ENODEV;
2648
2649 *val = READ_ONCE(dev_priv->psr.debug);
2650 return 0;
2651}
2652
2653DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2654 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2655 "%llu\n");
2656
ec013e7f
JB
/*
 * i915_energy_uJ - report the cumulative package energy in microjoules.
 *
 * Reads the RAPL energy unit from MSR_RAPL_POWER_UNIT (energy-unit field
 * in bits 12:8) and uses it to scale the MCH_SECP_NRG_STTS counter.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* rdmsrl_safe() returns non-zero if the MSR read faulted */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2679
6455c870 2680static int i915_runtime_pm_status(struct seq_file *m, void *unused)
371db66a 2681{
36cdd013 2682 struct drm_i915_private *dev_priv = node_to_i915(m->private);
52a05c30 2683 struct pci_dev *pdev = dev_priv->drm.pdev;
371db66a 2684
a156e64d
CW
2685 if (!HAS_RUNTIME_PM(dev_priv))
2686 seq_puts(m, "Runtime power management not supported\n");
371db66a 2687
25c896bd
CW
2688 seq_printf(m, "Runtime power status: %s\n",
2689 enableddisabled(!dev_priv->power_domains.wakeref));
2690
6f56103d
CW
2691 seq_printf(m, "GPU idle: %s (epoch %u)\n",
2692 yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
371db66a 2693 seq_printf(m, "IRQs disabled: %s\n",
9df7575f 2694 yesno(!intel_irqs_enabled(dev_priv)));
0d804184 2695#ifdef CONFIG_PM
a6aaec8b 2696 seq_printf(m, "Usage count: %d\n",
36cdd013 2697 atomic_read(&dev_priv->drm.dev->power.usage_count));
0d804184
CW
2698#else
2699 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2700#endif
a156e64d 2701 seq_printf(m, "PCI device power state: %s [%d]\n",
52a05c30
DW
2702 pci_power_name(pdev->current_state),
2703 pdev->current_state);
371db66a 2704
bd780f37
CW
2705 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2706 struct drm_printer p = drm_seq_file_printer(m);
2707
2708 print_intel_runtime_pm_wakeref(dev_priv, &p);
2709 }
2710
ec013e7f
JB
2711 return 0;
2712}
2713
1da51581
ID
2714static int i915_power_domain_info(struct seq_file *m, void *unused)
2715{
36cdd013 2716 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1da51581
ID
2717 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2718 int i;
2719
2720 mutex_lock(&power_domains->lock);
2721
2722 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2723 for (i = 0; i < power_domains->power_well_count; i++) {
2724 struct i915_power_well *power_well;
2725 enum intel_display_power_domain power_domain;
2726
2727 power_well = &power_domains->power_wells[i];
f28ec6f4 2728 seq_printf(m, "%-25s %d\n", power_well->desc->name,
1da51581
ID
2729 power_well->count);
2730
f28ec6f4 2731 for_each_power_domain(power_domain, power_well->desc->domains)
1da51581 2732 seq_printf(m, " %-23s %d\n",
9895ad03 2733 intel_display_power_domain_str(power_domain),
1da51581 2734 power_domains->domain_use_count[power_domain]);
1da51581
ID
2735 }
2736
2737 mutex_unlock(&power_domains->lock);
2738
2739 return 0;
2740}
2741
b7cec66d
DL
2742static int i915_dmc_info(struct seq_file *m, void *unused)
2743{
36cdd013 2744 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 2745 intel_wakeref_t wakeref;
b7cec66d
DL
2746 struct intel_csr *csr;
2747
ab309a6a
MW
2748 if (!HAS_CSR(dev_priv))
2749 return -ENODEV;
b7cec66d
DL
2750
2751 csr = &dev_priv->csr;
2752
a037121c 2753 wakeref = intel_runtime_pm_get(dev_priv);
6fb403de 2754
b7cec66d
DL
2755 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2756 seq_printf(m, "path: %s\n", csr->fw_path);
2757
2758 if (!csr->dmc_payload)
6fb403de 2759 goto out;
b7cec66d
DL
2760
2761 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2762 CSR_VERSION_MINOR(csr->version));
2763
34b2f8da
ID
2764 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2765 goto out;
2766
2767 seq_printf(m, "DC3 -> DC5 count: %d\n",
2768 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2769 SKL_CSR_DC3_DC5_COUNT));
2770 if (!IS_GEN9_LP(dev_priv))
8337206d
DL
2771 seq_printf(m, "DC5 -> DC6 count: %d\n",
2772 I915_READ(SKL_CSR_DC5_DC6_COUNT));
8337206d 2773
6fb403de
MK
2774out:
2775 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2776 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2777 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2778
a037121c 2779 intel_runtime_pm_put(dev_priv, wakeref);
8337206d 2780
b7cec66d
DL
2781 return 0;
2782}
2783
53f5e3ca
JB
2784static void intel_seq_print_mode(struct seq_file *m, int tabs,
2785 struct drm_display_mode *mode)
2786{
2787 int i;
2788
2789 for (i = 0; i < tabs; i++)
2790 seq_putc(m, '\t');
2791
4fb6bb89 2792 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
53f5e3ca
JB
2793}
2794
2795static void intel_encoder_info(struct seq_file *m,
2796 struct intel_crtc *intel_crtc,
2797 struct intel_encoder *intel_encoder)
2798{
36cdd013
DW
2799 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2800 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2801 struct drm_crtc *crtc = &intel_crtc->base;
2802 struct intel_connector *intel_connector;
2803 struct drm_encoder *encoder;
2804
2805 encoder = &intel_encoder->base;
2806 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
8e329a03 2807 encoder->base.id, encoder->name);
53f5e3ca
JB
2808 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2809 struct drm_connector *connector = &intel_connector->base;
2810 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2811 connector->base.id,
c23cc417 2812 connector->name,
53f5e3ca
JB
2813 drm_get_connector_status_name(connector->status));
2814 if (connector->status == connector_status_connected) {
2815 struct drm_display_mode *mode = &crtc->mode;
2816 seq_printf(m, ", mode:\n");
2817 intel_seq_print_mode(m, 2, mode);
2818 } else {
2819 seq_putc(m, '\n');
2820 }
2821 }
2822}
2823
2824static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2825{
36cdd013
DW
2826 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2827 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2828 struct drm_crtc *crtc = &intel_crtc->base;
2829 struct intel_encoder *intel_encoder;
23a48d53
ML
2830 struct drm_plane_state *plane_state = crtc->primary->state;
2831 struct drm_framebuffer *fb = plane_state->fb;
53f5e3ca 2832
23a48d53 2833 if (fb)
5aa8a937 2834 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
23a48d53
ML
2835 fb->base.id, plane_state->src_x >> 16,
2836 plane_state->src_y >> 16, fb->width, fb->height);
5aa8a937
MR
2837 else
2838 seq_puts(m, "\tprimary plane disabled\n");
53f5e3ca
JB
2839 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2840 intel_encoder_info(m, intel_crtc, intel_encoder);
2841}
2842
2843static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2844{
2845 struct drm_display_mode *mode = panel->fixed_mode;
2846
2847 seq_printf(m, "\tfixed mode:\n");
2848 intel_seq_print_mode(m, 2, mode);
2849}
2850
2851static void intel_dp_info(struct seq_file *m,
2852 struct intel_connector *intel_connector)
2853{
2854 struct intel_encoder *intel_encoder = intel_connector->encoder;
2855 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2856
2857 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
742f491d 2858 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
b6dabe3b 2859 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
53f5e3ca 2860 intel_panel_info(m, &intel_connector->panel);
80209e5f
MK
2861
2862 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2863 &intel_dp->aux);
53f5e3ca
JB
2864}
2865
9a148a96
LY
2866static void intel_dp_mst_info(struct seq_file *m,
2867 struct intel_connector *intel_connector)
2868{
2869 struct intel_encoder *intel_encoder = intel_connector->encoder;
2870 struct intel_dp_mst_encoder *intel_mst =
2871 enc_to_mst(&intel_encoder->base);
2872 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2873 struct intel_dp *intel_dp = &intel_dig_port->dp;
2874 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2875 intel_connector->port);
2876
2877 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2878}
2879
53f5e3ca
JB
2880static void intel_hdmi_info(struct seq_file *m,
2881 struct intel_connector *intel_connector)
2882{
2883 struct intel_encoder *intel_encoder = intel_connector->encoder;
2884 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2885
742f491d 2886 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
2887}
2888
2889static void intel_lvds_info(struct seq_file *m,
2890 struct intel_connector *intel_connector)
2891{
2892 intel_panel_info(m, &intel_connector->panel);
2893}
2894
2895static void intel_connector_info(struct seq_file *m,
2896 struct drm_connector *connector)
2897{
2898 struct intel_connector *intel_connector = to_intel_connector(connector);
2899 struct intel_encoder *intel_encoder = intel_connector->encoder;
f103fc7d 2900 struct drm_display_mode *mode;
53f5e3ca
JB
2901
2902 seq_printf(m, "connector %d: type %s, status: %s\n",
c23cc417 2903 connector->base.id, connector->name,
53f5e3ca 2904 drm_get_connector_status_name(connector->status));
3e037f9b
JRS
2905
2906 if (connector->status == connector_status_disconnected)
2907 return;
2908
2909 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2910 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2911 connector->display_info.width_mm,
2912 connector->display_info.height_mm);
2913 seq_printf(m, "\tsubpixel order: %s\n",
2914 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2915 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
ee648a74 2916
77d1f615 2917 if (!intel_encoder)
ee648a74
ML
2918 return;
2919
2920 switch (connector->connector_type) {
2921 case DRM_MODE_CONNECTOR_DisplayPort:
2922 case DRM_MODE_CONNECTOR_eDP:
9a148a96
LY
2923 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2924 intel_dp_mst_info(m, intel_connector);
2925 else
2926 intel_dp_info(m, intel_connector);
ee648a74
ML
2927 break;
2928 case DRM_MODE_CONNECTOR_LVDS:
2929 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
36cd7444 2930 intel_lvds_info(m, intel_connector);
ee648a74
ML
2931 break;
2932 case DRM_MODE_CONNECTOR_HDMIA:
2933 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
7e732cac 2934 intel_encoder->type == INTEL_OUTPUT_DDI)
ee648a74
ML
2935 intel_hdmi_info(m, intel_connector);
2936 break;
2937 default:
2938 break;
36cd7444 2939 }
53f5e3ca 2940
f103fc7d
JB
2941 seq_printf(m, "\tmodes:\n");
2942 list_for_each_entry(mode, &connector->modes, head)
2943 intel_seq_print_mode(m, 2, mode);
53f5e3ca
JB
2944}
2945
3abc4e09
RF
2946static const char *plane_type(enum drm_plane_type type)
2947{
2948 switch (type) {
2949 case DRM_PLANE_TYPE_OVERLAY:
2950 return "OVL";
2951 case DRM_PLANE_TYPE_PRIMARY:
2952 return "PRI";
2953 case DRM_PLANE_TYPE_CURSOR:
2954 return "CUR";
2955 /*
2956 * Deliberately omitting default: to generate compiler warnings
2957 * when a new drm_plane_type gets added.
2958 */
2959 }
2960
2961 return "unknown";
2962}
2963
5852a15c 2964static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
3abc4e09 2965{
3abc4e09 2966 /*
c2c446ad 2967 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3abc4e09
RF
2968 * will print them all to visualize if the values are misused
2969 */
5852a15c 2970 snprintf(buf, bufsize,
3abc4e09 2971 "%s%s%s%s%s%s(0x%08x)",
c2c446ad
RF
2972 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2973 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2974 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2975 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2976 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2977 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3abc4e09 2978 rotation);
3abc4e09
RF
2979}
2980
2981static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2982{
36cdd013
DW
2983 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2984 struct drm_device *dev = &dev_priv->drm;
3abc4e09
RF
2985 struct intel_plane *intel_plane;
2986
2987 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2988 struct drm_plane_state *state;
2989 struct drm_plane *plane = &intel_plane->base;
b3c11ac2 2990 struct drm_format_name_buf format_name;
5852a15c 2991 char rot_str[48];
3abc4e09
RF
2992
2993 if (!plane->state) {
2994 seq_puts(m, "plane->state is NULL!\n");
2995 continue;
2996 }
2997
2998 state = plane->state;
2999
90844f00 3000 if (state->fb) {
438b74a5
VS
3001 drm_get_format_name(state->fb->format->format,
3002 &format_name);
90844f00 3003 } else {
b3c11ac2 3004 sprintf(format_name.str, "N/A");
90844f00
EE
3005 }
3006
5852a15c
JN
3007 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3008
3abc4e09
RF
3009 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3010 plane->base.id,
3011 plane_type(intel_plane->base.type),
3012 state->crtc_x, state->crtc_y,
3013 state->crtc_w, state->crtc_h,
3014 (state->src_x >> 16),
3015 ((state->src_x & 0xffff) * 15625) >> 10,
3016 (state->src_y >> 16),
3017 ((state->src_y & 0xffff) * 15625) >> 10,
3018 (state->src_w >> 16),
3019 ((state->src_w & 0xffff) * 15625) >> 10,
3020 (state->src_h >> 16),
3021 ((state->src_h & 0xffff) * 15625) >> 10,
b3c11ac2 3022 format_name.str,
5852a15c 3023 rot_str);
3abc4e09
RF
3024 }
3025}
3026
3027static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3028{
3029 struct intel_crtc_state *pipe_config;
3030 int num_scalers = intel_crtc->num_scalers;
3031 int i;
3032
3033 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3034
3035 /* Not all platformas have a scaler */
3036 if (num_scalers) {
3037 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3038 num_scalers,
3039 pipe_config->scaler_state.scaler_users,
3040 pipe_config->scaler_state.scaler_id);
3041
58415918 3042 for (i = 0; i < num_scalers; i++) {
3abc4e09
RF
3043 struct intel_scaler *sc =
3044 &pipe_config->scaler_state.scalers[i];
3045
3046 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3047 i, yesno(sc->in_use), sc->mode);
3048 }
3049 seq_puts(m, "\n");
3050 } else {
3051 seq_puts(m, "\tNo scalers available on this platform\n");
3052 }
3053}
3054
53f5e3ca
JB
3055static int i915_display_info(struct seq_file *m, void *unused)
3056{
36cdd013
DW
3057 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3058 struct drm_device *dev = &dev_priv->drm;
065f2ec2 3059 struct intel_crtc *crtc;
53f5e3ca 3060 struct drm_connector *connector;
3f6a5e1e 3061 struct drm_connector_list_iter conn_iter;
a037121c
CW
3062 intel_wakeref_t wakeref;
3063
3064 wakeref = intel_runtime_pm_get(dev_priv);
53f5e3ca 3065
53f5e3ca
JB
3066 seq_printf(m, "CRTC info\n");
3067 seq_printf(m, "---------\n");
d3fcc808 3068 for_each_intel_crtc(dev, crtc) {
f77076c9 3069 struct intel_crtc_state *pipe_config;
53f5e3ca 3070
3f6a5e1e 3071 drm_modeset_lock(&crtc->base.mutex, NULL);
f77076c9
ML
3072 pipe_config = to_intel_crtc_state(crtc->base.state);
3073
3abc4e09 3074 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
065f2ec2 3075 crtc->base.base.id, pipe_name(crtc->pipe),
f77076c9 3076 yesno(pipe_config->base.active),
3abc4e09
RF
3077 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3078 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3079
f77076c9 3080 if (pipe_config->base.active) {
cd5dcbf1
VS
3081 struct intel_plane *cursor =
3082 to_intel_plane(crtc->base.cursor);
3083
065f2ec2
CW
3084 intel_crtc_info(m, crtc);
3085
cd5dcbf1
VS
3086 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3087 yesno(cursor->base.state->visible),
3088 cursor->base.state->crtc_x,
3089 cursor->base.state->crtc_y,
3090 cursor->base.state->crtc_w,
3091 cursor->base.state->crtc_h,
3092 cursor->cursor.base);
3abc4e09
RF
3093 intel_scaler_info(m, crtc);
3094 intel_plane_info(m, crtc);
a23dc658 3095 }
cace841c
DV
3096
3097 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3098 yesno(!crtc->cpu_fifo_underrun_disabled),
3099 yesno(!crtc->pch_fifo_underrun_disabled));
3f6a5e1e 3100 drm_modeset_unlock(&crtc->base.mutex);
53f5e3ca
JB
3101 }
3102
3103 seq_printf(m, "\n");
3104 seq_printf(m, "Connector info\n");
3105 seq_printf(m, "--------------\n");
3f6a5e1e
DV
3106 mutex_lock(&dev->mode_config.mutex);
3107 drm_connector_list_iter_begin(dev, &conn_iter);
3108 drm_for_each_connector_iter(connector, &conn_iter)
53f5e3ca 3109 intel_connector_info(m, connector);
3f6a5e1e
DV
3110 drm_connector_list_iter_end(&conn_iter);
3111 mutex_unlock(&dev->mode_config.mutex);
3112
a037121c 3113 intel_runtime_pm_put(dev_priv, wakeref);
53f5e3ca
JB
3114
3115 return 0;
3116}
3117
1b36595f
CW
3118static int i915_engine_info(struct seq_file *m, void *unused)
3119{
3120 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3121 struct intel_engine_cs *engine;
a037121c 3122 intel_wakeref_t wakeref;
3b3f1650 3123 enum intel_engine_id id;
f636edb2 3124 struct drm_printer p;
1b36595f 3125
a037121c 3126 wakeref = intel_runtime_pm_get(dev_priv);
9c870d03 3127
6f56103d
CW
3128 seq_printf(m, "GT awake? %s (epoch %u)\n",
3129 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
f73b5674
CW
3130 seq_printf(m, "Global active requests: %d\n",
3131 dev_priv->gt.active_requests);
f577a03b 3132 seq_printf(m, "CS timestamp frequency: %u kHz\n",
0258404f 3133 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
f73b5674 3134
f636edb2
CW
3135 p = drm_seq_file_printer(m);
3136 for_each_engine(engine, dev_priv, id)
0db18b17 3137 intel_engine_dump(engine, &p, "%s\n", engine->name);
1b36595f 3138
a037121c 3139 intel_runtime_pm_put(dev_priv, wakeref);
9c870d03 3140
1b36595f
CW
3141 return 0;
3142}
3143
79e9cd5f
LL
3144static int i915_rcs_topology(struct seq_file *m, void *unused)
3145{
3146 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3147 struct drm_printer p = drm_seq_file_printer(m);
3148
0258404f 3149 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
79e9cd5f
LL
3150
3151 return 0;
3152}
3153
c5418a8b
CW
3154static int i915_shrinker_info(struct seq_file *m, void *unused)
3155{
3156 struct drm_i915_private *i915 = node_to_i915(m->private);
3157
3158 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3159 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3160
3161 return 0;
3162}
3163
728e29d7
DV
3164static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3165{
36cdd013
DW
3166 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3167 struct drm_device *dev = &dev_priv->drm;
728e29d7
DV
3168 int i;
3169
3170 drm_modeset_lock_all(dev);
3171 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3172 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3173
72f775fa 3174 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
0823eb9c 3175 pll->info->id);
2dd66ebd 3176 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2c42e535 3177 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
728e29d7 3178 seq_printf(m, " tracked hardware state:\n");
2c42e535 3179 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
3e369b76 3180 seq_printf(m, " dpll_md: 0x%08x\n",
2c42e535
ACO
3181 pll->state.hw_state.dpll_md);
3182 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3183 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3184 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
c27e917e
PZ
3185 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3186 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3187 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3188 pll->state.hw_state.mg_refclkin_ctl);
3189 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3190 pll->state.hw_state.mg_clktop2_coreclkctl1);
3191 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3192 pll->state.hw_state.mg_clktop2_hsclkctl);
3193 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3194 pll->state.hw_state.mg_pll_div0);
3195 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3196 pll->state.hw_state.mg_pll_div1);
3197 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3198 pll->state.hw_state.mg_pll_lf);
3199 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3200 pll->state.hw_state.mg_pll_frac_lock);
3201 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3202 pll->state.hw_state.mg_pll_ssc);
3203 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3204 pll->state.hw_state.mg_pll_bias);
3205 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3206 pll->state.hw_state.mg_pll_tdc_coldst_bias);
728e29d7
DV
3207 }
3208 drm_modeset_unlock_all(dev);
3209
3210 return 0;
3211}
3212
1ed1ef9d 3213static int i915_wa_registers(struct seq_file *m, void *unused)
888b5995 3214{
452420d2
TU
3215 struct drm_i915_private *i915 = node_to_i915(m->private);
3216 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3217 struct i915_wa *wa;
3218 unsigned int i;
888b5995 3219
452420d2
TU
3220 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3221 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
548764bb 3222 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
452420d2 3223 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
888b5995
AS
3224
3225 return 0;
3226}
3227
d2d4f39b
KM
3228static int i915_ipc_status_show(struct seq_file *m, void *data)
3229{
3230 struct drm_i915_private *dev_priv = m->private;
3231
3232 seq_printf(m, "Isochronous Priority Control: %s\n",
3233 yesno(dev_priv->ipc_enabled));
3234 return 0;
3235}
3236
3237static int i915_ipc_status_open(struct inode *inode, struct file *file)
3238{
3239 struct drm_i915_private *dev_priv = inode->i_private;
3240
3241 if (!HAS_IPC(dev_priv))
3242 return -ENODEV;
3243
3244 return single_open(file, i915_ipc_status_show, dev_priv);
3245}
3246
3247static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3248 size_t len, loff_t *offp)
3249{
3250 struct seq_file *m = file->private_data;
3251 struct drm_i915_private *dev_priv = m->private;
a037121c 3252 intel_wakeref_t wakeref;
d2d4f39b 3253 bool enable;
d4225a53 3254 int ret;
d2d4f39b
KM
3255
3256 ret = kstrtobool_from_user(ubuf, len, &enable);
3257 if (ret < 0)
3258 return ret;
3259
d4225a53
CW
3260 with_intel_runtime_pm(dev_priv, wakeref) {
3261 if (!dev_priv->ipc_enabled && enable)
3262 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3263 dev_priv->wm.distrust_bios_wm = true;
3264 dev_priv->ipc_enabled = enable;
3265 intel_enable_ipc(dev_priv);
3266 }
d2d4f39b
KM
3267
3268 return len;
3269}
3270
3271static const struct file_operations i915_ipc_status_fops = {
3272 .owner = THIS_MODULE,
3273 .open = i915_ipc_status_open,
3274 .read = seq_read,
3275 .llseek = seq_lseek,
3276 .release = single_release,
3277 .write = i915_ipc_status_write
3278};
3279
c5511e44
DL
3280static int i915_ddb_info(struct seq_file *m, void *unused)
3281{
36cdd013
DW
3282 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3283 struct drm_device *dev = &dev_priv->drm;
c5511e44 3284 struct skl_ddb_entry *entry;
ff43bc37 3285 struct intel_crtc *crtc;
c5511e44 3286
36cdd013 3287 if (INTEL_GEN(dev_priv) < 9)
ab309a6a 3288 return -ENODEV;
2fcffe19 3289
c5511e44
DL
3290 drm_modeset_lock_all(dev);
3291
c5511e44
DL
3292 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3293
ff43bc37
VS
3294 for_each_intel_crtc(&dev_priv->drm, crtc) {
3295 struct intel_crtc_state *crtc_state =
3296 to_intel_crtc_state(crtc->base.state);
3297 enum pipe pipe = crtc->pipe;
3298 enum plane_id plane_id;
3299
c5511e44
DL
3300 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3301
ff43bc37
VS
3302 for_each_plane_id_on_crtc(crtc, plane_id) {
3303 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3304 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
c5511e44
DL
3305 entry->start, entry->end,
3306 skl_ddb_entry_size(entry));
3307 }
3308
ff43bc37 3309 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
c5511e44
DL
3310 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3311 entry->end, skl_ddb_entry_size(entry));
3312 }
3313
3314 drm_modeset_unlock_all(dev);
3315
3316 return 0;
3317}
3318
a54746e3 3319static void drrs_status_per_crtc(struct seq_file *m,
36cdd013
DW
3320 struct drm_device *dev,
3321 struct intel_crtc *intel_crtc)
a54746e3 3322{
fac5e23e 3323 struct drm_i915_private *dev_priv = to_i915(dev);
a54746e3
VK
3324 struct i915_drrs *drrs = &dev_priv->drrs;
3325 int vrefresh = 0;
26875fe5 3326 struct drm_connector *connector;
3f6a5e1e 3327 struct drm_connector_list_iter conn_iter;
a54746e3 3328
3f6a5e1e
DV
3329 drm_connector_list_iter_begin(dev, &conn_iter);
3330 drm_for_each_connector_iter(connector, &conn_iter) {
26875fe5
ML
3331 if (connector->state->crtc != &intel_crtc->base)
3332 continue;
3333
3334 seq_printf(m, "%s:\n", connector->name);
a54746e3 3335 }
3f6a5e1e 3336 drm_connector_list_iter_end(&conn_iter);
a54746e3
VK
3337
3338 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3339 seq_puts(m, "\tVBT: DRRS_type: Static");
3340 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3341 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3342 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3343 seq_puts(m, "\tVBT: DRRS_type: None");
3344 else
3345 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3346
3347 seq_puts(m, "\n\n");
3348
f77076c9 3349 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
a54746e3
VK
3350 struct intel_panel *panel;
3351
3352 mutex_lock(&drrs->mutex);
3353 /* DRRS Supported */
3354 seq_puts(m, "\tDRRS Supported: Yes\n");
3355
3356 /* disable_drrs() will make drrs->dp NULL */
3357 if (!drrs->dp) {
ce6e2137
R
3358 seq_puts(m, "Idleness DRRS: Disabled\n");
3359 if (dev_priv->psr.enabled)
3360 seq_puts(m,
3361 "\tAs PSR is enabled, DRRS is not enabled\n");
a54746e3
VK
3362 mutex_unlock(&drrs->mutex);
3363 return;
3364 }
3365
3366 panel = &drrs->dp->attached_connector->panel;
3367 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3368 drrs->busy_frontbuffer_bits);
3369
3370 seq_puts(m, "\n\t\t");
3371 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3372 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3373 vrefresh = panel->fixed_mode->vrefresh;
3374 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3375 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3376 vrefresh = panel->downclock_mode->vrefresh;
3377 } else {
3378 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3379 drrs->refresh_rate_type);
3380 mutex_unlock(&drrs->mutex);
3381 return;
3382 }
3383 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3384
3385 seq_puts(m, "\n\t\t");
3386 mutex_unlock(&drrs->mutex);
3387 } else {
3388 /* DRRS not supported. Print the VBT parameter*/
3389 seq_puts(m, "\tDRRS Supported : No");
3390 }
3391 seq_puts(m, "\n");
3392}
3393
3394static int i915_drrs_status(struct seq_file *m, void *unused)
3395{
36cdd013
DW
3396 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3397 struct drm_device *dev = &dev_priv->drm;
a54746e3
VK
3398 struct intel_crtc *intel_crtc;
3399 int active_crtc_cnt = 0;
3400
26875fe5 3401 drm_modeset_lock_all(dev);
a54746e3 3402 for_each_intel_crtc(dev, intel_crtc) {
f77076c9 3403 if (intel_crtc->base.state->active) {
a54746e3
VK
3404 active_crtc_cnt++;
3405 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3406
3407 drrs_status_per_crtc(m, dev, intel_crtc);
3408 }
a54746e3 3409 }
26875fe5 3410 drm_modeset_unlock_all(dev);
a54746e3
VK
3411
3412 if (!active_crtc_cnt)
3413 seq_puts(m, "No active crtc found\n");
3414
3415 return 0;
3416}
3417
11bed958
DA
3418static int i915_dp_mst_info(struct seq_file *m, void *unused)
3419{
36cdd013
DW
3420 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3421 struct drm_device *dev = &dev_priv->drm;
11bed958
DA
3422 struct intel_encoder *intel_encoder;
3423 struct intel_digital_port *intel_dig_port;
b6dabe3b 3424 struct drm_connector *connector;
3f6a5e1e 3425 struct drm_connector_list_iter conn_iter;
b6dabe3b 3426
3f6a5e1e
DV
3427 drm_connector_list_iter_begin(dev, &conn_iter);
3428 drm_for_each_connector_iter(connector, &conn_iter) {
b6dabe3b 3429 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
11bed958 3430 continue;
b6dabe3b
ML
3431
3432 intel_encoder = intel_attached_encoder(connector);
3433 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3434 continue;
3435
3436 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
11bed958
DA
3437 if (!intel_dig_port->dp.can_mst)
3438 continue;
b6dabe3b 3439
40ae80cc 3440 seq_printf(m, "MST Source Port %c\n",
8f4f2797 3441 port_name(intel_dig_port->base.port));
11bed958
DA
3442 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3443 }
3f6a5e1e
DV
3444 drm_connector_list_iter_end(&conn_iter);
3445
11bed958
DA
3446 return 0;
3447}
3448
eb3394fa 3449static ssize_t i915_displayport_test_active_write(struct file *file,
36cdd013
DW
3450 const char __user *ubuf,
3451 size_t len, loff_t *offp)
eb3394fa
TP
3452{
3453 char *input_buffer;
3454 int status = 0;
eb3394fa
TP
3455 struct drm_device *dev;
3456 struct drm_connector *connector;
3f6a5e1e 3457 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3458 struct intel_dp *intel_dp;
3459 int val = 0;
3460
9aaffa34 3461 dev = ((struct seq_file *)file->private_data)->private;
eb3394fa 3462
eb3394fa
TP
3463 if (len == 0)
3464 return 0;
3465
261aeba8
GT
3466 input_buffer = memdup_user_nul(ubuf, len);
3467 if (IS_ERR(input_buffer))
3468 return PTR_ERR(input_buffer);
eb3394fa 3469
eb3394fa
TP
3470 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3471
3f6a5e1e
DV
3472 drm_connector_list_iter_begin(dev, &conn_iter);
3473 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3474 struct intel_encoder *encoder;
3475
eb3394fa
TP
3476 if (connector->connector_type !=
3477 DRM_MODE_CONNECTOR_DisplayPort)
3478 continue;
3479
a874b6a3
ML
3480 encoder = to_intel_encoder(connector->encoder);
3481 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3482 continue;
3483
3484 if (encoder && connector->status == connector_status_connected) {
3485 intel_dp = enc_to_intel_dp(&encoder->base);
eb3394fa
TP
3486 status = kstrtoint(input_buffer, 10, &val);
3487 if (status < 0)
3f6a5e1e 3488 break;
eb3394fa
TP
3489 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3490 /* To prevent erroneous activation of the compliance
3491 * testing code, only accept an actual value of 1 here
3492 */
3493 if (val == 1)
c1617abc 3494 intel_dp->compliance.test_active = 1;
eb3394fa 3495 else
c1617abc 3496 intel_dp->compliance.test_active = 0;
eb3394fa
TP
3497 }
3498 }
3f6a5e1e 3499 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3500 kfree(input_buffer);
3501 if (status < 0)
3502 return status;
3503
3504 *offp += len;
3505 return len;
3506}
3507
3508static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3509{
e4006713
AS
3510 struct drm_i915_private *dev_priv = m->private;
3511 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3512 struct drm_connector *connector;
3f6a5e1e 3513 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3514 struct intel_dp *intel_dp;
3515
3f6a5e1e
DV
3516 drm_connector_list_iter_begin(dev, &conn_iter);
3517 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3518 struct intel_encoder *encoder;
3519
eb3394fa
TP
3520 if (connector->connector_type !=
3521 DRM_MODE_CONNECTOR_DisplayPort)
3522 continue;
3523
a874b6a3
ML
3524 encoder = to_intel_encoder(connector->encoder);
3525 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3526 continue;
3527
3528 if (encoder && connector->status == connector_status_connected) {
3529 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3530 if (intel_dp->compliance.test_active)
eb3394fa
TP
3531 seq_puts(m, "1");
3532 else
3533 seq_puts(m, "0");
3534 } else
3535 seq_puts(m, "0");
3536 }
3f6a5e1e 3537 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3538
3539 return 0;
3540}
3541
3542static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3543 struct file *file)
eb3394fa 3544{
36cdd013 3545 return single_open(file, i915_displayport_test_active_show,
e4006713 3546 inode->i_private);
eb3394fa
TP
3547}
3548
3549static const struct file_operations i915_displayport_test_active_fops = {
3550 .owner = THIS_MODULE,
3551 .open = i915_displayport_test_active_open,
3552 .read = seq_read,
3553 .llseek = seq_lseek,
3554 .release = single_release,
3555 .write = i915_displayport_test_active_write
3556};
3557
3558static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3559{
e4006713
AS
3560 struct drm_i915_private *dev_priv = m->private;
3561 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3562 struct drm_connector *connector;
3f6a5e1e 3563 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3564 struct intel_dp *intel_dp;
3565
3f6a5e1e
DV
3566 drm_connector_list_iter_begin(dev, &conn_iter);
3567 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3568 struct intel_encoder *encoder;
3569
eb3394fa
TP
3570 if (connector->connector_type !=
3571 DRM_MODE_CONNECTOR_DisplayPort)
3572 continue;
3573
a874b6a3
ML
3574 encoder = to_intel_encoder(connector->encoder);
3575 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3576 continue;
3577
3578 if (encoder && connector->status == connector_status_connected) {
3579 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3580 if (intel_dp->compliance.test_type ==
3581 DP_TEST_LINK_EDID_READ)
3582 seq_printf(m, "%lx",
3583 intel_dp->compliance.test_data.edid);
611032bf
MN
3584 else if (intel_dp->compliance.test_type ==
3585 DP_TEST_LINK_VIDEO_PATTERN) {
3586 seq_printf(m, "hdisplay: %d\n",
3587 intel_dp->compliance.test_data.hdisplay);
3588 seq_printf(m, "vdisplay: %d\n",
3589 intel_dp->compliance.test_data.vdisplay);
3590 seq_printf(m, "bpc: %u\n",
3591 intel_dp->compliance.test_data.bpc);
3592 }
eb3394fa
TP
3593 } else
3594 seq_puts(m, "0");
3595 }
3f6a5e1e 3596 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3597
3598 return 0;
3599}
e4006713 3600DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3601
3602static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3603{
e4006713
AS
3604 struct drm_i915_private *dev_priv = m->private;
3605 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3606 struct drm_connector *connector;
3f6a5e1e 3607 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3608 struct intel_dp *intel_dp;
3609
3f6a5e1e
DV
3610 drm_connector_list_iter_begin(dev, &conn_iter);
3611 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3612 struct intel_encoder *encoder;
3613
eb3394fa
TP
3614 if (connector->connector_type !=
3615 DRM_MODE_CONNECTOR_DisplayPort)
3616 continue;
3617
a874b6a3
ML
3618 encoder = to_intel_encoder(connector->encoder);
3619 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3620 continue;
3621
3622 if (encoder && connector->status == connector_status_connected) {
3623 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3624 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3625 } else
3626 seq_puts(m, "0");
3627 }
3f6a5e1e 3628 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3629
3630 return 0;
3631}
e4006713 3632DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3633
e5315213 3634static void wm_latency_show(struct seq_file *m, const u16 wm[8])
369a1342 3635{
36cdd013
DW
3636 struct drm_i915_private *dev_priv = m->private;
3637 struct drm_device *dev = &dev_priv->drm;
369a1342 3638 int level;
de38b95c
VS
3639 int num_levels;
3640
36cdd013 3641 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3642 num_levels = 3;
36cdd013 3643 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3644 num_levels = 1;
04548cba
VS
3645 else if (IS_G4X(dev_priv))
3646 num_levels = 3;
de38b95c 3647 else
5db94019 3648 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3649
3650 drm_modeset_lock_all(dev);
3651
3652 for (level = 0; level < num_levels; level++) {
3653 unsigned int latency = wm[level];
3654
97e94b22
DL
3655 /*
3656 * - WM1+ latency values in 0.5us units
de38b95c 3657 * - latencies are in us on gen9/vlv/chv
97e94b22 3658 */
04548cba
VS
3659 if (INTEL_GEN(dev_priv) >= 9 ||
3660 IS_VALLEYVIEW(dev_priv) ||
3661 IS_CHERRYVIEW(dev_priv) ||
3662 IS_G4X(dev_priv))
97e94b22
DL
3663 latency *= 10;
3664 else if (level > 0)
369a1342
VS
3665 latency *= 5;
3666
3667 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3668 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3669 }
3670
3671 drm_modeset_unlock_all(dev);
3672}
3673
3674static int pri_wm_latency_show(struct seq_file *m, void *data)
3675{
36cdd013 3676 struct drm_i915_private *dev_priv = m->private;
e5315213 3677 const u16 *latencies;
97e94b22 3678
36cdd013 3679 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3680 latencies = dev_priv->wm.skl_latency;
3681 else
36cdd013 3682 latencies = dev_priv->wm.pri_latency;
369a1342 3683
97e94b22 3684 wm_latency_show(m, latencies);
369a1342
VS
3685
3686 return 0;
3687}
3688
3689static int spr_wm_latency_show(struct seq_file *m, void *data)
3690{
36cdd013 3691 struct drm_i915_private *dev_priv = m->private;
e5315213 3692 const u16 *latencies;
97e94b22 3693
36cdd013 3694 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3695 latencies = dev_priv->wm.skl_latency;
3696 else
36cdd013 3697 latencies = dev_priv->wm.spr_latency;
369a1342 3698
97e94b22 3699 wm_latency_show(m, latencies);
369a1342
VS
3700
3701 return 0;
3702}
3703
3704static int cur_wm_latency_show(struct seq_file *m, void *data)
3705{
36cdd013 3706 struct drm_i915_private *dev_priv = m->private;
e5315213 3707 const u16 *latencies;
97e94b22 3708
36cdd013 3709 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3710 latencies = dev_priv->wm.skl_latency;
3711 else
36cdd013 3712 latencies = dev_priv->wm.cur_latency;
369a1342 3713
97e94b22 3714 wm_latency_show(m, latencies);
369a1342
VS
3715
3716 return 0;
3717}
3718
/* Primary WM latencies are only exposed on g4x and gen5+. */
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

/* Sprite WM latencies are not exposed on GMCH display platforms. */
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

/* Cursor WM latencies are not exposed on GMCH display platforms. */
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
3748
/*
 * Common write handler for the WM latency debugfs files: parse exactly
 * num_levels u16 values from the user buffer and copy them into @wm.
 * Returns @len on success, -EINVAL/-EFAULT on bad input.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Platform-dependent level count; must match wm_latency_show() */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* The user must supply exactly num_levels values */
	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	/* Take the modeset locks around the update, like wm_latency_show() */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3793
3794
3795static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3796 size_t len, loff_t *offp)
3797{
3798 struct seq_file *m = file->private_data;
36cdd013 3799 struct drm_i915_private *dev_priv = m->private;
e5315213 3800 u16 *latencies;
369a1342 3801
36cdd013 3802 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3803 latencies = dev_priv->wm.skl_latency;
3804 else
36cdd013 3805 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3806
3807 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3808}
3809
3810static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3811 size_t len, loff_t *offp)
3812{
3813 struct seq_file *m = file->private_data;
36cdd013 3814 struct drm_i915_private *dev_priv = m->private;
e5315213 3815 u16 *latencies;
369a1342 3816
36cdd013 3817 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3818 latencies = dev_priv->wm.skl_latency;
3819 else
36cdd013 3820 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3821
3822 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3823}
3824
3825static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3826 size_t len, loff_t *offp)
3827{
3828 struct seq_file *m = file->private_data;
36cdd013 3829 struct drm_i915_private *dev_priv = m->private;
e5315213 3830 u16 *latencies;
97e94b22 3831
36cdd013 3832 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3833 latencies = dev_priv->wm.skl_latency;
3834 else
36cdd013 3835 latencies = dev_priv->wm.cur_latency;
369a1342 3836
97e94b22 3837 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3838}
3839
/* debugfs file_operations for the three read/write WM latency tables */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3866
647416f9
KC
/* Read: report i915_terminally_wedged() state of the GPU. */
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

/*
 * Write: manually declare a set of engines hung; val is treated as an
 * engine mask passed to i915_handle_error() (see the message below).
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	/* Refuse while a reset is already being backed off */
	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
f3cd474b 3901
64486ae7
CW
/*
 * Common helper for the missed/test irq debugfs controls: wait for the
 * GPU to idle under struct_mutex, store @val into the selected mask word,
 * then drain the idle worker so the interrupt state is settled.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
3932
094f9a54
CW
/* Read: report the mask of rings that have missed an interrupt. */
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

/* Write: replace the missed-irq ring mask (idles the GPU first). */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
3953
/* Read: report the test-irq ring mask. */
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

/* Write: set the test-irq ring mask, restricted to existing engines. */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* GuC keeps the user interrupt permanently enabled for submission */
	if (USES_GUC_SUBMISSION(i915))
		return -ENODEV;

	/*
	 * From icl, we can no longer individually mask interrupt generation
	 * from each engine.
	 */
	if (INTEL_GEN(i915) >= 11)
		return -ENODEV;

	/* Clamp the mask to engines that actually exist on this device */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
3989
b4a0b32d
CW
/* Flag bits accepted by the i915_drop_caches debugfs control */
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
#define DROP_ACTIVE BIT(3)
#define DROP_FREED BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
/* Union of every supported flag; this is what reads of the file report */
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE | \
		  DROP_FREED | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE | \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
647416f9
KC
/* Reading the control reports the full set of supported DROP_* flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4015
647416f9
KC
/*
 * Write handler for i915_drop_caches: @val is a mask of DROP_* flags
 * selecting which caches/state to flush. The ordering below is
 * deliberate: wedge-on-stuck-reset first, then retire/idle under
 * struct_mutex, then shrinking, then idle/freed draining.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	intel_wakeref_t wakeref;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	wakeref = intel_runtime_pm_get(i915);

	/* If the engines refuse to idle in time, declare the device wedged */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Kick off a full reset if the earlier idling left us wedged */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* fs_reclaim annotation around the shrinker calls */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/* Keep flushing workers until the GT reports !awake */
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915, wakeref);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
dd624afd 4084
647416f9
KC
/* Read the snoop control field from GEN6_MBCUNIT_SNPCR (gen6/7 only). */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

/* Read-modify-write the snoop control field; only values 0-3 accepted. */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
07b7ddd9 4132
/*
 * Fill @sseu with the live slice/subslice/EU power-gating state on CHV,
 * read from the per-subslice power signal registers.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* each PG_ENABLE bit gates a pair of EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4165
f8c3dcf9
RV
/*
 * Fill @sseu with the live slice/subslice/EU power-gating state on gen10,
 * based on the slice/EU PGCTL ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each ACK bit accounts for a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4221
/*
 * Fill @sseu with the live slice/subslice/EU power-gating state on gen9.
 * On gen9 "big core" parts the subslice mask is taken from the static
 * runtime info; on gen9 LP it is derived from the per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* each ACK bit accounts for a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4277
/*
 * Fill @sseu with the live slice state on BDW; subslice/EU counts are
 * derived from the static runtime info, minus fused-off 7-EU subslices.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4305
615d8908
ID
4306static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4307 const struct sseu_dev_info *sseu)
4308{
4309 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4310 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4311 int s;
615d8908 4312
c67ba538
ID
4313 seq_printf(m, " %s Slice Mask: %04x\n", type,
4314 sseu->slice_mask);
615d8908 4315 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4316 hweight8(sseu->slice_mask));
615d8908 4317 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4318 sseu_subslice_total(sseu));
8cc76693
LL
4319 for (s = 0; s < fls(sseu->slice_mask); s++) {
4320 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4321 s, hweight8(sseu->subslice_mask[s]));
4322 }
615d8908
ID
4323 seq_printf(m, " %s EU Total: %u\n", type,
4324 sseu->eu_total);
4325 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4326 sseu->eu_per_subslice);
4327
4328 if (!is_available_info)
4329 return;
4330
4331 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4332 if (HAS_POOLED_EU(dev_priv))
4333 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4334
4335 seq_printf(m, " Has Slice Power Gating: %s\n",
4336 yesno(sseu->has_slice_pg));
4337 seq_printf(m, " Has Subslice Power Gating: %s\n",
4338 yesno(sseu->has_subslice_pg));
4339 seq_printf(m, " Has EU Power Gating: %s\n",
4340 yesno(sseu->has_eu_pg));
4341}
4342
3873218f
JM
/*
 * debugfs: print the static SSEU capabilities followed by the live
 * power-gating status, probed under a runtime-pm wakeref (gen8+ only).
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* carry over the array bounds for the per-platform status probes */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4377
6d794d42
BW
/*
 * Opening the forcewake file grabs a user forcewake reference plus a
 * runtime-pm wakeref; the wakeref is stashed in file->private_data so
 * release can drop exactly the reference taken here.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* nothing to do before gen6 */
	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* nothing to do before gen6 */
	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(i915);
	/* drop the wakeref taken in i915_forcewake_open() */
	intel_runtime_pm_put(i915,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4410
317eaa95
L
4411static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4412{
4413 struct drm_i915_private *dev_priv = m->private;
4414 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4415
6fc5d789
LP
4416 /* Synchronize with everything first in case there's been an HPD
4417 * storm, but we haven't finished handling it in the kernel yet
4418 */
4419 synchronize_irq(dev_priv->drm.irq);
4420 flush_work(&dev_priv->hotplug.dig_port_work);
4421 flush_work(&dev_priv->hotplug.hotplug_work);
4422
317eaa95
L
4423 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4424 seq_printf(m, "Detected: %s\n",
4425 yesno(delayed_work_pending(&hotplug->reenable_work)));
4426
4427 return 0;
4428}
4429
4430static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4431 const char __user *ubuf, size_t len,
4432 loff_t *offp)
4433{
4434 struct seq_file *m = file->private_data;
4435 struct drm_i915_private *dev_priv = m->private;
4436 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4437 unsigned int new_threshold;
4438 int i;
4439 char *newline;
4440 char tmp[16];
4441
4442 if (len >= sizeof(tmp))
4443 return -EINVAL;
4444
4445 if (copy_from_user(tmp, ubuf, len))
4446 return -EFAULT;
4447
4448 tmp[len] = '\0';
4449
4450 /* Strip newline, if any */
4451 newline = strchr(tmp, '\n');
4452 if (newline)
4453 *newline = '\0';
4454
4455 if (strcmp(tmp, "reset") == 0)
4456 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4457 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4458 return -EINVAL;
4459
4460 if (new_threshold > 0)
4461 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4462 new_threshold);
4463 else
4464 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4465
4466 spin_lock_irq(&dev_priv->irq_lock);
4467 hotplug->hpd_storm_threshold = new_threshold;
4468 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4469 for_each_hpd_pin(i)
4470 hotplug->stats[i].count = 0;
4471 spin_unlock_irq(&dev_priv->irq_lock);
4472
4473 /* Re-enable hpd immediately if we were in an irq storm */
4474 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4475
4476 return len;
4477}
4478
/* single_open wrapper so the write handler can reach dev_priv via m->private */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4492
9a64c650
LP
/* Report whether HPD short-pulse storm detection is currently enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4509
4510static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4511 const char __user *ubuf,
4512 size_t len, loff_t *offp)
4513{
4514 struct seq_file *m = file->private_data;
4515 struct drm_i915_private *dev_priv = m->private;
4516 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4517 char *newline;
4518 char tmp[16];
4519 int i;
4520 bool new_state;
4521
4522 if (len >= sizeof(tmp))
4523 return -EINVAL;
4524
4525 if (copy_from_user(tmp, ubuf, len))
4526 return -EFAULT;
4527
4528 tmp[len] = '\0';
4529
4530 /* Strip newline, if any */
4531 newline = strchr(tmp, '\n');
4532 if (newline)
4533 *newline = '\0';
4534
4535 /* Reset to the "default" state for this system */
4536 if (strcmp(tmp, "reset") == 0)
4537 new_state = !HAS_DP_MST(dev_priv);
4538 else if (kstrtobool(tmp, &new_state) != 0)
4539 return -EINVAL;
4540
4541 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4542 new_state ? "En" : "Dis");
4543
4544 spin_lock_irq(&dev_priv->irq_lock);
4545 hotplug->hpd_short_storm_enabled = new_state;
4546 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4547 for_each_hpd_pin(i)
4548 hotplug->stats[i].count = 0;
4549 spin_unlock_irq(&dev_priv->irq_lock);
4550
4551 /* Re-enable hpd immediately if we were in an irq storm */
4552 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4553
4554 return len;
4555}
4556
4557static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4558 .owner = THIS_MODULE,
4559 .open = i915_hpd_short_storm_ctl_open,
4560 .read = seq_read,
4561 .llseek = seq_lseek,
4562 .release = single_release,
4563 .write = i915_hpd_short_storm_ctl_write,
4564};
4565
35954e88
R
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * crtc that supports it. Each crtc is handled under its own modeset
 * lock, after waiting for any pending commit to reach hw_done.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* only active crtcs with DRRS support are touched */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* wait for any in-flight commit on this crtc to land */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* skip connectors not driven by this crtc */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4635
d52ad9cb
ML
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * pipe (after waiting for pending commits to complete) and then resets the
 * FBC underrun state. Returns the number of bytes consumed or a negative
 * error code.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A falsy write is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		/* Wait for any in-flight commit to fully land before re-arming. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4689
4690static const struct file_operations i915_fifo_underrun_reset_ops = {
4691 .owner = THIS_MODULE,
4692 .open = simple_open,
4693 .write = i915_fifo_underrun_reset_write,
4694 .llseek = default_llseek,
4695};
4696
/*
 * Read-only informational debugfs files, registered in bulk via
 * drm_debugfs_create_files() from i915_debugfs_register(). Entry order is
 * the order the files are created in.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* Same show function as above; (void *)1 selects the load-error log. */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4746
/*
 * Writable control files, each with dedicated file_operations. Created one
 * by one (mode 0644) in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/* Error-capture files only exist when error capture is compiled in. */
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4776
1dac891c 4777int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4778{
91c8a326 4779 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4780 struct dentry *ent;
6cc42152 4781 int i;
f3cd474b 4782
b05eeb0f
NT
4783 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4784 minor->debugfs_root, to_i915(minor->dev),
4785 &i915_forcewake_fops);
4786 if (!ent)
4787 return -ENOMEM;
6a9c308d 4788
34b9674c 4789 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4790 ent = debugfs_create_file(i915_debugfs_files[i].name,
4791 S_IRUGO | S_IWUSR,
4792 minor->debugfs_root,
4793 to_i915(minor->dev),
34b9674c 4794 i915_debugfs_files[i].fops);
b05eeb0f
NT
4795 if (!ent)
4796 return -ENOMEM;
34b9674c 4797 }
40633219 4798
27c202ad
BG
4799 return drm_debugfs_create_files(i915_debugfs_list,
4800 I915_DEBUGFS_ENTRIES,
2017263e
BG
4801 minor->debugfs_root, minor);
4802}
4803
aa7471d2
JN
/* One range of DPCD registers to dump; consumed by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4814
/* DPCD register ranges dumped by the per-connector i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4827
4828static int i915_dpcd_show(struct seq_file *m, void *data)
4829{
4830 struct drm_connector *connector = m->private;
4831 struct intel_dp *intel_dp =
4832 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
e5315213 4833 u8 buf[16];
aa7471d2
JN
4834 ssize_t err;
4835 int i;
4836
5c1a8875
MK
4837 if (connector->status != connector_status_connected)
4838 return -ENODEV;
4839
aa7471d2
JN
4840 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4841 const struct dpcd_block *b = &i915_dpcd_debug[i];
4842 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4843
4844 if (b->edp &&
4845 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4846 continue;
4847
4848 /* low tech for now */
4849 if (WARN_ON(size > sizeof(buf)))
4850 continue;
4851
4852 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
65404c89
CW
4853 if (err < 0)
4854 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4855 else
4856 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
b3f9d7d7 4857 }
aa7471d2
JN
4858
4859 return 0;
4860}
e4006713 4861DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 4862
ecbd6781
DW
/*
 * Print the eDP panel power-sequencing and backlight delay values read
 * from intel_dp. Fails with -ENODEV when the panel is not connected.
 */
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 4884
bdc93fe0
R
/*
 * Report whether the sink behind this connector is HDCP capable.
 * -ENODEV when disconnected, -EINVAL when the connector itself has no
 * HDCP support (no shim registered).
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* The connector must support HDCP at all, i.e. have a shim. */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4906
e845f099
MN
/*
 * Report DSC enable state and DSC/FEC sink capability for this connector.
 *
 * Takes the connection mutex and the crtc lock under an interruptible
 * acquire context; on -EDEADLK it backs off and retries the whole
 * sequence rather than deadlocking against a concurrent modeset.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Interrupted while waiting for the connection lock. */
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Drop all locks and restart to avoid a deadlock. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC is only relevant for external DP, not eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4960
4961static ssize_t i915_dsc_fec_support_write(struct file *file,
4962 const char __user *ubuf,
4963 size_t len, loff_t *offp)
4964{
4965 bool dsc_enable = false;
4966 int ret;
4967 struct drm_connector *connector =
4968 ((struct seq_file *)file->private_data)->private;
4969 struct intel_encoder *encoder = intel_attached_encoder(connector);
4970 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4971
4972 if (len == 0)
4973 return 0;
4974
4975 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4976 len);
4977
4978 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4979 if (ret < 0)
4980 return ret;
4981
4982 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4983 (dsc_enable) ? "true" : "false");
4984 intel_dp->force_dsc_en = dsc_enable;
4985
4986 *offp += len;
4987 return len;
4988}
4989
/* single_open() adapter so reads go through i915_dsc_fec_support_show(). */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4996
4997static const struct file_operations i915_dsc_fec_support_fops = {
4998 .owner = THIS_MODULE,
4999 .open = i915_dsc_fec_support_open,
5000 .read = seq_read,
5001 .llseek = seq_lseek,
5002 .release = single_release,
5003 .write = i915_dsc_fec_support_write
5004};
5005
aa7471d2
JN
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	/* DPCD dump makes sense for any DP-class connector. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	/* Panel timing and PSR sink status only exist for eDP panels. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	/* HDCP capability is exposed on DP and HDMI connectors. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	/* DSC/FEC support is only reported on gen10+ DP/eDP connectors. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}