drm/i915: Remove intel_context.active_link
[linux-block.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
d92a8cfc 29#include <linux/sched/mm.h>
98afa316
JN
30#include <linux/sort.h>
31
fcd70cd3
DV
32#include <drm/drm_debugfs.h>
33#include <drm/drm_fourcc.h>
2017263e 34
112ed2d3
CW
35#include "gt/intel_reset.h"
36
02684446 37#include "i915_gem_context.h"
27fec1f9 38#include "intel_dp.h"
98afa316
JN
39#include "intel_drv.h"
40#include "intel_fbc.h"
41#include "intel_guc_submission.h"
408bd917 42#include "intel_hdcp.h"
0550691d 43#include "intel_hdmi.h"
696173b0 44#include "intel_pm.h"
55367a27 45#include "intel_psr.h"
56c5098f 46#include "intel_sideband.h"
9f58892e 47
/* Map a debugfs info node back to the i915 device that registered it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
52
70d39fe4
CW
53static int i915_capabilities(struct seq_file *m, void *data)
54{
36cdd013
DW
55 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56 const struct intel_device_info *info = INTEL_INFO(dev_priv);
a8c9b849 57 struct drm_printer p = drm_seq_file_printer(m);
70d39fe4 58
36cdd013 59 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
2e0d26f8 60 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
36cdd013 61 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
418e3cd8 62
a8c9b849 63 intel_device_info_dump_flags(info, &p);
0258404f 64 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
3fed1808 65 intel_driver_caps_print(&dev_priv->caps, &p);
70d39fe4 66
418e3cd8 67 kernel_param_lock(THIS_MODULE);
acfb9973 68 i915_params_dump(&i915_modparams, &p);
418e3cd8
CW
69 kernel_param_unlock(THIS_MODULE);
70
70d39fe4
CW
71 return 0;
72}
2017263e 73
/* '*' while the object is still referenced by in-flight GPU work. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
78
a7363de7 79static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b 80{
bd3d2252 81 return obj->pin_global ? 'p' : ' ';
be12a86b
TU
82}
83
a7363de7 84static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 85{
3e510a8e 86 switch (i915_gem_object_get_tiling(obj)) {
0206e353 87 default:
be12a86b
TU
88 case I915_TILING_NONE: return ' ';
89 case I915_TILING_X: return 'X';
90 case I915_TILING_Y: return 'Y';
0206e353 91 }
a6172a80
CW
92}
93
a7363de7 94static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 95{
a65adaf8 96 return obj->userfault_count ? 'g' : ' ';
be12a86b
TU
97}
98
a7363de7 99static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 100{
a4f5ea64 101 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
102}
103
ca1543be
TU
104static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
105{
106 u64 size = 0;
107 struct i915_vma *vma;
108
e2189dd0
CW
109 for_each_ggtt_vma(vma, obj) {
110 if (drm_mm_node_allocated(&vma->node))
ca1543be
TU
111 size += vma->node.size;
112 }
113
114 return size;
115}
116
7393b7ee
MA
117static const char *
118stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
119{
120 size_t x = 0;
121
122 switch (page_sizes) {
123 case 0:
124 return "";
125 case I915_GTT_PAGE_SIZE_4K:
126 return "4K";
127 case I915_GTT_PAGE_SIZE_64K:
128 return "64K";
129 case I915_GTT_PAGE_SIZE_2M:
130 return "2M";
131 default:
132 if (!buf)
133 return "M";
134
135 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
136 x += snprintf(buf + x, len - x, "2M, ");
137 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
138 x += snprintf(buf + x, len - x, "64K, ");
139 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
140 x += snprintf(buf + x, len - x, "4K, ");
141 buf[x-2] = '\0';
142
143 return buf;
144 }
145}
146
/*
 * Print a one-line summary of a GEM object: flag characters, size, cache
 * domains, then one "(...)" clause per bound vma with its GTT placement.
 * Must be called with struct_mutex held (the vma lists are walked unlocked).
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flags line: active/pinned/tiling/userfault/mapped + size + domains. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned vma. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that actually holds GTT space. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT vma carry a view (normal/partial/rotated). */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
234
e637d2cb 235static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 236{
e637d2cb
CW
237 const struct drm_i915_gem_object *a =
238 *(const struct drm_i915_gem_object **)A;
239 const struct drm_i915_gem_object *b =
240 *(const struct drm_i915_gem_object **)B;
6d2b8885 241
2d05fa16
RV
242 if (a->stolen->start < b->stolen->start)
243 return -1;
244 if (a->stolen->start > b->stolen->start)
245 return 1;
246 return 0;
6d2b8885
CW
247}
248
/*
 * debugfs: list every object backed by stolen memory, sorted by its
 * position within the stolen region, followed by size totals.
 *
 * The object lists are walked under mm.obj_lock with a pre-sized snapshot
 * array (describe_obj() can sleep, so it must run outside the spinlock).
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Snapshot the object count; the array may end up short if it grows. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
311
/* Accumulator for per-client (or kernel) GEM object statistics. */
struct file_stats {
	struct i915_address_space *vm; /* ppgtt to match per-process vma against */
	unsigned long count;           /* number of objects visited */
	u64 total, unbound;            /* total bytes / bytes with no binding */
	u64 global, shared;            /* GGTT-bound bytes / exported-or-named bytes */
	u64 active, inactive;          /* bytes in vma busy on / idle from the GPU */
	u64 closed;                    /* bytes in vma already closed by userspace */
};
320
/*
 * idr_for_each() callback: fold one GEM object into a struct file_stats.
 * Counts total/unbound/shared sizes, then classifies each allocated vma:
 * GGTT vma always count; ppgtt vma only when they belong to stats->vm.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Skip vma that live in some other process' ppgtt. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
358
/*
 * Emit one summary line for a struct file_stats under the given label.
 * Prints nothing when no objects were counted. Macro (not a function) so
 * it can take the stats struct by value from differently-named locals.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
493018dc
BV
372
373static void print_batch_pool_stats(struct seq_file *m,
374 struct drm_i915_private *dev_priv)
375{
376 struct drm_i915_gem_object *obj;
e2f80391 377 struct intel_engine_cs *engine;
f6e8aa38 378 struct file_stats stats = {};
3b3f1650 379 enum intel_engine_id id;
b4ac5afc 380 int j;
493018dc 381
3b3f1650 382 for_each_engine(engine, dev_priv, id) {
e2f80391 383 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744 384 list_for_each_entry(obj,
e2f80391 385 &engine->batch_pool.cache_list[j],
8d9d5744
CW
386 batch_pool_link)
387 per_file_stats(0, obj, &stats);
388 }
06fbca71 389 }
493018dc 390
b0da1b79 391 print_file_stats(m, "[k]batch pool", stats);
493018dc
BV
392}
393
/*
 * Per-context statistics: kernel-owned context state/ring objects are
 * folded into one "[k]contexts" line; each context with an open file gets
 * its own line keyed by the owning task's comm.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Count the context image and ring buffer of every engine. */
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			/* table_lock guards the client's object idr. */
			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* RCU protects the pid -> task lookup. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
435
/*
 * debugfs: global GEM memory accounting. Walks the unbound and bound
 * object lists under mm.obj_lock to total sizes by category (purgeable,
 * mapped, huge-paged, display-pinned), then appends batch-pool and
 * per-context statistics under struct_mutex.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* sg > 4K means the object uses at least one larger page. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reset per-list counters; purgeable/mapped/huge keep accumulating. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
539
aee56cff 540static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 541{
9f25d007 542 struct drm_info_node *node = m->private;
36cdd013
DW
543 struct drm_i915_private *dev_priv = node_to_i915(node);
544 struct drm_device *dev = &dev_priv->drm;
f2123818 545 struct drm_i915_gem_object **objects;
08c18323 546 struct drm_i915_gem_object *obj;
c44ef60e 547 u64 total_obj_size, total_gtt_size;
f2123818 548 unsigned long nobject, n;
08c18323
CW
549 int count, ret;
550
f2123818
CW
551 nobject = READ_ONCE(dev_priv->mm.object_count);
552 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
553 if (!objects)
554 return -ENOMEM;
555
08c18323
CW
556 ret = mutex_lock_interruptible(&dev->struct_mutex);
557 if (ret)
558 return ret;
559
f2123818
CW
560 count = 0;
561 spin_lock(&dev_priv->mm.obj_lock);
562 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
563 objects[count++] = obj;
564 if (count == nobject)
565 break;
566 }
567 spin_unlock(&dev_priv->mm.obj_lock);
568
569 total_obj_size = total_gtt_size = 0;
570 for (n = 0; n < count; n++) {
571 obj = objects[n];
572
267f0c90 573 seq_puts(m, " ");
08c18323 574 describe_obj(m, obj);
267f0c90 575 seq_putc(m, '\n');
08c18323 576 total_obj_size += obj->base.size;
ca1543be 577 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
578 }
579
580 mutex_unlock(&dev->struct_mutex);
581
c44ef60e 582 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 583 count, total_obj_size, total_gtt_size);
f2123818 584 kvfree(objects);
08c18323
CW
585
586 return 0;
587}
588
493018dc
BV
589static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
590{
36cdd013
DW
591 struct drm_i915_private *dev_priv = node_to_i915(m->private);
592 struct drm_device *dev = &dev_priv->drm;
493018dc 593 struct drm_i915_gem_object *obj;
e2f80391 594 struct intel_engine_cs *engine;
3b3f1650 595 enum intel_engine_id id;
8d9d5744 596 int total = 0;
b4ac5afc 597 int ret, j;
493018dc
BV
598
599 ret = mutex_lock_interruptible(&dev->struct_mutex);
600 if (ret)
601 return ret;
602
3b3f1650 603 for_each_engine(engine, dev_priv, id) {
e2f80391 604 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744
CW
605 int count;
606
607 count = 0;
608 list_for_each_entry(obj,
e2f80391 609 &engine->batch_pool.cache_list[j],
8d9d5744
CW
610 batch_pool_link)
611 count++;
612 seq_printf(m, "%s cache[%d]: %d objects\n",
e2f80391 613 engine->name, j, count);
8d9d5744
CW
614
615 list_for_each_entry(obj,
e2f80391 616 &engine->batch_pool.cache_list[j],
8d9d5744
CW
617 batch_pool_link) {
618 seq_puts(m, " ");
619 describe_obj(m, obj);
620 seq_putc(m, '\n');
621 }
622
623 total += count;
06fbca71 624 }
493018dc
BV
625 }
626
8d9d5744 627 seq_printf(m, "total: %d\n", total);
493018dc
BV
628
629 mutex_unlock(&dev->struct_mutex);
630
631 return 0;
632}
633
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is off), then the port, misc and PCU
 * interrupt registers. Caller holds a runtime-pm wakeref.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/* Reading the registers of a powered-down pipe would hang. */
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
685
/*
 * debugfs: dump the interrupt registers relevant to this platform.
 * One branch per hardware generation/family (CHV, gen11+, gen8+, VLV,
 * pre-PCH-split, PCH-split), followed by the per-engine interrupt masks.
 * Holds a runtime-pm wakeref across all the register reads.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
904
a6172a80
CW
905static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
906{
36cdd013
DW
907 struct drm_i915_private *dev_priv = node_to_i915(m->private);
908 struct drm_device *dev = &dev_priv->drm;
de227ef0
CW
909 int i, ret;
910
911 ret = mutex_lock_interruptible(&dev->struct_mutex);
912 if (ret)
913 return ret;
a6172a80 914
a6172a80
CW
915 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
916 for (i = 0; i < dev_priv->num_fence_regs; i++) {
49ef5294 917 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
a6172a80 918
6c085a72
CW
919 seq_printf(m, "Fence %d, pin count = %d, object = ",
920 i, dev_priv->fence_regs[i].pin_count);
49ef5294 921 if (!vma)
267f0c90 922 seq_puts(m, "unused");
c2c347a9 923 else
49ef5294 924 describe_obj(m, vma->obj);
267f0c90 925 seq_putc(m, '\n');
a6172a80
CW
926 }
927
05394f39 928 mutex_unlock(&dev->struct_mutex);
a6172a80
CW
929 return 0;
930}
931
98a2f411 932#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * read() for a captured GPU error state: serialise the snapshot into a
 * kernel bounce buffer, then copy it to userspace, advancing *pos only
 * on success.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	/* copy_to_user() returns the number of bytes NOT copied; 0 == ok. */
	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
edc3d884 962
5a4c6f1b
CW
/*
 * gpu_state_release - ->release() hook: drop the reference on the captured
 * GPU state taken at open time (NULL-safe per i915_gpu_state_put callers
 * elsewhere — NOTE(review): presumed, confirm against its definition).
 */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
968
/*
 * i915_gpu_info_open - capture a fresh snapshot of the current GPU state.
 *
 * Takes a runtime-pm wakeref for the duration of the capture so the
 * hardware is awake, then stashes the snapshot in file->private_data for
 * gpu_state_read()/gpu_state_release().
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}
984
5a4c6f1b
CW
/* debugfs "i915_gpu_info": capture-on-open, read-only GPU state dump. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
992
/*
 * i915_error_state_write - any write to the error-state file clears the
 * stored error state (the written bytes themselves are ignored).
 *
 * Returns @cnt so userspace sees the whole write as consumed.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
edc3d884 1009
5a4c6f1b
CW
/*
 * i915_error_state_open - attach the first (oldest) recorded error state
 * to the file, unlike i915_gpu_info_open() which captures a new snapshot.
 */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}
1021
/* debugfs "i915_error_state": read the recorded hang, write to clear it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
98a2f411
CW
1030#endif
1031
/*
 * i915_frequency_info - dump the GPU frequency/RPS state for the platform.
 *
 * Three hardware families are handled:
 *  - gen5 (Ironlake): MEMSWCTL/MEMSTAT_ILK P-state/VID fields;
 *  - Valleyview/Cherryview: GEN6_RP_CONTROL plus the Punit frequency
 *    status, with the software RPS bookkeeping from @rps;
 *  - gen6+: the full RPS register set (requests, thresholds, interrupt
 *    masks, RP0/RP1/RPn capabilities) alongside the software state.
 * Finally the CD clock / dot clock limits are printed for all platforms.
 * A runtime-pm wakeref is held across all register reads.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		/* Punit access requires its own get/put bracket. */
		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			/* Broxton-class parts use their own cap registers. */
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		/* Requested-frequency field position varies by generation. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/*
		 * On GEN9_LP the RPn/RP0 fields of RP_STATE_CAP are swapped
		 * relative to the big-core layout; gen9/gen10 frequencies
		 * are stored in 16.66 MHz units (GEN9_FREQ_SCALER).
		 */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1254
d636951e
BW
/*
 * i915_instdone_info - pretty-print an intel_instdone snapshot.
 *
 * Later generations expose progressively more detail: the base INSTDONE
 * word everywhere, slice-common state from gen4, and per-slice/subslice
 * sampler/row state from gen7 onwards.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
1282
f654449a
CW
/*
 * i915_hangcheck_info - report hangcheck state for every engine.
 *
 * Prints the global reset flags, whether the hangcheck worker is armed,
 * and per engine the hangcheck seqno bookkeeping, ACTHD, and (for the
 * render engine) both a freshly sampled INSTDONE and the accumulated one
 * kept by hangcheck. Hardware sampling is done under a runtime-pm wakeref.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the live hardware state while holding a wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* Only the render engine has INSTDONE worth decoding. */
		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1351
061d06a2
MT
1352static int i915_reset_info(struct seq_file *m, void *unused)
1353{
1354 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1355 struct i915_gpu_error *error = &dev_priv->gpu_error;
1356 struct intel_engine_cs *engine;
1357 enum intel_engine_id id;
1358
1359 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1360
1361 for_each_engine(engine, dev_priv, id) {
1362 seq_printf(m, "%s = %u\n", engine->name,
1363 i915_reset_engine_count(error, engine));
1364 }
1365
1366 return 0;
1367}
1368
/*
 * ironlake_drpc_info - decode Ironlake render-standby (DRPC) state.
 *
 * Dumps the MEMMODECTL boost/control fields, the CRSTANDVID voltage IDs,
 * and translates the RSTDBYCTL status field into the current RS state.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1425
/*
 * i915_forcewake_domains - show the userspace bypass count and the wake
 * count of every forcewake domain.
 *
 * Counts are read with READ_ONCE and may be stale by the time they are
 * printed; this is diagnostic output, not a synchronized snapshot.
 */
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake.count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
1443
1362877e
MK
1444static void print_rc6_res(struct seq_file *m,
1445 const char *title,
1446 const i915_reg_t reg)
1447{
1448 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1449
1450 seq_printf(m, "%s %u (%llu us)\n",
1451 title, I915_READ(reg),
1452 intel_rc6_residency_us(dev_priv, reg));
1453}
1454
b2cff0db
CW
/*
 * vlv_drpc_info - Valleyview/Cherryview RC6 and power-well status.
 *
 * Decodes GEN6_RC_CONTROL and VLV_GTLC_PW_STATUS, prints the RC6
 * residency counters, and chains into the forcewake-domain dump.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1476
4d85529d
BW
/*
 * gen6_drpc_info - gen6+ RC-state (DRPC) report.
 *
 * Decodes GEN6_RC_CONTROL and GEN6_GT_CORE_STATUS into the enabled RC
 * levels and the current RC state, adds gen9+ power-gating status, the
 * RC6 residency counters, pcode-reported RC6 voltages (gen6/7 only),
 * and finally the forcewake-domain dump.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses forcewake; the rw tracepoint is emitted by hand. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only reported by pcode up to gen7. */
	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1561
/*
 * i915_drpc_info - platform dispatcher for the DRPC debugfs entry.
 *
 * Holds a runtime-pm wakeref across the platform-specific report.
 * with_intel_runtime_pm() is a scoped macro, so the result is carried
 * out in @err rather than returned from inside the block.
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
1579
9a851789
DV
1580static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1581{
36cdd013 1582 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1583
1584 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1585 dev_priv->fb_tracking.busy_bits);
1586
1587 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1588 dev_priv->fb_tracking.flip_bits);
1589
1590 return 0;
1591}
1592
b5e50c3f
JB
1593static int i915_fbc_status(struct seq_file *m, void *unused)
1594{
36cdd013 1595 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3138872c 1596 struct intel_fbc *fbc = &dev_priv->fbc;
a037121c 1597 intel_wakeref_t wakeref;
b5e50c3f 1598
ab309a6a
MW
1599 if (!HAS_FBC(dev_priv))
1600 return -ENODEV;
b5e50c3f 1601
a037121c 1602 wakeref = intel_runtime_pm_get(dev_priv);
3138872c 1603 mutex_lock(&fbc->lock);
36623ef8 1604
0e631adc 1605 if (intel_fbc_is_active(dev_priv))
267f0c90 1606 seq_puts(m, "FBC enabled\n");
2e8144a5 1607 else
3138872c
CW
1608 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1609
3fd5d1ec
VS
1610 if (intel_fbc_is_active(dev_priv)) {
1611 u32 mask;
1612
1613 if (INTEL_GEN(dev_priv) >= 8)
1614 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1615 else if (INTEL_GEN(dev_priv) >= 7)
1616 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1617 else if (INTEL_GEN(dev_priv) >= 5)
1618 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1619 else if (IS_G4X(dev_priv))
1620 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1621 else
1622 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1623 FBC_STAT_COMPRESSED);
1624
1625 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1626 }
31b9df10 1627
3138872c 1628 mutex_unlock(&fbc->lock);
a037121c 1629 intel_runtime_pm_put(dev_priv, wakeref);
36623ef8 1630
b5e50c3f
JB
1631 return 0;
1632}
1633
/*
 * i915_fbc_false_color_get - debugfs getter for the FBC false-color
 * debug mode (gen7+ with FBC only).
 */
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}
1645
/*
 * i915_fbc_false_color_set - toggle FBC false-color debug mode.
 *
 * Records the request in dev_priv->fbc.false_color and performs a
 * read-modify-write of FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL, all
 * under fbc->lock so the RMW cannot race other FBC updates.
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}
1666
4127dc43
VS
/* debugfs attribute wiring the false-color get/set pair, "%llu" format. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1670
92d44621
PZ
/*
 * i915_ips_status - report Intermediate Pixel Storage state.
 *
 * Shows the module-parameter setting and, where readable (pre-gen8),
 * the live IPS_CTL enable bit; gen8+ cannot report the current state.
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		/* Current state not directly readable on gen8+. */
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1697
4a9bef37
JB
/*
 * i915_sr_status - report whether display self-refresh is enabled.
 *
 * The enable bit lives in a different register on nearly every platform;
 * gen9+ has no single global bit at all (per-plane watermarks instead),
 * so it reports disabled. A display power reference keeps the registers
 * accessible during the read.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1726
7648fa99
JB
/*
 * i915_emon_status - Ironlake (gen5) energy-monitor readout.
 *
 * Samples the MCH temperature and the chipset/GFX power estimates under
 * a runtime-pm wakeref. Other generations return -ENODEV.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!IS_GEN(i915, 5))
		return -ENODEV;

	with_intel_runtime_pm(i915, wakeref) {
		unsigned long temp, chipset, gfx;

		temp = i915_mch_val(i915);
		chipset = i915_chipset_val(i915);
		gfx = i915_gfx_val(i915);

		seq_printf(m, "GMCH temp: %ld\n", temp);
		seq_printf(m, "Chipset power: %ld\n", chipset);
		seq_printf(m, "GFX power: %ld\n", gfx);
		seq_printf(m, "Total power: %ld\n", chipset + gfx);
	}

	return 0;
}
1750
23b2f8bb
JB
/*
 * i915_ring_freq_table - print the GPU-to-CPU/ring frequency mapping.
 *
 * For each GPU frequency step between min and max, queries pcode for the
 * matching effective IA and ring frequencies (each encoded in one byte,
 * in 100 MHz units). LLC-less platforms have no such table (-ENODEV).
 * Gen9-BC and gen10+ store GT frequencies in GEN9_FREQ_SCALER units,
 * hence the divide before iterating and multiply when printing.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(dev_priv);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1790
44834a67
CW
1791static int i915_opregion(struct seq_file *m, void *unused)
1792{
36cdd013
DW
1793 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1794 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1795 struct intel_opregion *opregion = &dev_priv->opregion;
1796 int ret;
1797
1798 ret = mutex_lock_interruptible(&dev->struct_mutex);
1799 if (ret)
0d38f009 1800 goto out;
44834a67 1801
2455a8e4
JN
1802 if (opregion->header)
1803 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1804
1805 mutex_unlock(&dev->struct_mutex);
1806
0d38f009 1807out:
44834a67
CW
1808 return 0;
1809}
1810
ada8f955
JN
1811static int i915_vbt(struct seq_file *m, void *unused)
1812{
36cdd013 1813 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1814
1815 if (opregion->vbt)
1816 seq_write(m, opregion->vbt, opregion->vbt_size);
1817
1818 return 0;
1819}
1820
37811fcc
CW
1821static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1822{
36cdd013
DW
1823 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1824 struct drm_device *dev = &dev_priv->drm;
b13b8402 1825 struct intel_framebuffer *fbdev_fb = NULL;
3a58ee10 1826 struct drm_framebuffer *drm_fb;
188c1ab7
CW
1827 int ret;
1828
1829 ret = mutex_lock_interruptible(&dev->struct_mutex);
1830 if (ret)
1831 return ret;
37811fcc 1832
0695726e 1833#ifdef CONFIG_DRM_FBDEV_EMULATION
346fb4e0 1834 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
36cdd013 1835 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
25bcce94
CW
1836
1837 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1838 fbdev_fb->base.width,
1839 fbdev_fb->base.height,
b00c600e 1840 fbdev_fb->base.format->depth,
272725c7 1841 fbdev_fb->base.format->cpp[0] * 8,
bae781b2 1842 fbdev_fb->base.modifier,
25bcce94 1843 drm_framebuffer_read_refcount(&fbdev_fb->base));
a5ff7a45 1844 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
25bcce94
CW
1845 seq_putc(m, '\n');
1846 }
4520f53a 1847#endif
37811fcc 1848
4b096ac1 1849 mutex_lock(&dev->mode_config.fb_lock);
3a58ee10 1850 drm_for_each_fb(drm_fb, dev) {
b13b8402
NS
1851 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1852 if (fb == fbdev_fb)
37811fcc
CW
1853 continue;
1854
c1ca506d 1855 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
37811fcc
CW
1856 fb->base.width,
1857 fb->base.height,
b00c600e 1858 fb->base.format->depth,
272725c7 1859 fb->base.format->cpp[0] * 8,
bae781b2 1860 fb->base.modifier,
747a598f 1861 drm_framebuffer_read_refcount(&fb->base));
a5ff7a45 1862 describe_obj(m, intel_fb_obj(&fb->base));
267f0c90 1863 seq_putc(m, '\n');
37811fcc 1864 }
4b096ac1 1865 mutex_unlock(&dev->mode_config.fb_lock);
188c1ab7 1866 mutex_unlock(&dev->struct_mutex);
37811fcc
CW
1867
1868 return 0;
1869}
1870
7e37f889 1871static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
c9fe99bd 1872{
ef5032a0
CW
1873 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1874 ring->space, ring->head, ring->tail, ring->emit);
c9fe99bd
OM
1875}
1876
e76d3630
BW
1877static int i915_context_status(struct seq_file *m, void *unused)
1878{
36cdd013
DW
1879 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1880 struct drm_device *dev = &dev_priv->drm;
e2efd130 1881 struct i915_gem_context *ctx;
c3232b18 1882 int ret;
e76d3630 1883
f3d28878 1884 ret = mutex_lock_interruptible(&dev->struct_mutex);
e76d3630
BW
1885 if (ret)
1886 return ret;
1887
829a0af2 1888 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
02684446 1889 struct i915_gem_engines_iter it;
7e3d9a59
CW
1890 struct intel_context *ce;
1891
288f1ced
CW
1892 seq_puts(m, "HW context ");
1893 if (!list_empty(&ctx->hw_id_link))
1894 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1895 atomic_read(&ctx->hw_id_pin_count));
c84455b4 1896 if (ctx->pid) {
d28b99ab
CW
1897 struct task_struct *task;
1898
c84455b4 1899 task = get_pid_task(ctx->pid, PIDTYPE_PID);
d28b99ab
CW
1900 if (task) {
1901 seq_printf(m, "(%s [%d]) ",
1902 task->comm, task->pid);
1903 put_task_struct(task);
1904 }
c84455b4
CW
1905 } else if (IS_ERR(ctx->file_priv)) {
1906 seq_puts(m, "(deleted) ");
d28b99ab
CW
1907 } else {
1908 seq_puts(m, "(kernel) ");
1909 }
1910
bca44d80
CW
1911 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1912 seq_putc(m, '\n');
c9fe99bd 1913
02684446
CW
1914 for_each_gem_engine(ce,
1915 i915_gem_context_lock_engines(ctx), it) {
7e3d9a59 1916 seq_printf(m, "%s: ", ce->engine->name);
bca44d80 1917 if (ce->state)
bf3783e5 1918 describe_obj(m, ce->state->obj);
dca33ecc 1919 if (ce->ring)
7e37f889 1920 describe_ctx_ring(m, ce->ring);
c9fe99bd 1921 seq_putc(m, '\n');
c9fe99bd 1922 }
02684446 1923 i915_gem_context_unlock_engines(ctx);
a33afea5 1924
a33afea5 1925 seq_putc(m, '\n');
a168c293
BW
1926 }
1927
f3d28878 1928 mutex_unlock(&dev->struct_mutex);
e76d3630
BW
1929
1930 return 0;
1931}
1932
ea16a3cd
DV
1933static const char *swizzle_string(unsigned swizzle)
1934{
aee56cff 1935 switch (swizzle) {
ea16a3cd
DV
1936 case I915_BIT_6_SWIZZLE_NONE:
1937 return "none";
1938 case I915_BIT_6_SWIZZLE_9:
1939 return "bit9";
1940 case I915_BIT_6_SWIZZLE_9_10:
1941 return "bit9/bit10";
1942 case I915_BIT_6_SWIZZLE_9_11:
1943 return "bit9/bit11";
1944 case I915_BIT_6_SWIZZLE_9_10_11:
1945 return "bit9/bit10/bit11";
1946 case I915_BIT_6_SWIZZLE_9_17:
1947 return "bit9/bit17";
1948 case I915_BIT_6_SWIZZLE_9_10_17:
1949 return "bit9/bit10/bit17";
1950 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 1951 return "unknown";
ea16a3cd
DV
1952 }
1953
1954 return "bug";
1955}
1956
1957static int i915_swizzle_info(struct seq_file *m, void *data)
1958{
36cdd013 1959 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 1960 intel_wakeref_t wakeref;
22bcfc6a 1961
a037121c 1962 wakeref = intel_runtime_pm_get(dev_priv);
ea16a3cd 1963
ea16a3cd
DV
1964 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1965 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1966 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1967 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1968
f3ce44a0 1969 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
ea16a3cd
DV
1970 seq_printf(m, "DDC = 0x%08x\n",
1971 I915_READ(DCC));
656bfa3a
DV
1972 seq_printf(m, "DDC2 = 0x%08x\n",
1973 I915_READ(DCC2));
ea16a3cd
DV
1974 seq_printf(m, "C0DRB3 = 0x%04x\n",
1975 I915_READ16(C0DRB3));
1976 seq_printf(m, "C1DRB3 = 0x%04x\n",
1977 I915_READ16(C1DRB3));
36cdd013 1978 } else if (INTEL_GEN(dev_priv) >= 6) {
3fa7d235
DV
1979 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1980 I915_READ(MAD_DIMM_C0));
1981 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1982 I915_READ(MAD_DIMM_C1));
1983 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1984 I915_READ(MAD_DIMM_C2));
1985 seq_printf(m, "TILECTL = 0x%08x\n",
1986 I915_READ(TILECTL));
36cdd013 1987 if (INTEL_GEN(dev_priv) >= 8)
9d3203e1
BW
1988 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1989 I915_READ(GAMTARBMODE));
1990 else
1991 seq_printf(m, "ARB_MODE = 0x%08x\n",
1992 I915_READ(ARB_MODE));
3fa7d235
DV
1993 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1994 I915_READ(DISP_ARB_CTL));
ea16a3cd 1995 }
656bfa3a
DV
1996
1997 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1998 seq_puts(m, "L-shaped memory detected\n");
1999
a037121c 2000 intel_runtime_pm_put(dev_priv, wakeref);
ea16a3cd
DV
2001
2002 return 0;
2003}
2004
7466c291
CW
2005static const char *rps_power_to_str(unsigned int power)
2006{
2007 static const char * const strings[] = {
2008 [LOW_POWER] = "low power",
2009 [BETWEEN] = "mixed",
2010 [HIGH_POWER] = "high power",
2011 };
2012
2013 if (power >= ARRAY_SIZE(strings) || !strings[power])
2014 return "unknown";
2015
2016 return strings[power];
2017}
2018
1854d5ca
CW
2019static int i915_rps_boost_info(struct seq_file *m, void *data)
2020{
36cdd013 2021 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 2022 struct intel_rps *rps = &dev_priv->gt_pm.rps;
c0a6aa7e 2023 u32 act_freq = rps->cur_freq;
a037121c 2024 intel_wakeref_t wakeref;
1854d5ca 2025
d4225a53 2026 with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
c0a6aa7e 2027 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
337fa6e0 2028 vlv_punit_get(dev_priv);
c0a6aa7e
CW
2029 act_freq = vlv_punit_read(dev_priv,
2030 PUNIT_REG_GPU_FREQ_STS);
337fa6e0 2031 vlv_punit_put(dev_priv);
c0a6aa7e 2032 act_freq = (act_freq >> 8) & 0xff;
c0a6aa7e
CW
2033 } else {
2034 act_freq = intel_get_cagf(dev_priv,
2035 I915_READ(GEN6_RPSTAT1));
2036 }
c0a6aa7e
CW
2037 }
2038
562d9bae 2039 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
79ffac85 2040 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
7b92c1bd 2041 seq_printf(m, "Boosts outstanding? %d\n",
562d9bae 2042 atomic_read(&rps->num_waiters));
60548c55 2043 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
c0a6aa7e
CW
2044 seq_printf(m, "Frequency requested %d, actual %d\n",
2045 intel_gpu_freq(dev_priv, rps->cur_freq),
2046 intel_gpu_freq(dev_priv, act_freq));
7466c291 2047 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
562d9bae
SAK
2048 intel_gpu_freq(dev_priv, rps->min_freq),
2049 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2050 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2051 intel_gpu_freq(dev_priv, rps->max_freq));
7466c291 2052 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
562d9bae
SAK
2053 intel_gpu_freq(dev_priv, rps->idle_freq),
2054 intel_gpu_freq(dev_priv, rps->efficient_freq),
2055 intel_gpu_freq(dev_priv, rps->boost_freq));
1d2ac403 2056
62eb3c24 2057 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1854d5ca 2058
79ffac85 2059 if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
7466c291
CW
2060 u32 rpup, rpupei;
2061 u32 rpdown, rpdownei;
2062
3ceea6a1 2063 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
7466c291
CW
2064 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2065 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2066 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2067 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
3ceea6a1 2068 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
7466c291
CW
2069
2070 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
60548c55 2071 rps_power_to_str(rps->power.mode));
7466c291 2072 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
23f4a287 2073 rpup && rpupei ? 100 * rpup / rpupei : 0,
60548c55 2074 rps->power.up_threshold);
7466c291 2075 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
23f4a287 2076 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
60548c55 2077 rps->power.down_threshold);
7466c291
CW
2078 } else {
2079 seq_puts(m, "\nRPS Autotuning inactive\n");
2080 }
2081
8d3afd7d 2082 return 0;
1854d5ca
CW
2083}
2084
63573eb7
BW
2085static int i915_llc(struct seq_file *m, void *data)
2086{
36cdd013 2087 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2088 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2089
36cdd013 2090 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
f6ac993f
DCS
2091 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2092 dev_priv->edram_size_mb);
63573eb7
BW
2093
2094 return 0;
2095}
2096
0509ead1
AS
2097static int i915_huc_load_status_info(struct seq_file *m, void *data)
2098{
2099 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 2100 intel_wakeref_t wakeref;
56ffc742 2101 struct drm_printer p;
0509ead1 2102
ab309a6a
MW
2103 if (!HAS_HUC(dev_priv))
2104 return -ENODEV;
0509ead1 2105
56ffc742
MW
2106 p = drm_seq_file_printer(m);
2107 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
0509ead1 2108
d4225a53
CW
2109 with_intel_runtime_pm(dev_priv, wakeref)
2110 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
0509ead1
AS
2111
2112 return 0;
2113}
2114
fdf5d357
AD
2115static int i915_guc_load_status_info(struct seq_file *m, void *data)
2116{
36cdd013 2117 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 2118 intel_wakeref_t wakeref;
56ffc742 2119 struct drm_printer p;
fdf5d357 2120
ab309a6a
MW
2121 if (!HAS_GUC(dev_priv))
2122 return -ENODEV;
fdf5d357 2123
56ffc742
MW
2124 p = drm_seq_file_printer(m);
2125 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
fdf5d357 2126
d4225a53
CW
2127 with_intel_runtime_pm(dev_priv, wakeref) {
2128 u32 tmp = I915_READ(GUC_STATUS);
2129 u32 i;
2130
2131 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2132 seq_printf(m, "\tBootrom status = 0x%x\n",
2133 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2134 seq_printf(m, "\tuKernel status = 0x%x\n",
2135 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2136 seq_printf(m, "\tMIA Core status = 0x%x\n",
2137 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2138 seq_puts(m, "\nScratch registers:\n");
2139 for (i = 0; i < 16; i++) {
2140 seq_printf(m, "\t%2d: \t0x%x\n",
2141 i, I915_READ(SOFT_SCRATCH(i)));
2142 }
2143 }
3582ad13 2144
fdf5d357
AD
2145 return 0;
2146}
2147
5e24e4a2
MW
2148static const char *
2149stringify_guc_log_type(enum guc_log_buffer_type type)
2150{
2151 switch (type) {
2152 case GUC_ISR_LOG_BUFFER:
2153 return "ISR";
2154 case GUC_DPC_LOG_BUFFER:
2155 return "DPC";
2156 case GUC_CRASH_DUMP_LOG_BUFFER:
2157 return "CRASH";
2158 default:
2159 MISSING_CASE(type);
2160 }
2161
2162 return "";
2163}
2164
5aa1ee4b
AG
2165static void i915_guc_log_info(struct seq_file *m,
2166 struct drm_i915_private *dev_priv)
2167{
5e24e4a2
MW
2168 struct intel_guc_log *log = &dev_priv->guc.log;
2169 enum guc_log_buffer_type type;
5aa1ee4b 2170
5e24e4a2
MW
2171 if (!intel_guc_log_relay_enabled(log)) {
2172 seq_puts(m, "GuC log relay disabled\n");
2173 return;
2174 }
5aa1ee4b 2175
5e24e4a2 2176 seq_puts(m, "GuC logging stats:\n");
5aa1ee4b 2177
6a96be24 2178 seq_printf(m, "\tRelay full count: %u\n",
5e24e4a2
MW
2179 log->relay.full_count);
2180
2181 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2182 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2183 stringify_guc_log_type(type),
2184 log->stats[type].flush,
2185 log->stats[type].sampled_overflow);
2186 }
5aa1ee4b
AG
2187}
2188
8b417c26
DG
2189static void i915_guc_client_info(struct seq_file *m,
2190 struct drm_i915_private *dev_priv,
5afc8b49 2191 struct intel_guc_client *client)
8b417c26 2192{
e2f80391 2193 struct intel_engine_cs *engine;
c18468c4 2194 enum intel_engine_id id;
e5315213 2195 u64 tot = 0;
8b417c26 2196
b09935a6
OM
2197 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2198 client->priority, client->stage_id, client->proc_desc_offset);
59db36cf
MW
2199 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2200 client->doorbell_id, client->doorbell_offset);
8b417c26 2201
3b3f1650 2202 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2203 u64 submissions = client->submissions[id];
2204 tot += submissions;
8b417c26 2205 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2206 submissions, engine->name);
8b417c26
DG
2207 }
2208 seq_printf(m, "\tTotal: %llu\n", tot);
2209}
2210
a8b9370f
OM
2211static int i915_guc_info(struct seq_file *m, void *data)
2212{
2213 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2214 const struct intel_guc *guc = &dev_priv->guc;
a8b9370f 2215
db557993 2216 if (!USES_GUC(dev_priv))
ab309a6a
MW
2217 return -ENODEV;
2218
db557993
MW
2219 i915_guc_log_info(m, dev_priv);
2220
2221 if (!USES_GUC_SUBMISSION(dev_priv))
2222 return 0;
2223
ab309a6a 2224 GEM_BUG_ON(!guc->execbuf_client);
a8b9370f 2225
db557993 2226 seq_printf(m, "\nDoorbell map:\n");
abddffdf 2227 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
db557993 2228 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
9636f6db 2229
334636c6
CW
2230 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2231 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
e78c9175
CW
2232 if (guc->preempt_client) {
2233 seq_printf(m, "\nGuC preempt client @ %p:\n",
2234 guc->preempt_client);
2235 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2236 }
8b417c26
DG
2237
2238 /* Add more as required ... */
2239
2240 return 0;
2241}
2242
a8b9370f 2243static int i915_guc_stage_pool(struct seq_file *m, void *data)
4c7e77fc 2244{
36cdd013 2245 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a8b9370f
OM
2246 const struct intel_guc *guc = &dev_priv->guc;
2247 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
5afc8b49 2248 struct intel_guc_client *client = guc->execbuf_client;
3a891a62 2249 intel_engine_mask_t tmp;
a8b9370f 2250 int index;
4c7e77fc 2251
ab309a6a
MW
2252 if (!USES_GUC_SUBMISSION(dev_priv))
2253 return -ENODEV;
4c7e77fc 2254
a8b9370f
OM
2255 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2256 struct intel_engine_cs *engine;
2257
2258 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2259 continue;
2260
2261 seq_printf(m, "GuC stage descriptor %u:\n", index);
2262 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2263 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2264 seq_printf(m, "\tPriority: %d\n", desc->priority);
2265 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2266 seq_printf(m, "\tEngines used: 0x%x\n",
2267 desc->engines_used);
2268 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2269 desc->db_trigger_phy,
2270 desc->db_trigger_cpu,
2271 desc->db_trigger_uk);
2272 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2273 desc->process_desc);
9a09485d 2274 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
a8b9370f
OM
2275 desc->wq_addr, desc->wq_size);
2276 seq_putc(m, '\n');
2277
2278 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2279 u32 guc_engine_id = engine->guc_id;
2280 struct guc_execlist_context *lrc =
2281 &desc->lrc[guc_engine_id];
2282
2283 seq_printf(m, "\t%s LRC:\n", engine->name);
2284 seq_printf(m, "\t\tContext desc: 0x%x\n",
2285 lrc->context_desc);
2286 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2287 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2288 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2289 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2290 seq_putc(m, '\n');
2291 }
2292 }
2293
2294 return 0;
2295}
2296
4c7e77fc
AD
2297static int i915_guc_log_dump(struct seq_file *m, void *data)
2298{
ac58d2ab
DCS
2299 struct drm_info_node *node = m->private;
2300 struct drm_i915_private *dev_priv = node_to_i915(node);
2301 bool dump_load_err = !!node->info_ent->data;
2302 struct drm_i915_gem_object *obj = NULL;
2303 u32 *log;
2304 int i = 0;
4c7e77fc 2305
ab309a6a
MW
2306 if (!HAS_GUC(dev_priv))
2307 return -ENODEV;
2308
ac58d2ab
DCS
2309 if (dump_load_err)
2310 obj = dev_priv->guc.load_err_log;
2311 else if (dev_priv->guc.log.vma)
2312 obj = dev_priv->guc.log.vma->obj;
4c7e77fc 2313
ac58d2ab
DCS
2314 if (!obj)
2315 return 0;
4c7e77fc 2316
ac58d2ab
DCS
2317 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2318 if (IS_ERR(log)) {
2319 DRM_DEBUG("Failed to pin object\n");
2320 seq_puts(m, "(log data unaccessible)\n");
2321 return PTR_ERR(log);
4c7e77fc
AD
2322 }
2323
ac58d2ab
DCS
2324 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2325 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2326 *(log + i), *(log + i + 1),
2327 *(log + i + 2), *(log + i + 3));
2328
4c7e77fc
AD
2329 seq_putc(m, '\n');
2330
ac58d2ab
DCS
2331 i915_gem_object_unpin_map(obj);
2332
4c7e77fc
AD
2333 return 0;
2334}
2335
4977a287 2336static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2337{
bcc36d8a 2338 struct drm_i915_private *dev_priv = data;
685534ef 2339
86aa8247 2340 if (!USES_GUC(dev_priv))
ab309a6a
MW
2341 return -ENODEV;
2342
50935ac7 2343 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2344
2345 return 0;
2346}
2347
4977a287 2348static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2349{
bcc36d8a 2350 struct drm_i915_private *dev_priv = data;
685534ef 2351
86aa8247 2352 if (!USES_GUC(dev_priv))
ab309a6a
MW
2353 return -ENODEV;
2354
50935ac7 2355 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2356}
2357
4977a287
MW
2358DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2359 i915_guc_log_level_get, i915_guc_log_level_set,
685534ef
SAK
2360 "%lld\n");
2361
4977a287
MW
2362static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2363{
2364 struct drm_i915_private *dev_priv = inode->i_private;
2365
2366 if (!USES_GUC(dev_priv))
2367 return -ENODEV;
2368
2369 file->private_data = &dev_priv->guc.log;
2370
2371 return intel_guc_log_relay_open(&dev_priv->guc.log);
2372}
2373
2374static ssize_t
2375i915_guc_log_relay_write(struct file *filp,
2376 const char __user *ubuf,
2377 size_t cnt,
2378 loff_t *ppos)
2379{
2380 struct intel_guc_log *log = filp->private_data;
2381
2382 intel_guc_log_relay_flush(log);
2383
2384 return cnt;
2385}
2386
2387static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2388{
2389 struct drm_i915_private *dev_priv = inode->i_private;
2390
2391 intel_guc_log_relay_close(&dev_priv->guc.log);
2392
2393 return 0;
2394}
2395
2396static const struct file_operations i915_guc_log_relay_fops = {
2397 .owner = THIS_MODULE,
2398 .open = i915_guc_log_relay_open,
2399 .write = i915_guc_log_relay_write,
2400 .release = i915_guc_log_relay_release,
2401};
2402
5b7b3086
DP
2403static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2404{
2405 u8 val;
2406 static const char * const sink_status[] = {
2407 "inactive",
2408 "transition to active, capture and display",
2409 "active, display from RFB",
2410 "active, capture and display on sink device timings",
2411 "transition to inactive, capture and display, timing re-sync",
2412 "reserved",
2413 "reserved",
2414 "sink internal error",
2415 };
2416 struct drm_connector *connector = m->private;
7a72c78b 2417 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5b7b3086
DP
2418 struct intel_dp *intel_dp =
2419 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
7a72c78b
RV
2420 int ret;
2421
2422 if (!CAN_PSR(dev_priv)) {
2423 seq_puts(m, "PSR Unsupported\n");
2424 return -ENODEV;
2425 }
5b7b3086
DP
2426
2427 if (connector->status != connector_status_connected)
2428 return -ENODEV;
2429
7a72c78b
RV
2430 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2431
2432 if (ret == 1) {
5b7b3086
DP
2433 const char *str = "unknown";
2434
2435 val &= DP_PSR_SINK_STATE_MASK;
2436 if (val < ARRAY_SIZE(sink_status))
2437 str = sink_status[val];
2438 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2439 } else {
7a72c78b 2440 return ret;
5b7b3086
DP
2441 }
2442
2443 return 0;
2444}
2445DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2446
00b06296
VN
2447static void
2448psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2449{
47c6cd54
JRS
2450 u32 val, status_val;
2451 const char *status = "unknown";
b86bef20 2452
00b06296
VN
2453 if (dev_priv->psr.psr2_enabled) {
2454 static const char * const live_status[] = {
2455 "IDLE",
2456 "CAPTURE",
2457 "CAPTURE_FS",
2458 "SLEEP",
2459 "BUFON_FW",
2460 "ML_UP",
2461 "SU_STANDBY",
2462 "FAST_SLEEP",
2463 "DEEP_SLEEP",
2464 "BUF_ON",
2465 "TG_ON"
2466 };
47c6cd54
JRS
2467 val = I915_READ(EDP_PSR2_STATUS);
2468 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2469 EDP_PSR2_STATUS_STATE_SHIFT;
2470 if (status_val < ARRAY_SIZE(live_status))
2471 status = live_status[status_val];
00b06296
VN
2472 } else {
2473 static const char * const live_status[] = {
2474 "IDLE",
2475 "SRDONACK",
2476 "SRDENT",
2477 "BUFOFF",
2478 "BUFON",
2479 "AUXACK",
2480 "SRDOFFACK",
2481 "SRDENT_ON",
2482 };
47c6cd54
JRS
2483 val = I915_READ(EDP_PSR_STATUS);
2484 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2485 EDP_PSR_STATUS_STATE_SHIFT;
2486 if (status_val < ARRAY_SIZE(live_status))
2487 status = live_status[status_val];
00b06296 2488 }
b86bef20 2489
47c6cd54 2490 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
b86bef20
CW
2491}
2492
e91fd8c6
RV
2493static int i915_edp_psr_status(struct seq_file *m, void *data)
2494{
36cdd013 2495 struct drm_i915_private *dev_priv = node_to_i915(m->private);
47c6cd54 2496 struct i915_psr *psr = &dev_priv->psr;
a037121c 2497 intel_wakeref_t wakeref;
47c6cd54
JRS
2498 const char *status;
2499 bool enabled;
2500 u32 val;
e91fd8c6 2501
ab309a6a
MW
2502 if (!HAS_PSR(dev_priv))
2503 return -ENODEV;
3553a8ea 2504
47c6cd54
JRS
2505 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2506 if (psr->dp)
2507 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2508 seq_puts(m, "\n");
2509
2510 if (!psr->sink_support)
c9ef291a
DP
2511 return 0;
2512
a037121c 2513 wakeref = intel_runtime_pm_get(dev_priv);
47c6cd54 2514 mutex_lock(&psr->lock);
c8c8fb33 2515
47c6cd54
JRS
2516 if (psr->enabled)
2517 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
ce3508fd 2518 else
47c6cd54
JRS
2519 status = "disabled";
2520 seq_printf(m, "PSR mode: %s\n", status);
60e5ffe3 2521
47c6cd54
JRS
2522 if (!psr->enabled)
2523 goto unlock;
60e5ffe3 2524
47c6cd54
JRS
2525 if (psr->psr2_enabled) {
2526 val = I915_READ(EDP_PSR2_CTL);
2527 enabled = val & EDP_PSR2_ENABLE;
2528 } else {
2529 val = I915_READ(EDP_PSR_CTL);
2530 enabled = val & EDP_PSR_ENABLE;
2531 }
2532 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2533 enableddisabled(enabled), val);
2534 psr_source_status(dev_priv, m);
2535 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2536 psr->busy_frontbuffer_bits);
e91fd8c6 2537
05eec3c2 2538 /*
05eec3c2
RV
2539 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2540 */
36cdd013 2541 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
47c6cd54
JRS
2542 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2543 seq_printf(m, "Performance counter: %u\n", val);
a6cbdb8e 2544 }
b86bef20 2545
47c6cd54 2546 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3f983e54 2547 seq_printf(m, "Last attempted entry at: %lld\n",
47c6cd54
JRS
2548 psr->last_entry_attempt);
2549 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3f983e54
DP
2550 }
2551
a81f781a
JRS
2552 if (psr->psr2_enabled) {
2553 u32 su_frames_val[3];
2554 int frame;
2555
2556 /*
2557 * Reading all 3 registers before hand to minimize crossing a
2558 * frame boundary between register reads
2559 */
2560 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2561 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2562
2563 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2564
2565 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2566 u32 su_blocks;
2567
2568 su_blocks = su_frames_val[frame / 3] &
2569 PSR2_SU_STATUS_MASK(frame);
2570 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2571 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2572 }
2573 }
2574
47c6cd54
JRS
2575unlock:
2576 mutex_unlock(&psr->lock);
a037121c 2577 intel_runtime_pm_put(dev_priv, wakeref);
47c6cd54 2578
e91fd8c6
RV
2579 return 0;
2580}
2581
54fd3149
DP
2582static int
2583i915_edp_psr_debug_set(void *data, u64 val)
2584{
2585 struct drm_i915_private *dev_priv = data;
a037121c 2586 intel_wakeref_t wakeref;
c44301fc 2587 int ret;
54fd3149
DP
2588
2589 if (!CAN_PSR(dev_priv))
2590 return -ENODEV;
2591
c44301fc 2592 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
54fd3149 2593
a037121c 2594 wakeref = intel_runtime_pm_get(dev_priv);
c44301fc 2595
23ec9f52 2596 ret = intel_psr_debug_set(dev_priv, val);
c44301fc 2597
a037121c 2598 intel_runtime_pm_put(dev_priv, wakeref);
54fd3149 2599
c44301fc 2600 return ret;
54fd3149
DP
2601}
2602
2603static int
2604i915_edp_psr_debug_get(void *data, u64 *val)
2605{
2606 struct drm_i915_private *dev_priv = data;
2607
2608 if (!CAN_PSR(dev_priv))
2609 return -ENODEV;
2610
2611 *val = READ_ONCE(dev_priv->psr.debug);
2612 return 0;
2613}
2614
2615DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2616 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2617 "%llu\n");
2618
ec013e7f
JB
2619static int i915_energy_uJ(struct seq_file *m, void *data)
2620{
36cdd013 2621 struct drm_i915_private *dev_priv = node_to_i915(m->private);
d38014ea 2622 unsigned long long power;
a037121c 2623 intel_wakeref_t wakeref;
ec013e7f
JB
2624 u32 units;
2625
36cdd013 2626 if (INTEL_GEN(dev_priv) < 6)
ec013e7f
JB
2627 return -ENODEV;
2628
d4225a53 2629 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
d38014ea 2630 return -ENODEV;
d38014ea
GKB
2631
2632 units = (power & 0x1f00) >> 8;
d4225a53
CW
2633 with_intel_runtime_pm(dev_priv, wakeref)
2634 power = I915_READ(MCH_SECP_NRG_STTS);
36623ef8 2635
d4225a53 2636 power = (1000000 * power) >> units; /* convert to uJ */
d38014ea 2637 seq_printf(m, "%llu", power);
371db66a
PZ
2638
2639 return 0;
2640}
2641
6455c870 2642static int i915_runtime_pm_status(struct seq_file *m, void *unused)
371db66a 2643{
36cdd013 2644 struct drm_i915_private *dev_priv = node_to_i915(m->private);
52a05c30 2645 struct pci_dev *pdev = dev_priv->drm.pdev;
371db66a 2646
a156e64d
CW
2647 if (!HAS_RUNTIME_PM(dev_priv))
2648 seq_puts(m, "Runtime power management not supported\n");
371db66a 2649
25c896bd
CW
2650 seq_printf(m, "Runtime power status: %s\n",
2651 enableddisabled(!dev_priv->power_domains.wakeref));
2652
d9948a10 2653 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
371db66a 2654 seq_printf(m, "IRQs disabled: %s\n",
9df7575f 2655 yesno(!intel_irqs_enabled(dev_priv)));
0d804184 2656#ifdef CONFIG_PM
a6aaec8b 2657 seq_printf(m, "Usage count: %d\n",
36cdd013 2658 atomic_read(&dev_priv->drm.dev->power.usage_count));
0d804184
CW
2659#else
2660 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2661#endif
a156e64d 2662 seq_printf(m, "PCI device power state: %s [%d]\n",
52a05c30
DW
2663 pci_power_name(pdev->current_state),
2664 pdev->current_state);
371db66a 2665
bd780f37
CW
2666 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2667 struct drm_printer p = drm_seq_file_printer(m);
2668
2669 print_intel_runtime_pm_wakeref(dev_priv, &p);
2670 }
2671
ec013e7f
JB
2672 return 0;
2673}
2674
1da51581
ID
2675static int i915_power_domain_info(struct seq_file *m, void *unused)
2676{
36cdd013 2677 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1da51581
ID
2678 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2679 int i;
2680
2681 mutex_lock(&power_domains->lock);
2682
2683 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2684 for (i = 0; i < power_domains->power_well_count; i++) {
2685 struct i915_power_well *power_well;
2686 enum intel_display_power_domain power_domain;
2687
2688 power_well = &power_domains->power_wells[i];
f28ec6f4 2689 seq_printf(m, "%-25s %d\n", power_well->desc->name,
1da51581
ID
2690 power_well->count);
2691
f28ec6f4 2692 for_each_power_domain(power_domain, power_well->desc->domains)
1da51581 2693 seq_printf(m, " %-23s %d\n",
9895ad03 2694 intel_display_power_domain_str(power_domain),
1da51581 2695 power_domains->domain_use_count[power_domain]);
1da51581
ID
2696 }
2697
2698 mutex_unlock(&power_domains->lock);
2699
2700 return 0;
2701}
2702
b7cec66d
DL
/* debugfs: report DMC/CSR firmware state and the DC-state entry counters. */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Keep the device awake while reading CSR registers below. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC-counter register offsets are only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	/* These registers are printed even when no payload is loaded. */
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2744
53f5e3ca
JB
2745static void intel_seq_print_mode(struct seq_file *m, int tabs,
2746 struct drm_display_mode *mode)
2747{
2748 int i;
2749
2750 for (i = 0; i < tabs; i++)
2751 seq_putc(m, '\t');
2752
4fb6bb89 2753 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
53f5e3ca
JB
2754}
2755
/* Print one encoder on @intel_crtc and every connector attached to it. */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* Connected: also dump the CRTC's current mode. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2784
/* Print the primary plane's framebuffer for a CRTC, then each encoder on it. */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; shift to whole pixels. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2803
2804static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2805{
2806 struct drm_display_mode *mode = panel->fixed_mode;
2807
2808 seq_printf(m, "\tfixed mode:\n");
2809 intel_seq_print_mode(m, 2, mode);
2810}
2811
/* Print DP sink details: DPCD revision, audio, eDP panel mode, and any
 * downstream branch device info via the DP helper. */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2826
9a148a96
LY
/* Print MST connector details (currently just audio capability of the port). */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
2840
53f5e3ca
JB
2841static void intel_hdmi_info(struct seq_file *m,
2842 struct intel_connector *intel_connector)
2843{
2844 struct intel_encoder *intel_encoder = intel_connector->encoder;
2845 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2846
742f491d 2847 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
2848}
2849
/* LVDS: only the fixed panel mode is interesting. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2855
/* Print everything known about one connector: identity and physical info,
 * then type-specific details (DP/eDP, LVDS, HDMI) and its probed mode list. */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Connector not bound to an encoder yet: nothing more to report. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2905
3abc4e09
RF
/* Map a DRM plane type to a short label for the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
2923
/* Render a plane-rotation bitmask into @buf as human-readable flag names. */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2940
/* Dump every plane on @intel_crtc: geometry, pixel format and rotation. */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* coordinates are 16.16 fixed point: print the integer
		 * part, then the fraction scaled to four decimal digits
		 * ((x & 0xffff) * 15625 >> 10 == frac * 10000 / 65536).
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2986
/* Dump the pipe-scaler state for a CRTC (or note that the platform has none). */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3014
53f5e3ca
JB
/* debugfs: full display topology dump — per-CRTC state (planes, cursor,
 * scalers, encoders, underrun reporting) followed by every connector. */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Per-CRTC lock keeps the state pointer stable while we read. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	/* Connector list iteration requires the mode_config mutex. */
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3077
1b36595f
CW
/* debugfs: GT wakeref state plus a detailed dump of every engine. */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3102
79e9cd5f
LL
3103static int i915_rcs_topology(struct seq_file *m, void *unused)
3104{
3105 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3106 struct drm_printer p = drm_seq_file_printer(m);
3107
0258404f 3108 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
79e9cd5f
LL
3109
3110 return 0;
3111}
3112
c5418a8b
CW
/* debugfs: report the GEM shrinker's tuning parameters. */
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
3122
728e29d7
DV
/* debugfs: dump software state and the cached hardware state of every
 * shared DPLL (including the ICL MG PLL register set). */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* Lock all modeset state so pll->state cannot change under us. */
	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf: 0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc: 0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3171
/* debugfs: list the context workaround registers for the render engine. */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;

	seq_printf(m, "Workarounds applied: %u\n", wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);

	return 0;
}
3186
d2d4f39b
KM
/* debugfs read: current Isochronous Priority Control (IPC) enable state. */
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));
	return 0;
}
3195
/* debugfs open: only expose the file on hardware that supports IPC. */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}
3205
/* debugfs write: parse a boolean from userspace and toggle IPC accordingly. */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/* Watermarks must be recomputed after an IPC change. */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3229
/* File operations for the i915_ipc_status debugfs entry. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3238
c5511e44
DL
/* debugfs: dump the display data-buffer (DDB) allocation per pipe/plane.
 * Only available on gen9+, which uses the SKL-style DDB. */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	/* Hold all modeset locks so the crtc_state DDB entries are stable. */
	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* The cursor plane has its own dedicated DDB slot. */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3277
/* Print DRRS (dynamic refresh rate switching) state for one CRTC: the
 * connector(s) on it, the VBT-declared capability, and the live state. */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3352
/* debugfs: DRRS status for every active CRTC. */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
3376
11bed958
DA
/* debugfs: for every MST-capable DP source port, dump its MST topology. */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST "fake" connectors; we want the primary encoder. */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3407
/* debugfs write: arm/disarm DP compliance test handling on every connected
 * DP connector. Only the exact value 1 activates it. */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		/* MST connectors are handled by their primary encoder. */
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3466
/* debugfs read: emit 1/0 per DP connector's compliance test_active flag. */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3500
/* debugfs open for the displayport_test_active file. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}
3507
/* File operations for the displayport_test_active debugfs entry. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3516
/* debugfs read: per connected DP connector, dump the compliance test data
 * for the pending test type (EDID checksum or video-pattern parameters). */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3560
/* debugfs read: per connected DP connector, print the pending compliance
 * test type (hex). */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3592
/* Print a watermark latency table, converting raw values to microseconds. */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* Number of valid WM levels varies by platform. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* latency is now in tenths of a microsecond. */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3632
3633static int pri_wm_latency_show(struct seq_file *m, void *data)
3634{
36cdd013 3635 struct drm_i915_private *dev_priv = m->private;
e5315213 3636 const u16 *latencies;
97e94b22 3637
36cdd013 3638 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3639 latencies = dev_priv->wm.skl_latency;
3640 else
36cdd013 3641 latencies = dev_priv->wm.pri_latency;
369a1342 3642
97e94b22 3643 wm_latency_show(m, latencies);
369a1342
VS
3644
3645 return 0;
3646}
3647
3648static int spr_wm_latency_show(struct seq_file *m, void *data)
3649{
36cdd013 3650 struct drm_i915_private *dev_priv = m->private;
e5315213 3651 const u16 *latencies;
97e94b22 3652
36cdd013 3653 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3654 latencies = dev_priv->wm.skl_latency;
3655 else
36cdd013 3656 latencies = dev_priv->wm.spr_latency;
369a1342 3657
97e94b22 3658 wm_latency_show(m, latencies);
369a1342
VS
3659
3660 return 0;
3661}
3662
3663static int cur_wm_latency_show(struct seq_file *m, void *data)
3664{
36cdd013 3665 struct drm_i915_private *dev_priv = m->private;
e5315213 3666 const u16 *latencies;
97e94b22 3667
36cdd013 3668 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3669 latencies = dev_priv->wm.skl_latency;
3670 else
36cdd013 3671 latencies = dev_priv->wm.cur_latency;
369a1342 3672
97e94b22 3673 wm_latency_show(m, latencies);
369a1342
VS
3674
3675 return 0;
3676}
3677
3678static int pri_wm_latency_open(struct inode *inode, struct file *file)
3679{
36cdd013 3680 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3681
04548cba 3682 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3683 return -ENODEV;
3684
36cdd013 3685 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3686}
3687
3688static int spr_wm_latency_open(struct inode *inode, struct file *file)
3689{
36cdd013 3690 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3691
b2ae318a 3692 if (HAS_GMCH(dev_priv))
369a1342
VS
3693 return -ENODEV;
3694
36cdd013 3695 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
3696}
3697
3698static int cur_wm_latency_open(struct inode *inode, struct file *file)
3699{
36cdd013 3700 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3701
b2ae318a 3702 if (HAS_GMCH(dev_priv))
369a1342
VS
3703 return -ENODEV;
3704
36cdd013 3705 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
3706}
3707
3708static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
e5315213 3709 size_t len, loff_t *offp, u16 wm[8])
369a1342
VS
3710{
3711 struct seq_file *m = file->private_data;
36cdd013
DW
3712 struct drm_i915_private *dev_priv = m->private;
3713 struct drm_device *dev = &dev_priv->drm;
e5315213 3714 u16 new[8] = { 0 };
de38b95c 3715 int num_levels;
369a1342
VS
3716 int level;
3717 int ret;
3718 char tmp[32];
3719
36cdd013 3720 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3721 num_levels = 3;
36cdd013 3722 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3723 num_levels = 1;
04548cba
VS
3724 else if (IS_G4X(dev_priv))
3725 num_levels = 3;
de38b95c 3726 else
5db94019 3727 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 3728
369a1342
VS
3729 if (len >= sizeof(tmp))
3730 return -EINVAL;
3731
3732 if (copy_from_user(tmp, ubuf, len))
3733 return -EFAULT;
3734
3735 tmp[len] = '\0';
3736
97e94b22
DL
3737 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3738 &new[0], &new[1], &new[2], &new[3],
3739 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
3740 if (ret != num_levels)
3741 return -EINVAL;
3742
3743 drm_modeset_lock_all(dev);
3744
3745 for (level = 0; level < num_levels; level++)
3746 wm[level] = new[level];
3747
3748 drm_modeset_unlock_all(dev);
3749
3750 return len;
3751}
3752
3753
3754static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3755 size_t len, loff_t *offp)
3756{
3757 struct seq_file *m = file->private_data;
36cdd013 3758 struct drm_i915_private *dev_priv = m->private;
e5315213 3759 u16 *latencies;
369a1342 3760
36cdd013 3761 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3762 latencies = dev_priv->wm.skl_latency;
3763 else
36cdd013 3764 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3765
3766 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3767}
3768
3769static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3770 size_t len, loff_t *offp)
3771{
3772 struct seq_file *m = file->private_data;
36cdd013 3773 struct drm_i915_private *dev_priv = m->private;
e5315213 3774 u16 *latencies;
369a1342 3775
36cdd013 3776 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3777 latencies = dev_priv->wm.skl_latency;
3778 else
36cdd013 3779 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3780
3781 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3782}
3783
3784static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3785 size_t len, loff_t *offp)
3786{
3787 struct seq_file *m = file->private_data;
36cdd013 3788 struct drm_i915_private *dev_priv = m->private;
e5315213 3789 u16 *latencies;
97e94b22 3790
36cdd013 3791 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3792 latencies = dev_priv->wm.skl_latency;
3793 else
36cdd013 3794 latencies = dev_priv->wm.cur_latency;
369a1342 3795
97e94b22 3796 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3797}
3798
3799static const struct file_operations i915_pri_wm_latency_fops = {
3800 .owner = THIS_MODULE,
3801 .open = pri_wm_latency_open,
3802 .read = seq_read,
3803 .llseek = seq_lseek,
3804 .release = single_release,
3805 .write = pri_wm_latency_write
3806};
3807
3808static const struct file_operations i915_spr_wm_latency_fops = {
3809 .owner = THIS_MODULE,
3810 .open = spr_wm_latency_open,
3811 .read = seq_read,
3812 .llseek = seq_lseek,
3813 .release = single_release,
3814 .write = spr_wm_latency_write
3815};
3816
3817static const struct file_operations i915_cur_wm_latency_fops = {
3818 .owner = THIS_MODULE,
3819 .open = cur_wm_latency_open,
3820 .read = seq_read,
3821 .llseek = seq_lseek,
3822 .release = single_release,
3823 .write = cur_wm_latency_write
3824};
3825
647416f9
KC
3826static int
3827i915_wedged_get(void *data, u64 *val)
f3cd474b 3828{
c41166f9 3829 int ret = i915_terminally_wedged(data);
f3cd474b 3830
c41166f9
CW
3831 switch (ret) {
3832 case -EIO:
3833 *val = 1;
3834 return 0;
3835 case 0:
3836 *val = 0;
3837 return 0;
3838 default:
3839 return ret;
3840 }
f3cd474b
CW
3841}
3842
647416f9
KC
3843static int
3844i915_wedged_set(void *data, u64 val)
f3cd474b 3845{
598b6b5a 3846 struct drm_i915_private *i915 = data;
d46c0517 3847
15cbf007
CW
3848 /* Flush any previous reset before applying for a new one */
3849 wait_event(i915->gpu_error.reset_queue,
3850 !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
b8d24a06 3851
ce800754
CW
3852 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3853 "Manually set wedged engine mask = %llx", val);
647416f9 3854 return 0;
f3cd474b
CW
3855}
3856
647416f9
KC
3857DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3858 i915_wedged_get, i915_wedged_set,
3a3b4f98 3859 "%llu\n");
f3cd474b 3860
b4a0b32d
CW
/* Bit flags accepted by i915_gem_drop_caches; DROP_ALL is their union. */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
647416f9
KC
3879static int
3880i915_drop_caches_get(void *data, u64 *val)
dd624afd 3881{
647416f9 3882 *val = DROP_ALL;
dd624afd 3883
647416f9 3884 return 0;
dd624afd
CW
3885}
3886
647416f9
KC
3887static int
3888i915_drop_caches_set(void *data, u64 val)
dd624afd 3889{
6b048706 3890 struct drm_i915_private *i915 = data;
dd624afd 3891
b4a0b32d
CW
3892 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3893 val, val & DROP_ALL);
dd624afd 3894
ad4062da
CW
3895 if (val & DROP_RESET_ACTIVE &&
3896 wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
6b048706
CW
3897 i915_gem_set_wedged(i915);
3898
dd624afd
CW
3899 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3900 * on ioctls on -EAGAIN. */
6b048706 3901 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
6cffeb83
CW
3902 int ret;
3903
6b048706 3904 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
dd624afd 3905 if (ret)
6cffeb83 3906 return ret;
dd624afd 3907
00c26cf9 3908 if (val & DROP_ACTIVE)
6b048706 3909 ret = i915_gem_wait_for_idle(i915,
00c26cf9 3910 I915_WAIT_INTERRUPTIBLE |
ec625fb9
CW
3911 I915_WAIT_LOCKED,
3912 MAX_SCHEDULE_TIMEOUT);
00c26cf9
CW
3913
3914 if (val & DROP_RETIRE)
6b048706 3915 i915_retire_requests(i915);
00c26cf9 3916
6b048706
CW
3917 mutex_unlock(&i915->drm.struct_mutex);
3918 }
3919
c41166f9 3920 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
6b048706 3921 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
dd624afd 3922
d92a8cfc 3923 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 3924 if (val & DROP_BOUND)
6b048706 3925 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 3926
21ab4e74 3927 if (val & DROP_UNBOUND)
6b048706 3928 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 3929
8eadc19b 3930 if (val & DROP_SHRINK_ALL)
6b048706 3931 i915_gem_shrink_all(i915);
d92a8cfc 3932 fs_reclaim_release(GFP_KERNEL);
8eadc19b 3933
4dfacb0b
CW
3934 if (val & DROP_IDLE) {
3935 do {
79ffac85 3936 flush_delayed_work(&i915->gem.retire_work);
23c3c3d0 3937 drain_delayed_work(&i915->gem.idle_work);
6b048706 3938 } while (READ_ONCE(i915->gt.awake));
4dfacb0b 3939 }
b4a0b32d 3940
c9c70471 3941 if (val & DROP_FREED)
6b048706 3942 i915_gem_drain_freed_objects(i915);
fbbd37b3 3943
6cffeb83 3944 return 0;
dd624afd
CW
3945}
3946
647416f9
KC
3947DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3948 i915_drop_caches_get, i915_drop_caches_set,
3949 "0x%08llx\n");
dd624afd 3950
647416f9
KC
3951static int
3952i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 3953{
36cdd013 3954 struct drm_i915_private *dev_priv = data;
a037121c 3955 intel_wakeref_t wakeref;
d4225a53 3956 u32 snpcr = 0;
07b7ddd9 3957
f3ce44a0 3958 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
004777cb
DV
3959 return -ENODEV;
3960
d4225a53
CW
3961 with_intel_runtime_pm(dev_priv, wakeref)
3962 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
07b7ddd9 3963
647416f9 3964 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 3965
647416f9 3966 return 0;
07b7ddd9
JB
3967}
3968
647416f9
KC
3969static int
3970i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 3971{
36cdd013 3972 struct drm_i915_private *dev_priv = data;
a037121c 3973 intel_wakeref_t wakeref;
07b7ddd9 3974
f3ce44a0 3975 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
004777cb
DV
3976 return -ENODEV;
3977
647416f9 3978 if (val > 3)
07b7ddd9
JB
3979 return -EINVAL;
3980
647416f9 3981 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
d4225a53
CW
3982 with_intel_runtime_pm(dev_priv, wakeref) {
3983 u32 snpcr;
3984
3985 /* Update the cache sharing policy here as well */
3986 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3987 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3988 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3989 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3990 }
07b7ddd9 3991
647416f9 3992 return 0;
07b7ddd9
JB
3993}
3994
647416f9
KC
3995DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3996 i915_cache_sharing_get, i915_cache_sharing_set,
3997 "%llu\n");
07b7ddd9 3998
36cdd013 3999static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4000 struct sseu_dev_info *sseu)
5d39525a 4001{
7aa0b14e
CW
4002#define SS_MAX 2
4003 const int ss_max = SS_MAX;
4004 u32 sig1[SS_MAX], sig2[SS_MAX];
5d39525a 4005 int ss;
5d39525a
JM
4006
4007 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4008 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4009 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4010 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4011
4012 for (ss = 0; ss < ss_max; ss++) {
4013 unsigned int eu_cnt;
4014
4015 if (sig1[ss] & CHV_SS_PG_ENABLE)
4016 /* skip disabled subslice */
4017 continue;
4018
f08a0c92 4019 sseu->slice_mask = BIT(0);
8cc76693 4020 sseu->subslice_mask[0] |= BIT(ss);
5d39525a
JM
4021 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4022 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4023 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4024 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4025 sseu->eu_total += eu_cnt;
4026 sseu->eu_per_subslice = max_t(unsigned int,
4027 sseu->eu_per_subslice, eu_cnt);
5d39525a 4028 }
7aa0b14e 4029#undef SS_MAX
5d39525a
JM
4030}
4031
f8c3dcf9
RV
4032static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4033 struct sseu_dev_info *sseu)
4034{
c7fb3c6c 4035#define SS_MAX 6
0258404f 4036 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
c7fb3c6c 4037 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
f8c3dcf9 4038 int s, ss;
f8c3dcf9 4039
b3e7f866 4040 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4041 /*
4042 * FIXME: Valid SS Mask respects the spec and read
3c64ea8c 4043 * only valid bits for those registers, excluding reserved
f8c3dcf9
RV
4044 * although this seems wrong because it would leave many
4045 * subslices without ACK.
4046 */
4047 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4048 GEN10_PGCTL_VALID_SS_MASK(s);
4049 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4050 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4051 }
4052
4053 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4054 GEN9_PGCTL_SSA_EU19_ACK |
4055 GEN9_PGCTL_SSA_EU210_ACK |
4056 GEN9_PGCTL_SSA_EU311_ACK;
4057 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4058 GEN9_PGCTL_SSB_EU19_ACK |
4059 GEN9_PGCTL_SSB_EU210_ACK |
4060 GEN9_PGCTL_SSB_EU311_ACK;
4061
b3e7f866 4062 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4063 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4064 /* skip disabled slice */
4065 continue;
4066
4067 sseu->slice_mask |= BIT(s);
8cc76693 4068 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
f8c3dcf9 4069
b3e7f866 4070 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
f8c3dcf9
RV
4071 unsigned int eu_cnt;
4072
4073 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4074 /* skip disabled subslice */
4075 continue;
4076
4077 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4078 eu_mask[ss % 2]);
4079 sseu->eu_total += eu_cnt;
4080 sseu->eu_per_subslice = max_t(unsigned int,
4081 sseu->eu_per_subslice,
4082 eu_cnt);
4083 }
4084 }
c7fb3c6c 4085#undef SS_MAX
f8c3dcf9
RV
4086}
4087
36cdd013 4088static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4089 struct sseu_dev_info *sseu)
5d39525a 4090{
c7fb3c6c 4091#define SS_MAX 3
0258404f 4092 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
c7fb3c6c 4093 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
5d39525a 4094 int s, ss;
1c046bc1 4095
b3e7f866 4096 for (s = 0; s < info->sseu.max_slices; s++) {
1c046bc1
JM
4097 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4098 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4099 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4100 }
4101
5d39525a
JM
4102 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4103 GEN9_PGCTL_SSA_EU19_ACK |
4104 GEN9_PGCTL_SSA_EU210_ACK |
4105 GEN9_PGCTL_SSA_EU311_ACK;
4106 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4107 GEN9_PGCTL_SSB_EU19_ACK |
4108 GEN9_PGCTL_SSB_EU210_ACK |
4109 GEN9_PGCTL_SSB_EU311_ACK;
4110
b3e7f866 4111 for (s = 0; s < info->sseu.max_slices; s++) {
5d39525a
JM
4112 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4113 /* skip disabled slice */
4114 continue;
4115
f08a0c92 4116 sseu->slice_mask |= BIT(s);
1c046bc1 4117
f8c3dcf9 4118 if (IS_GEN9_BC(dev_priv))
8cc76693 4119 sseu->subslice_mask[s] =
0258404f 4120 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
1c046bc1 4121
b3e7f866 4122 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
5d39525a
JM
4123 unsigned int eu_cnt;
4124
cc3f90f0 4125 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4126 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4127 /* skip disabled subslice */
4128 continue;
1c046bc1 4129
8cc76693 4130 sseu->subslice_mask[s] |= BIT(ss);
57ec171e 4131 }
1c046bc1 4132
5d39525a
JM
4133 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4134 eu_mask[ss%2]);
915490d5
ID
4135 sseu->eu_total += eu_cnt;
4136 sseu->eu_per_subslice = max_t(unsigned int,
4137 sseu->eu_per_subslice,
4138 eu_cnt);
5d39525a
JM
4139 }
4140 }
c7fb3c6c 4141#undef SS_MAX
5d39525a
JM
4142}
4143
36cdd013 4144static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4145 struct sseu_dev_info *sseu)
91bedd34 4146{
91bedd34 4147 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4148 int s;
91bedd34 4149
f08a0c92 4150 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4151
f08a0c92 4152 if (sseu->slice_mask) {
43b67998 4153 sseu->eu_per_subslice =
0258404f 4154 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
8cc76693
LL
4155 for (s = 0; s < fls(sseu->slice_mask); s++) {
4156 sseu->subslice_mask[s] =
0258404f 4157 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
8cc76693 4158 }
57ec171e
ID
4159 sseu->eu_total = sseu->eu_per_subslice *
4160 sseu_subslice_total(sseu);
91bedd34
ŁD
4161
4162 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4163 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998 4164 u8 subslice_7eu =
0258404f 4165 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4166
915490d5 4167 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4168 }
4169 }
4170}
4171
615d8908
ID
4172static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4173 const struct sseu_dev_info *sseu)
4174{
4175 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4176 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4177 int s;
615d8908 4178
c67ba538
ID
4179 seq_printf(m, " %s Slice Mask: %04x\n", type,
4180 sseu->slice_mask);
615d8908 4181 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4182 hweight8(sseu->slice_mask));
615d8908 4183 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4184 sseu_subslice_total(sseu));
8cc76693
LL
4185 for (s = 0; s < fls(sseu->slice_mask); s++) {
4186 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4187 s, hweight8(sseu->subslice_mask[s]));
4188 }
615d8908
ID
4189 seq_printf(m, " %s EU Total: %u\n", type,
4190 sseu->eu_total);
4191 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4192 sseu->eu_per_subslice);
4193
4194 if (!is_available_info)
4195 return;
4196
4197 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4198 if (HAS_POOLED_EU(dev_priv))
4199 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4200
4201 seq_printf(m, " Has Slice Power Gating: %s\n",
4202 yesno(sseu->has_slice_pg));
4203 seq_printf(m, " Has Subslice Power Gating: %s\n",
4204 yesno(sseu->has_subslice_pg));
4205 seq_printf(m, " Has EU Power Gating: %s\n",
4206 yesno(sseu->has_eu_pg));
4207}
4208
3873218f
JM
4209static int i915_sseu_status(struct seq_file *m, void *unused)
4210{
36cdd013 4211 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4212 struct sseu_dev_info sseu;
a037121c 4213 intel_wakeref_t wakeref;
3873218f 4214
36cdd013 4215 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4216 return -ENODEV;
4217
4218 seq_puts(m, "SSEU Device Info\n");
0258404f 4219 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
3873218f 4220
7f992aba 4221 seq_puts(m, "SSEU Device Status\n");
915490d5 4222 memset(&sseu, 0, sizeof(sseu));
0258404f
JN
4223 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4224 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
8cc76693 4225 sseu.max_eus_per_subslice =
0258404f 4226 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
238010ed 4227
d4225a53
CW
4228 with_intel_runtime_pm(dev_priv, wakeref) {
4229 if (IS_CHERRYVIEW(dev_priv))
4230 cherryview_sseu_device_status(dev_priv, &sseu);
4231 else if (IS_BROADWELL(dev_priv))
4232 broadwell_sseu_device_status(dev_priv, &sseu);
4233 else if (IS_GEN(dev_priv, 9))
4234 gen9_sseu_device_status(dev_priv, &sseu);
4235 else if (INTEL_GEN(dev_priv) >= 10)
4236 gen10_sseu_device_status(dev_priv, &sseu);
7f992aba 4237 }
238010ed 4238
615d8908 4239 i915_print_sseu_info(m, false, &sseu);
7f992aba 4240
3873218f
JM
4241 return 0;
4242}
4243
6d794d42
BW
4244static int i915_forcewake_open(struct inode *inode, struct file *file)
4245{
d7a133d8 4246 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4247
d7a133d8 4248 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4249 return 0;
4250
6ddbb12e 4251 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
3ceea6a1 4252 intel_uncore_forcewake_user_get(&i915->uncore);
6d794d42
BW
4253
4254 return 0;
4255}
4256
c43b5634 4257static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4258{
d7a133d8 4259 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4260
d7a133d8 4261 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4262 return 0;
4263
3ceea6a1 4264 intel_uncore_forcewake_user_put(&i915->uncore);
6ddbb12e
TU
4265 intel_runtime_pm_put(i915,
4266 (intel_wakeref_t)(uintptr_t)file->private_data);
6d794d42
BW
4267
4268 return 0;
4269}
4270
4271static const struct file_operations i915_forcewake_fops = {
4272 .owner = THIS_MODULE,
4273 .open = i915_forcewake_open,
4274 .release = i915_forcewake_release,
4275};
4276
317eaa95
L
4277static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4278{
4279 struct drm_i915_private *dev_priv = m->private;
4280 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4281
6fc5d789
LP
4282 /* Synchronize with everything first in case there's been an HPD
4283 * storm, but we haven't finished handling it in the kernel yet
4284 */
4285 synchronize_irq(dev_priv->drm.irq);
4286 flush_work(&dev_priv->hotplug.dig_port_work);
4287 flush_work(&dev_priv->hotplug.hotplug_work);
4288
317eaa95
L
4289 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4290 seq_printf(m, "Detected: %s\n",
4291 yesno(delayed_work_pending(&hotplug->reenable_work)));
4292
4293 return 0;
4294}
4295
4296static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4297 const char __user *ubuf, size_t len,
4298 loff_t *offp)
4299{
4300 struct seq_file *m = file->private_data;
4301 struct drm_i915_private *dev_priv = m->private;
4302 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4303 unsigned int new_threshold;
4304 int i;
4305 char *newline;
4306 char tmp[16];
4307
4308 if (len >= sizeof(tmp))
4309 return -EINVAL;
4310
4311 if (copy_from_user(tmp, ubuf, len))
4312 return -EFAULT;
4313
4314 tmp[len] = '\0';
4315
4316 /* Strip newline, if any */
4317 newline = strchr(tmp, '\n');
4318 if (newline)
4319 *newline = '\0';
4320
4321 if (strcmp(tmp, "reset") == 0)
4322 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4323 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4324 return -EINVAL;
4325
4326 if (new_threshold > 0)
4327 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4328 new_threshold);
4329 else
4330 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4331
4332 spin_lock_irq(&dev_priv->irq_lock);
4333 hotplug->hpd_storm_threshold = new_threshold;
4334 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4335 for_each_hpd_pin(i)
4336 hotplug->stats[i].count = 0;
4337 spin_unlock_irq(&dev_priv->irq_lock);
4338
4339 /* Re-enable hpd immediately if we were in an irq storm */
4340 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4341
4342 return len;
4343}
4344
4345static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4346{
4347 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4348}
4349
4350static const struct file_operations i915_hpd_storm_ctl_fops = {
4351 .owner = THIS_MODULE,
4352 .open = i915_hpd_storm_ctl_open,
4353 .read = seq_read,
4354 .llseek = seq_lseek,
4355 .release = single_release,
4356 .write = i915_hpd_storm_ctl_write
4357};
4358
9a64c650
LP
4359static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4360{
4361 struct drm_i915_private *dev_priv = m->private;
4362
4363 seq_printf(m, "Enabled: %s\n",
4364 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4365
4366 return 0;
4367}
4368
4369static int
4370i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4371{
4372 return single_open(file, i915_hpd_short_storm_ctl_show,
4373 inode->i_private);
4374}
4375
4376static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4377 const char __user *ubuf,
4378 size_t len, loff_t *offp)
4379{
4380 struct seq_file *m = file->private_data;
4381 struct drm_i915_private *dev_priv = m->private;
4382 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4383 char *newline;
4384 char tmp[16];
4385 int i;
4386 bool new_state;
4387
4388 if (len >= sizeof(tmp))
4389 return -EINVAL;
4390
4391 if (copy_from_user(tmp, ubuf, len))
4392 return -EFAULT;
4393
4394 tmp[len] = '\0';
4395
4396 /* Strip newline, if any */
4397 newline = strchr(tmp, '\n');
4398 if (newline)
4399 *newline = '\0';
4400
4401 /* Reset to the "default" state for this system */
4402 if (strcmp(tmp, "reset") == 0)
4403 new_state = !HAS_DP_MST(dev_priv);
4404 else if (kstrtobool(tmp, &new_state) != 0)
4405 return -EINVAL;
4406
4407 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4408 new_state ? "En" : "Dis");
4409
4410 spin_lock_irq(&dev_priv->irq_lock);
4411 hotplug->hpd_short_storm_enabled = new_state;
4412 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4413 for_each_hpd_pin(i)
4414 hotplug->stats[i].count = 0;
4415 spin_unlock_irq(&dev_priv->irq_lock);
4416
4417 /* Re-enable hpd immediately if we were in an irq storm */
4418 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4419
4420 return len;
4421}
4422
4423static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4424 .owner = THIS_MODULE,
4425 .open = i915_hpd_short_storm_ctl_open,
4426 .read = seq_read,
4427 .llseek = seq_lseek,
4428 .release = single_release,
4429 .write = i915_hpd_short_storm_ctl_write,
4430};
4431
35954e88
R
4432static int i915_drrs_ctl_set(void *data, u64 val)
4433{
4434 struct drm_i915_private *dev_priv = data;
4435 struct drm_device *dev = &dev_priv->drm;
138bdac8 4436 struct intel_crtc *crtc;
35954e88
R
4437
4438 if (INTEL_GEN(dev_priv) < 7)
4439 return -ENODEV;
4440
138bdac8
ML
4441 for_each_intel_crtc(dev, crtc) {
4442 struct drm_connector_list_iter conn_iter;
4443 struct intel_crtc_state *crtc_state;
4444 struct drm_connector *connector;
4445 struct drm_crtc_commit *commit;
4446 int ret;
4447
4448 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4449 if (ret)
4450 return ret;
4451
4452 crtc_state = to_intel_crtc_state(crtc->base.state);
4453
4454 if (!crtc_state->base.active ||
4455 !crtc_state->has_drrs)
4456 goto out;
35954e88 4457
138bdac8
ML
4458 commit = crtc_state->base.commit;
4459 if (commit) {
4460 ret = wait_for_completion_interruptible(&commit->hw_done);
4461 if (ret)
4462 goto out;
4463 }
4464
4465 drm_connector_list_iter_begin(dev, &conn_iter);
4466 drm_for_each_connector_iter(connector, &conn_iter) {
4467 struct intel_encoder *encoder;
4468 struct intel_dp *intel_dp;
4469
4470 if (!(crtc_state->base.connector_mask &
4471 drm_connector_mask(connector)))
4472 continue;
4473
4474 encoder = intel_attached_encoder(connector);
35954e88
R
4475 if (encoder->type != INTEL_OUTPUT_EDP)
4476 continue;
4477
4478 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4479 val ? "en" : "dis", val);
4480
4481 intel_dp = enc_to_intel_dp(&encoder->base);
4482 if (val)
4483 intel_edp_drrs_enable(intel_dp,
138bdac8 4484 crtc_state);
35954e88
R
4485 else
4486 intel_edp_drrs_disable(intel_dp,
138bdac8 4487 crtc_state);
35954e88 4488 }
138bdac8
ML
4489 drm_connector_list_iter_end(&conn_iter);
4490
4491out:
4492 drm_modeset_unlock(&crtc->base.mutex);
4493 if (ret)
4494 return ret;
35954e88 4495 }
35954e88
R
4496
4497 return 0;
4498}
4499
4500DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4501
d52ad9cb
ML
4502static ssize_t
4503i915_fifo_underrun_reset_write(struct file *filp,
4504 const char __user *ubuf,
4505 size_t cnt, loff_t *ppos)
4506{
4507 struct drm_i915_private *dev_priv = filp->private_data;
4508 struct intel_crtc *intel_crtc;
4509 struct drm_device *dev = &dev_priv->drm;
4510 int ret;
4511 bool reset;
4512
4513 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4514 if (ret)
4515 return ret;
4516
4517 if (!reset)
4518 return cnt;
4519
4520 for_each_intel_crtc(dev, intel_crtc) {
4521 struct drm_crtc_commit *commit;
4522 struct intel_crtc_state *crtc_state;
4523
4524 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4525 if (ret)
4526 return ret;
4527
4528 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4529 commit = crtc_state->base.commit;
4530 if (commit) {
4531 ret = wait_for_completion_interruptible(&commit->hw_done);
4532 if (!ret)
4533 ret = wait_for_completion_interruptible(&commit->flip_done);
4534 }
4535
4536 if (!ret && crtc_state->base.active) {
4537 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4538 pipe_name(intel_crtc->pipe));
4539
4540 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4541 }
4542
4543 drm_modeset_unlock(&intel_crtc->base.mutex);
4544
4545 if (ret)
4546 return ret;
4547 }
4548
4549 ret = intel_fbc_reset_underrun(dev_priv);
4550 if (ret)
4551 return ret;
4552
4553 return cnt;
4554}
4555
4556static const struct file_operations i915_fifo_underrun_reset_ops = {
4557 .owner = THIS_MODULE,
4558 .open = simple_open,
4559 .write = i915_fifo_underrun_reset_write,
4560 .llseek = default_llseek,
4561};
4562
/*
 * Read-only debugfs entries, registered in bulk from i915_debugfs_register()
 * via drm_debugfs_create_files(). Each row is {name, show function, driver
 * features, private data}; the optional fourth field is handed to the show
 * function (e.g. to select the GuC load-error log dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* private data selects the load-error log instead of the normal log */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4612
/*
 * Writable debugfs entries with dedicated file_operations (unlike the
 * read-only i915_debugfs_list above). Registered one by one in
 * i915_debugfs_register() with mode S_IRUGO | S_IWUSR.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4640
1dac891c 4641int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4642{
91c8a326 4643 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4644 struct dentry *ent;
6cc42152 4645 int i;
f3cd474b 4646
b05eeb0f
NT
4647 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4648 minor->debugfs_root, to_i915(minor->dev),
4649 &i915_forcewake_fops);
4650 if (!ent)
4651 return -ENOMEM;
6a9c308d 4652
34b9674c 4653 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4654 ent = debugfs_create_file(i915_debugfs_files[i].name,
4655 S_IRUGO | S_IWUSR,
4656 minor->debugfs_root,
4657 to_i915(minor->dev),
34b9674c 4658 i915_debugfs_files[i].fops);
b05eeb0f
NT
4659 if (!ent)
4660 return -ENOMEM;
34b9674c 4661 }
40633219 4662
27c202ad
BG
4663 return drm_debugfs_create_files(i915_debugfs_list,
4664 I915_DEBUGFS_ENTRIES,
2017263e
BG
4665 minor->debugfs_root, minor);
4666}
4667
aa7471d2
JN
/* One contiguous range of DPCD registers dumped by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4678
/*
 * DPCD register ranges shown by the i915_dpcd debugfs file. Each range is
 * at most sizeof(buf) == 16 bytes (enforced by a WARN_ON in i915_dpcd_show).
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4691
/*
 * Dump the DPCD register ranges listed in i915_dpcd_debug[] for a connected
 * DP/eDP connector, one hex line per range. Ranges marked .edp are skipped
 * on non-eDP connectors. Read errors are reported inline per range rather
 * than aborting the whole dump.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		/* .end (inclusive) wins over .size; an empty block means 1 byte */
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			/*
			 * Print the bytes actually read (err), which may be
			 * fewer than requested on a short read.
			 */
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 4726
ecbd6781
DW
4727static int i915_panel_show(struct seq_file *m, void *data)
4728{
4729 struct drm_connector *connector = m->private;
4730 struct intel_dp *intel_dp =
4731 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4732
4733 if (connector->status != connector_status_connected)
4734 return -ENODEV;
4735
4736 seq_printf(m, "Panel power up delay: %d\n",
4737 intel_dp->panel_power_up_delay);
4738 seq_printf(m, "Panel power down delay: %d\n",
4739 intel_dp->panel_power_down_delay);
4740 seq_printf(m, "Backlight on delay: %d\n",
4741 intel_dp->backlight_on_delay);
4742 seq_printf(m, "Backlight off delay: %d\n",
4743 intel_dp->backlight_off_delay);
4744
4745 return 0;
4746}
e4006713 4747DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 4748
bdc93fe0
R
/*
 * Report whether the sink behind a connected connector advertises HDCP
 * support (currently only HDCP 1.4 is probed, via intel_hdcp_capable()).
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is only supported when the connector has a shim hooked up */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4770
e845f099
MN
/*
 * Show DSC/FEC state for the DP connector behind this debugfs file: whether
 * DSC is enabled on the current CRTC state, whether the sink supports DSC,
 * whether DSC is being forced via i915_dsc_fec_support_write(), and (for
 * non-eDP) whether the sink supports FEC.
 *
 * Takes connection_mutex and the CRTC mutex under a modeset acquire context;
 * on -EDEADLK it backs off and retries the whole sequence (try_again loop)
 * rather than failing.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* deadlock: back off and restart the locking sequence */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is only meaningful for external DP links, not eDP */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4829
4830static ssize_t i915_dsc_fec_support_write(struct file *file,
4831 const char __user *ubuf,
4832 size_t len, loff_t *offp)
4833{
4834 bool dsc_enable = false;
4835 int ret;
4836 struct drm_connector *connector =
4837 ((struct seq_file *)file->private_data)->private;
4838 struct intel_encoder *encoder = intel_attached_encoder(connector);
4839 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4840
4841 if (len == 0)
4842 return 0;
4843
4844 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4845 len);
4846
4847 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4848 if (ret < 0)
4849 return ret;
4850
4851 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4852 (dsc_enable) ? "true" : "false");
4853 intel_dp->force_dsc_en = dsc_enable;
4854
4855 *offp += len;
4856 return len;
4857}
4858
/* seq_file open: bind i915_dsc_fec_support_show to the connector (i_private). */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4865
/*
 * Read/write debugfs file: reads dump DSC/FEC state via the seq_file show
 * function, writes toggle force_dsc_en via i915_dsc_fec_support_write().
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4874
aa7471d2
JN
4875/**
4876 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4877 * @connector: pointer to a registered drm_connector
4878 *
4879 * Cleanup will be done by drm_connector_unregister() through a call to
4880 * drm_debugfs_connector_remove().
4881 *
4882 * Returns 0 on success, negative error codes on error.
4883 */
4884int i915_debugfs_connector_add(struct drm_connector *connector)
4885{
4886 struct dentry *root = connector->debugfs_entry;
e845f099 4887 struct drm_i915_private *dev_priv = to_i915(connector->dev);
aa7471d2
JN
4888
4889 /* The connector must have been registered beforehands. */
4890 if (!root)
4891 return -ENODEV;
4892
4893 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4894 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
4895 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4896 connector, &i915_dpcd_fops);
4897
5b7b3086 4898 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
ecbd6781
DW
4899 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4900 connector, &i915_panel_fops);
5b7b3086
DP
4901 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4902 connector, &i915_psr_sink_status_fops);
4903 }
aa7471d2 4904
bdc93fe0
R
4905 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4906 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4907 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4908 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4909 connector, &i915_hdcp_sink_capability_fops);
4910 }
4911
e845f099
MN
4912 if (INTEL_GEN(dev_priv) >= 10 &&
4913 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4914 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4915 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4916 connector, &i915_dsc_fec_support_fops);
4917
aa7471d2
JN
4918 return 0;
4919}