drm/i915 live selftests: GEM suspend/hibernate
[linux-2.6-block.git] / drivers / gpu / drm / i915 / selftests / i915_gem.c
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2018 Intel Corporation
5 */
6
7#include <linux/random.h>
8
10be98a7
CW
9#include "gem/selftests/igt_gem_utils.h"
10#include "gem/selftests/mock_context.h"
11
12#include "i915_selftest.h"
3f51b7e1 13
3f51b7e1 14#include "igt_flush_test.h"
10be98a7 15#include "mock_drm.h"
3f51b7e1
CW
16
17static int switch_to_context(struct drm_i915_private *i915,
18 struct i915_gem_context *ctx)
19{
20 struct intel_engine_cs *engine;
21 enum intel_engine_id id;
3f51b7e1
CW
22
23 for_each_engine(engine, i915, id) {
24 struct i915_request *rq;
25
46472b3e 26 rq = igt_request_alloc(ctx, engine);
79ffac85
CW
27 if (IS_ERR(rq))
28 return PTR_ERR(rq);
3f51b7e1
CW
29
30 i915_request_add(rq);
31 }
32
79ffac85 33 return 0;
3f51b7e1
CW
34}
35
36static void trash_stolen(struct drm_i915_private *i915)
37{
38 struct i915_ggtt *ggtt = &i915->ggtt;
39 const u64 slot = ggtt->error_capture.start;
40 const resource_size_t size = resource_size(&i915->dsm);
41 unsigned long page;
42 u32 prng = 0x12345678;
43
44 for (page = 0; page < size; page += PAGE_SIZE) {
45 const dma_addr_t dma = i915->dsm.start + page;
46 u32 __iomem *s;
47 int x;
48
49 ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
50
51 s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
52 for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
53 prng = next_pseudo_random32(prng);
54 iowrite32(prng, &s[x]);
55 }
56 io_mapping_unmap_atomic(s);
57 }
58
59 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
60}
61
62static void simulate_hibernate(struct drm_i915_private *i915)
63{
c9d08cc3
CW
64 intel_wakeref_t wakeref;
65
d858d569 66 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
3f51b7e1
CW
67
68 /*
69 * As a final sting in the tail, invalidate stolen. Under a real S4,
70 * stolen is lost and needs to be refilled on resume. However, under
71 * CI we merely do S4-device testing (as full S4 is too unreliable
72 * for automated testing across a cluster), so to simulate the effect
73 * of stolen being trashed across S4, we trash it ourselves.
74 */
75 trash_stolen(i915);
76
d858d569 77 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
3f51b7e1
CW
78}
79
/* Flush outstanding work and park the GPU ahead of a simulated suspend. */
static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);
	return 0;
}
86
87static void pm_suspend(struct drm_i915_private *i915)
88{
c9d08cc3
CW
89 intel_wakeref_t wakeref;
90
c447ff7d 91 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
d4225a53
CW
92 i915_gem_suspend_gtt_mappings(i915);
93 i915_gem_suspend_late(i915);
94 }
3f51b7e1
CW
95}
96
97static void pm_hibernate(struct drm_i915_private *i915)
98{
c9d08cc3
CW
99 intel_wakeref_t wakeref;
100
c447ff7d 101 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
d4225a53 102 i915_gem_suspend_gtt_mappings(i915);
3f51b7e1 103
d4225a53
CW
104 i915_gem_freeze(i915);
105 i915_gem_freeze_late(i915);
106 }
3f51b7e1
CW
107}
108
109static void pm_resume(struct drm_i915_private *i915)
110{
c9d08cc3
CW
111 intel_wakeref_t wakeref;
112
3f51b7e1
CW
113 /*
114 * Both suspend and hibernate follow the same wakeup path and assume
115 * that runtime-pm just works.
116 */
c447ff7d 117 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
0c91621c 118 intel_gt_sanitize(&i915->gt, false);
d4225a53
CW
119 i915_gem_sanitize(i915);
120 i915_gem_resume(i915);
121 }
3f51b7e1
CW
122}
123
124static int igt_gem_suspend(void *arg)
125{
126 struct drm_i915_private *i915 = arg;
127 struct i915_gem_context *ctx;
128 struct drm_file *file;
129 int err;
130
131 file = mock_file(i915);
132 if (IS_ERR(file))
133 return PTR_ERR(file);
134
135 err = -ENOMEM;
136 mutex_lock(&i915->drm.struct_mutex);
137 ctx = live_context(i915, file);
138 if (!IS_ERR(ctx))
139 err = switch_to_context(i915, ctx);
140 mutex_unlock(&i915->drm.struct_mutex);
141 if (err)
142 goto out;
143
144 err = pm_prepare(i915);
145 if (err)
146 goto out;
147
148 pm_suspend(i915);
149
150 /* Here be dragons! Note that with S3RST any S3 may become S4! */
151 simulate_hibernate(i915);
152
153 pm_resume(i915);
154
155 mutex_lock(&i915->drm.struct_mutex);
156 err = switch_to_context(i915, ctx);
157 if (igt_flush_test(i915, I915_WAIT_LOCKED))
158 err = -EIO;
159 mutex_unlock(&i915->drm.struct_mutex);
160out:
161 mock_file_free(i915, file);
162 return err;
163}
164
165static int igt_gem_hibernate(void *arg)
166{
167 struct drm_i915_private *i915 = arg;
168 struct i915_gem_context *ctx;
169 struct drm_file *file;
170 int err;
171
172 file = mock_file(i915);
173 if (IS_ERR(file))
174 return PTR_ERR(file);
175
176 err = -ENOMEM;
177 mutex_lock(&i915->drm.struct_mutex);
178 ctx = live_context(i915, file);
179 if (!IS_ERR(ctx))
180 err = switch_to_context(i915, ctx);
181 mutex_unlock(&i915->drm.struct_mutex);
182 if (err)
183 goto out;
184
185 err = pm_prepare(i915);
186 if (err)
187 goto out;
188
189 pm_hibernate(i915);
190
191 /* Here be dragons! */
192 simulate_hibernate(i915);
193
194 pm_resume(i915);
195
196 mutex_lock(&i915->drm.struct_mutex);
197 err = switch_to_context(i915, ctx);
198 if (igt_flush_test(i915, I915_WAIT_LOCKED))
199 err = -EIO;
200 mutex_unlock(&i915->drm.struct_mutex);
201out:
202 mock_file_free(i915, file);
203 return err;
204}
205
206int i915_gem_live_selftests(struct drm_i915_private *i915)
207{
208 static const struct i915_subtest tests[] = {
209 SUBTEST(igt_gem_suspend),
210 SUBTEST(igt_gem_hibernate),
211 };
212
1ab494cc
CW
213 if (i915_terminally_wedged(i915))
214 return 0;
215
3f51b7e1
CW
216 return i915_subtests(tests, i915);
217}