drm/i915: Perform GGTT restore much earlier during resume
drivers/gpu/drm/i915/selftests/i915_gem.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "mock_drm.h"

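/*
 * Queue an empty request to @ctx on each engine, forcing a switch to that
 * context everywhere.
 */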
static int switch_to_context(struct drm_i915_private *i915,
			     struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	return 0;
}

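/*
 * Scribble a pseudo-random pattern over the whole of stolen memory, mapping
 * it one page at a time through the GGTT error-capture slot.
 */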
static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

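/* Quiesce the GPU and flush outstanding work before faking suspend. */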
static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

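/* Mimic the driver's suspend (S3) sequence under the runtime-pm wakeref. */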
static void pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);
		i915_gem_suspend_late(i915);
	}
}

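/* Mimic the driver's freeze (S4) sequence under the runtime-pm wakeref. */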
static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

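/* Mimic the driver's resume sequence, common to both S3 and S4 wakeup. */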
static void pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		intel_gt_sanitize(&i915->gt, false);
		i915_gem_sanitize(i915);

		mutex_lock(&i915->drm.struct_mutex);
		i915_gem_restore_gtt_mappings(i915);
		i915_gem_restore_fences(i915);
		mutex_unlock(&i915->drm.struct_mutex);

		i915_gem_resume(i915);
	}
}

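/*
 * Check that a context still works after a faked suspend/resume (S3) cycle,
 * including the loss of stolen memory along the way.
 */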
static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

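/* As above, but exercise the freeze/thaw (S4) path instead of plain S3. */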
static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

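/* Entry point for the live suspend/hibernate selftests; skipped if wedged. */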
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}