drm/i915: Teach intel_workarounds to use uncore mmio access
[linux-2.6-block.git] drivers/gpu/drm/i915/selftests/i915_gem.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "igt_flush_test.h"

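/* Switch to @ctx by submitting a trivial request on each engine. */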
static int switch_to_context(struct drm_i915_private *i915,
			     struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = 0;

	wakeref = intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		i915_request_add(rq);
	}

	intel_runtime_pm_put(i915, wakeref);

	return err;
}

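/*
 * Overwrite the entirety of stolen memory with a pseudo-random pattern,
 * mapping it one page at a time through the GGTT slot reserved for
 * error capture.
 */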
static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

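/* Emulate stolen memory being lost while the machine is "powered off". */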
static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(i915, wakeref);
}

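/* Quiesce the GPU, flushing outstanding requests, before suspend. */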
static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

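/* Mirror the driver's S3 suspend sequence for the GEM code. */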
static void pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);
		i915_gem_suspend_late(i915);
	}
}

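/* Mirror the S4 freeze sequence for the GEM code. */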
static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

static void pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(i915, wakeref) {
		intel_engines_sanitize(i915, false);
		i915_gem_sanitize(i915);
		i915_gem_resume(i915);
	}
}

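/*
 * Exercise suspend/resume: bind a context to every engine, suspend,
 * trash stolen memory while "asleep", resume, and check that the
 * context still works.
 */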
static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

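/* As igt_gem_suspend, but along the S4 hibernation path. */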
static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
	};

	return i915_subtests(tests, i915);
}