drm/i915: Drop unused engine->irq_seqno_barrier w/a
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_hangcheck.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include "i915_drv.h"
26
27 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
28 {
29         u32 tmp = current_instdone | *old_instdone;
30         bool unchanged;
31
32         unchanged = tmp == *old_instdone;
33         *old_instdone |= tmp;
34
35         return unchanged;
36 }
37
38 static bool subunits_stuck(struct intel_engine_cs *engine)
39 {
40         struct drm_i915_private *dev_priv = engine->i915;
41         struct intel_instdone instdone;
42         struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
43         bool stuck;
44         int slice;
45         int subslice;
46
47         if (engine->id != RCS)
48                 return true;
49
50         intel_engine_get_instdone(engine, &instdone);
51
52         /* There might be unstable subunit states even when
53          * actual head is not moving. Filter out the unstable ones by
54          * accumulating the undone -> done transitions and only
55          * consider those as progress.
56          */
57         stuck = instdone_unchanged(instdone.instdone,
58                                    &accu_instdone->instdone);
59         stuck &= instdone_unchanged(instdone.slice_common,
60                                     &accu_instdone->slice_common);
61
62         for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
63                 stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
64                                             &accu_instdone->sampler[slice][subslice]);
65                 stuck &= instdone_unchanged(instdone.row[slice][subslice],
66                                             &accu_instdone->row[slice][subslice]);
67         }
68
69         return stuck;
70 }
71
72 static enum intel_engine_hangcheck_action
73 head_stuck(struct intel_engine_cs *engine, u64 acthd)
74 {
75         if (acthd != engine->hangcheck.acthd) {
76
77                 /* Clear subunit states on head movement */
78                 memset(&engine->hangcheck.instdone, 0,
79                        sizeof(engine->hangcheck.instdone));
80
81                 return ENGINE_ACTIVE_HEAD;
82         }
83
84         if (!subunits_stuck(engine))
85                 return ENGINE_ACTIVE_SUBUNITS;
86
87         return ENGINE_DEAD;
88 }
89
90 static enum intel_engine_hangcheck_action
91 engine_stuck(struct intel_engine_cs *engine, u64 acthd)
92 {
93         struct drm_i915_private *dev_priv = engine->i915;
94         enum intel_engine_hangcheck_action ha;
95         u32 tmp;
96
97         ha = head_stuck(engine, acthd);
98         if (ha != ENGINE_DEAD)
99                 return ha;
100
101         if (IS_GEN(dev_priv, 2))
102                 return ENGINE_DEAD;
103
104         /* Is the chip hanging on a WAIT_FOR_EVENT?
105          * If so we can simply poke the RB_WAIT bit
106          * and break the hang. This should work on
107          * all but the second generation chipsets.
108          */
109         tmp = I915_READ_CTL(engine);
110         if (tmp & RING_WAIT) {
111                 i915_handle_error(dev_priv, BIT(engine->id), 0,
112                                   "stuck wait on %s", engine->name);
113                 I915_WRITE_CTL(engine, tmp);
114                 return ENGINE_WAIT_KICK;
115         }
116
117         return ENGINE_DEAD;
118 }
119
120 static void hangcheck_load_sample(struct intel_engine_cs *engine,
121                                   struct intel_engine_hangcheck *hc)
122 {
123         hc->acthd = intel_engine_get_active_head(engine);
124         hc->seqno = intel_engine_get_seqno(engine);
125 }
126
127 static void hangcheck_store_sample(struct intel_engine_cs *engine,
128                                    const struct intel_engine_hangcheck *hc)
129 {
130         engine->hangcheck.acthd = hc->acthd;
131         engine->hangcheck.seqno = hc->seqno;
132         engine->hangcheck.action = hc->action;
133         engine->hangcheck.stalled = hc->stalled;
134         engine->hangcheck.wedged = hc->wedged;
135 }
136
137 static enum intel_engine_hangcheck_action
138 hangcheck_get_action(struct intel_engine_cs *engine,
139                      const struct intel_engine_hangcheck *hc)
140 {
141         if (engine->hangcheck.seqno != hc->seqno)
142                 return ENGINE_ACTIVE_SEQNO;
143
144         if (intel_engine_is_idle(engine))
145                 return ENGINE_IDLE;
146
147         return engine_stuck(engine, hc->acthd);
148 }
149
/*
 * Fold the freshly classified sample into the engine's hangcheck state and
 * compute the stalled/wedged verdicts from how long the engine has gone
 * without an action that resets the timestamp.
 */
static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
                                        struct intel_engine_hangcheck *hc)
{
        /* Grace period before a non-progressing engine is declared hung. */
        unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;

        hc->action = hangcheck_get_action(engine, hc);

        /*
         * Any sign of forward progress (idle, seqno advance, or a
         * successful wait-kick) restarts the clock by refreshing
         * action_timestamp; a busy engine that keeps executing the same
         * request does not, so no single request (e.g. a chain of
         * batches) can run indefinitely.  An engine legitimately waiting
         * on another engine is a victim, not a culprit; kicking the wait
         * counts as progress so that if the seqno still fails to advance
         * we catch the actually hanging engine.
         */

        switch (hc->action) {
        case ENGINE_IDLE:
        case ENGINE_ACTIVE_SEQNO:
                /* Clear head and subunit states on seqno movement */
                hc->acthd = 0;

                memset(&engine->hangcheck.instdone, 0,
                       sizeof(engine->hangcheck.instdone));

                /* Intentional fall through: progress restarts the clock */
        case ENGINE_WAIT_KICK:
        case ENGINE_WAIT:
                engine->hangcheck.action_timestamp = jiffies;
                break;

        case ENGINE_ACTIVE_HEAD:
        case ENGINE_ACTIVE_SUBUNITS:
                /*
                 * Seqno stuck with still active engine gets leeway,
                 * in hopes that it is just a long shader.
                 */
                timeout = I915_SEQNO_DEAD_TIMEOUT;
                break;

        case ENGINE_DEAD:
                /* Dump engine state for debugging before the reset path runs. */
                if (GEM_SHOW_DEBUG()) {
                        struct drm_printer p = drm_debug_printer("hangcheck");
                        intel_engine_dump(engine, &p, "%s\n", engine->name);
                }
                break;

        default:
                MISSING_CASE(hc->action);
        }

        /* Stalled: no progress within the (possibly extended) timeout. */
        hc->stalled = time_after(jiffies,
                                 engine->hangcheck.action_timestamp + timeout);
        /* Wedged: even recovery has failed to make progress for far longer. */
        hc->wedged = time_after(jiffies,
                                 engine->hangcheck.action_timestamp +
                                 I915_ENGINE_WEDGED_TIMEOUT);
}
213
214 static void hangcheck_declare_hang(struct drm_i915_private *i915,
215                                    unsigned int hung,
216                                    unsigned int stuck)
217 {
218         struct intel_engine_cs *engine;
219         char msg[80];
220         unsigned int tmp;
221         int len;
222
223         /* If some rings hung but others were still busy, only
224          * blame the hanging rings in the synopsis.
225          */
226         if (stuck != hung)
227                 hung &= ~stuck;
228         len = scnprintf(msg, sizeof(msg),
229                         "%s on ", stuck == hung ? "no progress" : "hang");
230         for_each_engine_masked(engine, i915, hung, tmp)
231                 len += scnprintf(msg + len, sizeof(msg) - len,
232                                  "%s, ", engine->name);
233         msg[len-2] = '\0';
234
235         return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
236 }
237
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time.  We keep track of per-engine seqno
 * progress; if an engine makes no progress its hangcheck timestamp is
 * left to age.  Further, acthd is inspected to see if the engine is
 * stuck, and on a stuck WAIT we kick the engine.  If no progress is seen
 * within the timeouts we assume the chip is wedged and try to fix it by
 * resetting it.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             gpu_error.hangcheck_work.work);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int hung = 0, stuck = 0, wedged = 0;

        /* Hangcheck may be disabled via module parameter. */
        if (!i915_modparams.enable_hangcheck)
                return;

        /* Nothing to check while the GPU is asleep. */
        if (!READ_ONCE(dev_priv->gt.awake))
                return;

        /* Already given up on this device; no point re-diagnosing. */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return;

        /* As enabling the GPU requires fairly extensive mmio access,
         * periodically arm the mmio checker to see if we are triggering
         * any invalid access.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

        /* Sample, classify and store hangcheck state for every engine. */
        for_each_engine(engine, dev_priv, id) {
                struct intel_engine_hangcheck hc;

                hangcheck_load_sample(engine, &hc);
                hangcheck_accumulate_sample(engine, &hc);
                hangcheck_store_sample(engine, &hc);

                if (engine->hangcheck.stalled) {
                        hung |= intel_engine_flag(engine);
                        /* "stuck" = stalled but not outright dead. */
                        if (hc.action != ENGINE_DEAD)
                                stuck |= intel_engine_flag(engine);
                }

                if (engine->hangcheck.wedged)
                        wedged |= intel_engine_flag(engine);
        }

        /* Recovery itself has timed out: give up and cancel everything. */
        if (wedged) {
                dev_err(dev_priv->drm.dev,
                        "GPU recovery timed out,"
                        " cancelling all in-flight rendering.\n");
                GEM_TRACE_DUMP();
                i915_gem_set_wedged(dev_priv);
        }

        if (hung)
                hangcheck_declare_hang(dev_priv, hung, stuck);

        /* Reset timer in case GPU hangs without another request being added */
        i915_queue_hangcheck(dev_priv);
}
301
302 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
303 {
304         memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
305         engine->hangcheck.action_timestamp = jiffies;
306 }
307
308 void intel_hangcheck_init(struct drm_i915_private *i915)
309 {
310         INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
311                           i915_hangcheck_elapsed);
312 }
313
314 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
315 #include "selftests/intel_hangcheck.c"
316 #endif