/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"

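/*
 * IPEHR holds the command dword the parser is currently stuck on. Check
 * whether it is an MI_SEMAPHORE_MBOX register-compare wait, ignoring the
 * sync-target bits that encode which engine is being waited upon.
 */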
static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
        ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
        return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
                         MI_SEMAPHORE_REGISTER);
}

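/*
 * Map the sync-target bits of a semaphore wait command back to the engine
 * that is expected to signal it, by matching them against each engine's
 * mbox wait encoding for us. Returns ERR_PTR(-ENODEV) if nothing matches.
 */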
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                                 u64 offset)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
        struct intel_engine_cs *signaller;
        enum intel_engine_id id;

        for_each_engine(signaller, dev_priv, id) {
                if (engine == signaller)
                        continue;

                if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
                        return signaller;
        }

        DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
                         engine->name, ipehr);

        return ERR_PTR(-ENODEV);
}

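/*
 * If @engine is blocked on an MI_SEMAPHORE_MBOX wait, return the engine it
 * is waiting on and store the seqno being waited for in @seqno. Returns
 * NULL if no semaphore wait can be identified, or an ERR_PTR if no matching
 * signaller is found.
 */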
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
        struct drm_i915_private *dev_priv = engine->i915;
        void __iomem *vaddr;
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;

        /*
         * This function does not support execlist mode - any attempt to
         * proceed further into this function will result in a kernel panic
         * when dereferencing ring->buffer, which is not set up in execlist
         * mode.
         *
         * The correct way of doing it would be to derive the currently
         * executing ring buffer from the current context, which is derived
         * from the currently running request. Unfortunately, to get the
         * current request we would have to grab the struct_mutex before doing
         * anything else, which would be ill-advised since some other thread
         * might have grabbed it already and managed to hang itself, causing
         * the hang checker to deadlock.
         *
         * Therefore, this function does not support execlist mode in its
         * current form. Just return NULL and move on.
         */
        if (engine->buffer == NULL)
                return NULL;

        ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
        if (!ipehr_is_semaphore_wait(engine, ipehr))
                return NULL;

        /*
         * HEAD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX. But limit it to just 3
         * or 4 dwords depending on the semaphore wait command size.
         * Note that we don't care about ACTHD here since that might
         * point at a batch, and semaphores are always emitted into the
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(engine) & HEAD_ADDR;
        backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
        vaddr = (void __iomem *)engine->buffer->vaddr;

        for (i = backwards; i; --i) {
                /*
                 * Be paranoid and presume the hw has gone off into the wild -
                 * our ring is smaller than what the hardware (and hence
                 * HEAD_ADDR) allows. Also handles wrap-around.
                 */
                head &= engine->buffer->size - 1;

                /* This here seems to blow up */
                cmd = ioread32(vaddr + head);
                if (cmd == ipehr)
                        break;

                head -= 4;
        }

        if (!i)
                return NULL;

        *seqno = ioread32(vaddr + head + 4) + 1;
        return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}

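/*
 * Returns 1 if the semaphore this engine is waiting on has already been
 * signalled, 0 if the signaller still appears to be making progress, and
 * -1 if the wait cannot be resolved (no identifiable signaller, or a
 * suspected deadlock where the signaller is itself stuck on a semaphore).
 */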
static int semaphore_passed(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_engine_cs *signaller;
        u32 seqno;

        engine->hangcheck.deadlock++;

        signaller = semaphore_waits_for(engine, &seqno);
        if (signaller == NULL)
                return -1;

        if (IS_ERR(signaller))
                return 0;

        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;

        if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
                return 1;

        /* cursory check for an unkickable deadlock */
        if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
            semaphore_passed(signaller) < 0)
                return -1;

        return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv, id)
                engine->hangcheck.deadlock = 0;
}

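/*
 * Accumulate the INSTDONE bits seen so far into @old_instdone and return
 * true if this sample sets no new bits, i.e. no further undone -> done
 * transitions have been observed since the last check.
 */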
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
        u32 tmp = current_instdone | *old_instdone;
        bool unchanged;

        unchanged = tmp == *old_instdone;
        *old_instdone |= tmp;

        return unchanged;
}

static bool subunits_stuck(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_instdone instdone;
        struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
        bool stuck;
        int slice;
        int subslice;

        if (engine->id != RCS)
                return true;

        intel_engine_get_instdone(engine, &instdone);

        /* There might be unstable subunit states even when the
         * actual head is not moving. Filter out the unstable ones by
         * accumulating the undone -> done transitions and only
         * consider those as progress.
         */
        stuck = instdone_unchanged(instdone.instdone,
                                   &accu_instdone->instdone);
        stuck &= instdone_unchanged(instdone.slice_common,
                                    &accu_instdone->slice_common);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
                stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
                                            &accu_instdone->sampler[slice][subslice]);
                stuck &= instdone_unchanged(instdone.row[slice][subslice],
                                            &accu_instdone->row[slice][subslice]);
        }

        return stuck;
}

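/*
 * Classify an engine whose seqno has not advanced: the head may still be
 * moving (ENGINE_ACTIVE_HEAD), individual subunits may still be making
 * progress (ENGINE_ACTIVE_SUBUNITS), or nothing is moving (ENGINE_DEAD).
 */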
static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
        if (acthd != engine->hangcheck.acthd) {

                /* Clear subunit states on head movement */
                memset(&engine->hangcheck.instdone, 0,
                       sizeof(engine->hangcheck.instdone));

                return ENGINE_ACTIVE_HEAD;
        }

        if (!subunits_stuck(engine))
                return ENGINE_ACTIVE_SUBUNITS;

        return ENGINE_DEAD;
}

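/*
 * Decide whether a stuck engine can be kicked out of a wait (WAIT_FOR_EVENT
 * or, on gen6/7, a semaphore wait) or whether it must be declared dead.
 */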
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
        struct drm_i915_private *dev_priv = engine->i915;
        enum intel_engine_hangcheck_action ha;
        u32 tmp;

        ha = head_stuck(engine, acthd);
        if (ha != ENGINE_DEAD)
                return ha;

        if (IS_GEN2(dev_priv))
                return ENGINE_DEAD;

        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
         * and break the hang. This should work on
         * all but the second generation chipsets.
         */
        tmp = I915_READ_CTL(engine);
        if (tmp & RING_WAIT) {
                i915_handle_error(dev_priv, BIT(engine->id), 0,
                                  "stuck wait on %s", engine->name);
                I915_WRITE_CTL(engine, tmp);
                return ENGINE_WAIT_KICK;
        }

        if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(engine)) {
                default:
                        return ENGINE_DEAD;
                case 1:
                        i915_handle_error(dev_priv, ALL_ENGINES, 0,
                                          "stuck semaphore on %s",
                                          engine->name);
                        I915_WRITE_CTL(engine, tmp);
                        return ENGINE_WAIT_KICK;
                case 0:
                        return ENGINE_WAIT;
                }
        }

        return ENGINE_DEAD;
}

static void hangcheck_load_sample(struct intel_engine_cs *engine,
                                  struct intel_engine_hangcheck *hc)
{
        /* We don't strictly need an irq-barrier here, as we are not
         * serving an interrupt request, but be paranoid in case the
         * barrier has side-effects (such as preventing a broken
         * cacheline snoop) and so be sure that we can see the seqno
         * advance. If the seqno should stick, due to a stale
         * cacheline, we would erroneously declare the GPU hung.
         */
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);

        hc->acthd = intel_engine_get_active_head(engine);
        hc->seqno = intel_engine_get_seqno(engine);
}

static void hangcheck_store_sample(struct intel_engine_cs *engine,
                                   const struct intel_engine_hangcheck *hc)
{
        engine->hangcheck.acthd = hc->acthd;
        engine->hangcheck.seqno = hc->seqno;
        engine->hangcheck.action = hc->action;
        engine->hangcheck.stalled = hc->stalled;
        engine->hangcheck.wedged = hc->wedged;
}

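/* Determine what the engine has been doing since the previous sample. */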
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
                     const struct intel_engine_hangcheck *hc)
{
        if (engine->hangcheck.seqno != hc->seqno)
                return ENGINE_ACTIVE_SEQNO;

        if (intel_engine_is_idle(engine))
                return ENGINE_IDLE;

        return engine_stuck(engine, hc->acthd);
}

static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
                                        struct intel_engine_hangcheck *hc)
{
        unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;

        hc->action = hangcheck_get_action(engine, hc);

        /* We always mark progress if the engine is busy and still
         * processing the same request, so that no single request
         * can run indefinitely (such as a chain of batches). The
         * only time we do not mark progress on an engine is when
         * it is in a legitimate wait for another engine. In that
         * case the waiting engine is a victim and we want to be
         * sure we catch the right culprit. Then, every time we
         * kick the ring, count that as progress too, since the
         * kick may allow the seqno to advance; if it does not,
         * the check will catch the hanging engine.
         */

        switch (hc->action) {
        case ENGINE_IDLE:
        case ENGINE_ACTIVE_SEQNO:
                /* Clear head and subunit states on seqno movement */
                hc->acthd = 0;

                memset(&engine->hangcheck.instdone, 0,
                       sizeof(engine->hangcheck.instdone));

                /* Intentional fall through */
        case ENGINE_WAIT_KICK:
        case ENGINE_WAIT:
                engine->hangcheck.action_timestamp = jiffies;
                break;

        case ENGINE_ACTIVE_HEAD:
        case ENGINE_ACTIVE_SUBUNITS:
                /*
                 * Seqno stuck with still active engine gets leeway,
                 * in hopes that it is just a long shader.
                 */
                timeout = I915_SEQNO_DEAD_TIMEOUT;
                break;

        case ENGINE_DEAD:
                if (GEM_SHOW_DEBUG()) {
                        struct drm_printer p = drm_debug_printer("hangcheck");
                        intel_engine_dump(engine, &p, "%s\n", engine->name);
                }
                break;

        default:
                MISSING_CASE(hc->action);
        }

        hc->stalled = time_after(jiffies,
                                 engine->hangcheck.action_timestamp + timeout);
        hc->wedged = time_after(jiffies,
                                 engine->hangcheck.action_timestamp +
                                 I915_ENGINE_WEDGED_TIMEOUT);
}

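/*
 * Report which engines are hung and pass the mask to the error handler so
 * it can capture the error state and attempt recovery.
 */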
static void hangcheck_declare_hang(struct drm_i915_private *i915,
                                   unsigned int hung,
                                   unsigned int stuck)
{
        struct intel_engine_cs *engine;
        char msg[80];
        unsigned int tmp;
        int len;

        /* If some rings hung but others were still busy, only
         * blame the hanging rings in the synopsis.
         */
        if (stuck != hung)
                hung &= ~stuck;
        len = scnprintf(msg, sizeof(msg),
                        "%s on ", stuck == hung ? "no progress" : "hang");
        for_each_engine_masked(engine, i915, hung, tmp)
                len += scnprintf(msg + len, sizeof(msg) - len,
                                 "%s, ", engine->name);
        msg[len-2] = '\0';

        return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             gpu_error.hangcheck_work.work);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int hung = 0, stuck = 0, wedged = 0;

        if (!i915_modparams.enable_hangcheck)
                return;

        if (!READ_ONCE(dev_priv->gt.awake))
                return;

        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return;

        /* As enabling the GPU requires fairly extensive mmio access,
         * periodically arm the mmio checker to see if we are triggering
         * any invalid access.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

        for_each_engine(engine, dev_priv, id) {
                struct intel_engine_hangcheck hc;

                semaphore_clear_deadlocks(dev_priv);

                hangcheck_load_sample(engine, &hc);
                hangcheck_accumulate_sample(engine, &hc);
                hangcheck_store_sample(engine, &hc);

                if (engine->hangcheck.stalled) {
                        hung |= intel_engine_flag(engine);
                        if (hc.action != ENGINE_DEAD)
                                stuck |= intel_engine_flag(engine);
                }

                if (engine->hangcheck.wedged)
                        wedged |= intel_engine_flag(engine);
        }

        if (wedged) {
                dev_err(dev_priv->drm.dev,
                        "GPU recovery timed out,"
                        " cancelling all in-flight rendering.\n");
                GEM_TRACE_DUMP();
                i915_gem_set_wedged(dev_priv);
        }

        if (hung)
                hangcheck_declare_hang(dev_priv, hung, stuck);

        /* Reset timer in case GPU hangs without another request being added */
        i915_queue_hangcheck(dev_priv);
}

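/* Reset the per-engine hangcheck state and restart its progress timer. */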
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
        memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
        engine->hangcheck.action_timestamp = jiffies;
}

void intel_hangcheck_init(struct drm_i915_private *i915)
{
        INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
                          i915_hangcheck_elapsed);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_hangcheck.c"
#endif