/* drivers/gpu/drm/i915/i915_gpu_error.c (GNU Linux-libre 4.19.286-gnu1) */
/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
#include <linux/ascii85.h>

#include "i915_gpu_error.h"
#include "i915_drv.h"

static inline const struct intel_engine_cs *
engine_lookup(const struct drm_i915_private *i915, unsigned int id)
{
        if (id >= I915_NUM_ENGINES)
                return NULL;

        return i915->engine[id];
}

static inline const char *
__engine_name(const struct intel_engine_cs *engine)
{
        return engine ? engine->name : "";
}

static const char *
engine_name(const struct drm_i915_private *i915, unsigned int id)
{
        return __engine_name(engine_lookup(i915, id));
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

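/*
 * Buffer bookkeeping, as used throughout this file: the reader asks for a
 * window of the formatted error state starting at e->start.  e->pos counts
 * every byte formatted so far (including bytes skipped before the window),
 * e->bytes counts the bytes actually stored in e->buf, and e->err latches
 * the first failure.
 */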
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
        if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
                e->err = -ENOSPC;
                return false;
        }

        if (e->bytes == e->size - 1 || e->err)
                return false;

        return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
                              unsigned len)
{
        if (e->pos + len <= e->start) {
                e->pos += len;
                return false;
        }

        /* First vsnprintf needs to fit in its entirety for memmove */
        if (len >= e->size) {
                e->err = -EIO;
                return false;
        }

        return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
                                 unsigned len)
{
        /* If this is the first printf in this window, adjust it so that
         * the start position matches the start of the buffer.
         */
        if (e->pos < e->start) {
                const size_t off = e->start - e->pos;

                /* Should not happen, but be paranoid */
                if (off > len || e->bytes) {
                        e->err = -EIO;
                        return;
                }

                memmove(e->buf, e->buf + off, len - off);
                e->bytes = len - off;
                e->pos = e->start;
                return;
        }

        e->bytes += len;
        e->pos += len;
}

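/*
 * Two-pass formatting: while the stream is still before the window
 * (e->pos < e->start), each printf is first measured with a NULL buffer so
 * that it can either be skipped entirely (__i915_error_seek) or, if it
 * straddles the window start, trimmed by __i915_error_advance().
 */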
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
                               const char *f, va_list args)
{
        unsigned len;

        if (!__i915_error_ok(e))
                return;

        /* Seek to the first printf that hits the start position */
        if (e->pos < e->start) {
                va_list tmp;

                va_copy(tmp, args);
                len = vsnprintf(NULL, 0, f, tmp);
                va_end(tmp);

                if (!__i915_error_seek(e, len))
                        return;
        }

        len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
        if (len >= e->size - e->bytes)
                len = e->size - e->bytes - 1;

        __i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
                            const char *str)
{
        unsigned len;

        if (!__i915_error_ok(e))
                return;

        len = strlen(str);

        /* Seek to the first write that hits the start position */
        if (e->pos < e->start) {
                if (!__i915_error_seek(e, len))
                        return;
        }

        if (len >= e->size - e->bytes)
                len = e->size - e->bytes - 1;
        memcpy(e->buf + e->bytes, str, len);

        __i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
        i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
        struct drm_printer p = {
                .printfn = __i915_printfn_error,
                .arg = e,
        };
        return p;
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct compress {
        struct z_stream_s zstream;
        void *tmp;
};

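/*
 * Error capture can be invoked from atomic context, hence the
 * GFP_ATOMIC | __GFP_NOWARN allocations below.  c->tmp is an optional
 * staging page so that source pages in write-combining memory can be
 * pulled in with i915_memcpy_from_wc() before being deflated.
 */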
static bool compress_init(struct compress *c)
{
        struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));

        zstream->workspace =
                kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
                        GFP_ATOMIC | __GFP_NOWARN);
        if (!zstream->workspace)
                return false;

        if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
                kfree(zstream->workspace);
                return false;
        }

        c->tmp = NULL;
        if (i915_has_memcpy_from_wc())
                c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);

        return true;
}

static void *compress_next_page(struct drm_i915_error_object *dst)
{
        unsigned long page;

        if (dst->page_count >= dst->num_pages)
                return ERR_PTR(-ENOSPC);

        page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
        if (!page)
                return ERR_PTR(-ENOMEM);

        return dst->pages[dst->page_count++] = (void *)page;
}

static int compress_page(struct compress *c,
                         void *src,
                         struct drm_i915_error_object *dst)
{
        struct z_stream_s *zstream = &c->zstream;

        zstream->next_in = src;
        if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
                zstream->next_in = c->tmp;
        zstream->avail_in = PAGE_SIZE;

        do {
                if (zstream->avail_out == 0) {
                        zstream->next_out = compress_next_page(dst);
                        if (IS_ERR(zstream->next_out))
                                return PTR_ERR(zstream->next_out);

                        zstream->avail_out = PAGE_SIZE;
                }

                if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
                        return -EIO;

                cond_resched();
        } while (zstream->avail_in);

        /* Fall back to uncompressed if compression grew the total size? */
        if (0 && zstream->total_out > zstream->total_in)
                return -E2BIG;

        return 0;
}

static int compress_flush(struct compress *c,
                          struct drm_i915_error_object *dst)
{
        struct z_stream_s *zstream = &c->zstream;

        do {
                switch (zlib_deflate(zstream, Z_FINISH)) {
                case Z_OK: /* more space requested */
                        zstream->next_out = compress_next_page(dst);
                        if (IS_ERR(zstream->next_out))
                                return PTR_ERR(zstream->next_out);

                        zstream->avail_out = PAGE_SIZE;
                        break;

                case Z_STREAM_END:
                        goto end;

                default: /* any error */
                        return -EIO;
                }
        } while (1);

end:
        memset(zstream->next_out, 0, zstream->avail_out);
        dst->unused = zstream->avail_out;
        return 0;
}

static void compress_fini(struct compress *c,
                          struct drm_i915_error_object *dst)
{
        struct z_stream_s *zstream = &c->zstream;

        zlib_deflateEnd(zstream);
        kfree(zstream->workspace);
        if (c->tmp)
                free_page((unsigned long)c->tmp);
}

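/*
 * The single-character marker tells the decoder how the ascii85 payload
 * that follows was produced: ':' for a zlib-deflated stream (this branch),
 * '~' for raw pages (the uncompressed branch below).
 */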
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
        err_puts(m, ":");
}

#else

struct compress {
};

static bool compress_init(struct compress *c)
{
        return true;
}

static int compress_page(struct compress *c,
                         void *src,
                         struct drm_i915_error_object *dst)
{
        unsigned long page;
        void *ptr;

        page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
        if (!page)
                return -ENOMEM;

        ptr = (void *)page;
        if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
                memcpy(ptr, src, PAGE_SIZE);
        dst->pages[dst->page_count++] = ptr;
        cond_resched();

        return 0;
}

static int compress_flush(struct compress *c,
                          struct drm_i915_error_object *dst)
{
        return 0;
}

static void compress_fini(struct compress *c,
                          struct drm_i915_error_object *dst)
{
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
        err_puts(m, "~");
}

#endif

static void print_error_buffers(struct drm_i915_error_state_buf *m,
                                const char *name,
                                struct drm_i915_error_buffer *err,
                                int count)
{
        err_printf(m, "%s [%d]:\n", name, count);

        while (count--) {
                err_printf(m, "    %08x_%08x %8u %02x %02x %02x",
                           upper_32_bits(err->gtt_offset),
                           lower_32_bits(err->gtt_offset),
                           err->size,
                           err->read_domains,
                           err->write_domain,
                           err->wseqno);
                err_puts(m, tiling_flag(err->tiling));
                err_puts(m, dirty_flag(err->dirty));
                err_puts(m, purgeable_flag(err->purgeable));
                err_puts(m, err->userptr ? " userptr" : "");
                err_puts(m, err->engine != -1 ? " " : "");
                err_puts(m, engine_name(m->i915, err->engine));
                err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

                if (err->name)
                        err_printf(m, " (name: %d)", err->name);
                if (err->fence_reg != I915_FENCE_REG_NONE)
                        err_printf(m, " (fence: %d)", err->fence_reg);

                err_puts(m, "\n");
                err++;
        }
}

static void error_print_instdone(struct drm_i915_error_state_buf *m,
                                 const struct drm_i915_error_engine *ee)
{
        int slice;
        int subslice;

        err_printf(m, "  INSTDONE: 0x%08x\n",
                   ee->instdone.instdone);

        if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
                return;

        err_printf(m, "  SC_INSTDONE: 0x%08x\n",
                   ee->instdone.slice_common);

        if (INTEL_GEN(m->i915) <= 6)
                return;

        for_each_instdone_slice_subslice(m->i915, slice, subslice)
                err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice,
                           ee->instdone.sampler[slice][subslice]);

        for_each_instdone_slice_subslice(m->i915, slice, subslice)
                err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice,
                           ee->instdone.row[slice][subslice]);
}

static const char *bannable(const struct drm_i915_error_context *ctx)
{
        return ctx->bannable ? "" : " (unbannable)";
}

static void error_print_request(struct drm_i915_error_state_buf *m,
                                const char *prefix,
                                const struct drm_i915_error_request *erq,
                                const unsigned long epoch)
{
        if (!erq->seqno)
                return;

        err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
                   prefix, erq->pid, erq->ban_score,
                   erq->context, erq->seqno, erq->sched_attr.priority,
                   jiffies_to_msecs(erq->jiffies - epoch),
                   erq->start, erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
                                const char *header,
                                const struct drm_i915_error_context *ctx)
{
        err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
                   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
                   ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
                   ctx->guilty, ctx->active);
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
                               const struct drm_i915_error_engine *ee,
                               const unsigned long epoch)
{
        int n;

        err_printf(m, "%s command stream:\n",
                   engine_name(m->i915, ee->engine_id));
        err_printf(m, "  IDLE?: %s\n", yesno(ee->idle));
        err_printf(m, "  START: 0x%08x\n", ee->start);
        err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
        err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
                   ee->tail, ee->rq_post, ee->rq_tail);
        err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
        err_printf(m, "  MODE:  0x%08x\n", ee->mode);
        err_printf(m, "  HWS:   0x%08x\n", ee->hws);
        err_printf(m, "  ACTHD: 0x%08x %08x\n",
                   (u32)(ee->acthd>>32), (u32)ee->acthd);
        err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
        err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);

        error_print_instdone(m, ee);

        if (ee->batchbuffer) {
                u64 start = ee->batchbuffer->gtt_offset;
                u64 end = start + ee->batchbuffer->gtt_size;

                err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
                           upper_32_bits(start), lower_32_bits(start),
                           upper_32_bits(end), lower_32_bits(end));
        }
        if (INTEL_GEN(m->i915) >= 4) {
                err_printf(m, "  BBADDR: 0x%08x_%08x\n",
                           (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
                err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
                err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
        }
        err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
        err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
                   lower_32_bits(ee->faddr));
        if (INTEL_GEN(m->i915) >= 6) {
                err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
                err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
                err_printf(m, "  SYNC_0: 0x%08x\n",
                           ee->semaphore_mboxes[0]);
                err_printf(m, "  SYNC_1: 0x%08x\n",
                           ee->semaphore_mboxes[1]);
                if (HAS_VEBOX(m->i915))
                        err_printf(m, "  SYNC_2: 0x%08x\n",
                                   ee->semaphore_mboxes[2]);
        }
        if (USES_PPGTT(m->i915)) {
                err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

                if (INTEL_GEN(m->i915) >= 8) {
                        int i;
                        for (i = 0; i < 4; i++)
                                err_printf(m, "  PDP%d: 0x%016llx\n",
                                           i, ee->vm_info.pdp[i]);
                } else {
                        err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
                                   ee->vm_info.pp_dir_base);
                }
        }
        err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
        err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
        err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
        err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
        err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
        err_printf(m, "  hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
        err_printf(m, "  hangcheck action: %s\n",
                   hangcheck_action_to_str(ee->hangcheck_action));
        err_printf(m, "  hangcheck action timestamp: %dms (%lu%s)\n",
                   jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
                   ee->hangcheck_timestamp,
                   ee->hangcheck_timestamp == epoch ? "; epoch" : "");
        err_printf(m, "  engine reset count: %u\n", ee->reset_count);

        for (n = 0; n < ee->num_ports; n++) {
                err_printf(m, "  ELSP[%d]:", n);
                error_print_request(m, " ", &ee->execlist[n], epoch);
        }

        error_print_context(m, "  Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
        va_list args;

        va_start(args, f);
        i915_error_vprintf(e, f, args);
        va_end(args);
}

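/*
 * Object contents are emitted page by page as ascii85; the trailing
 * obj->unused bytes of the final page (slack left by the compressor) are
 * not encoded.
 */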
static void print_error_obj(struct drm_i915_error_state_buf *m,
                            struct intel_engine_cs *engine,
                            const char *name,
                            struct drm_i915_error_object *obj)
{
        char out[ASCII85_BUFSZ];
        int page;

        if (!obj)
                return;

        if (name) {
                err_printf(m, "%s --- %s = 0x%08x %08x\n",
                           engine ? engine->name : "global", name,
                           upper_32_bits(obj->gtt_offset),
                           lower_32_bits(obj->gtt_offset));
        }

        err_compression_marker(m);
        for (page = 0; page < obj->page_count; page++) {
                int i, len;

                len = PAGE_SIZE;
                if (page == obj->page_count - 1)
                        len -= obj->unused;
                len = ascii85_encode_len(len);

                for (i = 0; i < len; i++)
                        err_puts(m, ascii85_encode(obj->pages[page][i], out));
        }
        err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
                                   const struct intel_device_info *info,
                                   const struct intel_driver_caps *caps)
{
        struct drm_printer p = i915_error_printer(m);

        intel_device_info_dump_flags(info, &p);
        intel_driver_caps_print(caps, &p);
        intel_device_info_dump_topology(&info->sseu, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
                             const struct i915_params *params)
{
        struct drm_printer p = i915_error_printer(m);

        i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
                            struct drm_i915_private *i915)
{
        struct pci_dev *pdev = i915->drm.pdev;

        err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
        err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
        err_printf(m, "PCI Subsystem: %04x:%04x\n",
                   pdev->subsystem_vendor,
                   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
                         const struct i915_error_uc *error_uc)
{
        struct drm_printer p = i915_error_printer(m);
        const struct i915_gpu_state *error =
                container_of(error_uc, typeof(*error), uc);

        if (!error->device_info.has_guc)
                return;

        intel_uc_fw_dump(&error_uc->guc_fw, &p);
        intel_uc_fw_dump(&error_uc->huc_fw, &p);
        print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_gpu_state *error)
{
        struct drm_i915_private *dev_priv = m->i915;
        struct drm_i915_error_object *obj;
        struct timespec64 ts;
        int i, j;

        if (!error) {
                err_printf(m, "No error state collected\n");
                return 0;
        }

        if (*error->error_msg)
                err_printf(m, "%s\n", error->error_msg);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
        ts = ktime_to_timespec64(error->time);
        err_printf(m, "Time: %lld s %ld us\n",
                   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
        ts = ktime_to_timespec64(error->boottime);
        err_printf(m, "Boottime: %lld s %ld us\n",
                   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
        ts = ktime_to_timespec64(error->uptime);
        err_printf(m, "Uptime: %lld s %ld us\n",
                   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
        err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
        err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
                   error->capture,
                   jiffies_to_msecs(jiffies - error->capture),
                   jiffies_to_msecs(error->capture - error->epoch));

        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
                if (error->engine[i].hangcheck_stalled &&
                    error->engine[i].context.pid) {
                        err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
                                   engine_name(m->i915, i),
                                   error->engine[i].context.comm,
                                   error->engine[i].context.pid,
                                   error->engine[i].context.ban_score,
                                   bannable(&error->engine[i].context));
                }
        }
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
        err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
        err_print_pciid(m, error->i915);

        err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

        if (HAS_CSR(dev_priv)) {
                struct intel_csr *csr = &dev_priv->csr;

                err_printf(m, "DMC loaded: %s\n",
                           yesno(csr->dmc_payload != NULL));
                err_printf(m, "DMC fw version: %d.%d\n",
                           CSR_VERSION_MAJOR(csr->version),
                           CSR_VERSION_MINOR(csr->version));
        }

        err_printf(m, "GT awake: %s\n", yesno(error->awake));
        err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
        err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
        err_printf(m, "EIR: 0x%08x\n", error->eir);
        err_printf(m, "IER: 0x%08x\n", error->ier);
        for (i = 0; i < error->ngtier; i++)
                err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
        err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
        err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        err_printf(m, "CCID: 0x%08x\n", error->ccid);
        err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

        for (i = 0; i < error->nfence; i++)
                err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

        if (INTEL_GEN(dev_priv) >= 6) {
                err_printf(m, "ERROR: 0x%08x\n", error->error);

                if (INTEL_GEN(dev_priv) >= 8)
                        err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
                                   error->fault_data1, error->fault_data0);

                err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }

        if (IS_GEN7(dev_priv))
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
                if (error->engine[i].engine_id != -1)
                        error_print_engine(m, &error->engine[i], error->epoch);
        }

        for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
                char buf[128];
                int len, first = 1;

                if (!error->active_vm[i])
                        break;

                len = scnprintf(buf, sizeof(buf), "Active (");
                for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
                        if (error->engine[j].vm != error->active_vm[i])
                                continue;

                        len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
                                         first ? "" : ", ",
                                         dev_priv->engine[j]->name);
                        first = 0;
                }
                scnprintf(buf + len, sizeof(buf) - len, ")");
                print_error_buffers(m, buf,
                                    error->active_bo[i],
                                    error->active_bo_count[i]);
        }

        print_error_buffers(m, "Pinned (global)",
                            error->pinned_bo,
                            error->pinned_bo_count);

        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
                const struct drm_i915_error_engine *ee = &error->engine[i];

                obj = ee->batchbuffer;
                if (obj) {
                        err_puts(m, dev_priv->engine[i]->name);
                        if (ee->context.pid)
                                err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
                                           ee->context.comm,
                                           ee->context.pid,
                                           ee->context.handle,
                                           ee->context.hw_id,
                                           ee->context.ban_score,
                                           bannable(&ee->context));
                        err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
                                   upper_32_bits(obj->gtt_offset),
                                   lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, dev_priv->engine[i], NULL, obj);
                }

                for (j = 0; j < ee->user_bo_count; j++)
                        print_error_obj(m, dev_priv->engine[i],
                                        "user", ee->user_bo[j]);

                if (ee->num_requests) {
                        err_printf(m, "%s --- %d requests\n",
                                   dev_priv->engine[i]->name,
                                   ee->num_requests);
                        for (j = 0; j < ee->num_requests; j++)
                                error_print_request(m, " ",
                                                    &ee->requests[j],
                                                    error->epoch);
                }

                if (IS_ERR(ee->waiters)) {
                        err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
                                   dev_priv->engine[i]->name);
                } else if (ee->num_waiters) {
                        err_printf(m, "%s --- %d waiters\n",
                                   dev_priv->engine[i]->name,
                                   ee->num_waiters);
                        for (j = 0; j < ee->num_waiters; j++) {
                                err_printf(m, " seqno 0x%08x for %s [%d]\n",
                                           ee->waiters[j].seqno,
                                           ee->waiters[j].comm,
                                           ee->waiters[j].pid);
                        }
                }

                print_error_obj(m, dev_priv->engine[i],
                                "ringbuffer", ee->ringbuffer);

                print_error_obj(m, dev_priv->engine[i],
                                "HW Status", ee->hws_page);

                print_error_obj(m, dev_priv->engine[i],
                                "HW context", ee->ctx);

                print_error_obj(m, dev_priv->engine[i],
                                "WA context", ee->wa_ctx);

                print_error_obj(m, dev_priv->engine[i],
                                "WA batchbuffer", ee->wa_batchbuffer);

                print_error_obj(m, dev_priv->engine[i],
                                "NULL context", ee->default_state);
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

        if (error->display)
                intel_display_print_error_state(m, error->display);

        err_print_capabilities(m, &error->device_info, &error->driver_caps);
        err_print_params(m, &error->params);
        err_print_uc(m, &error->uc);

        if (m->bytes == 0 && m->err)
                return m->err;

        return 0;
}

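/*
 * Allocate the output buffer with progressively smaller fallbacks (the
 * requested size, then PAGE_SIZE, then 128 bytes) so that a read can still
 * make some progress under memory pressure.
 */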
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
                              struct drm_i915_private *i915,
                              size_t count, loff_t pos)
{
        memset(ebuf, 0, sizeof(*ebuf));
        ebuf->i915 = i915;

        /* We need enough room to store any i915_error_state printf
         * so that we can move it to the start position.
         */
        ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
        ebuf->buf = kmalloc(ebuf->size,
                                GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

        if (ebuf->buf == NULL) {
                ebuf->size = PAGE_SIZE;
                ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
        }

        if (ebuf->buf == NULL) {
                ebuf->size = 128;
                ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
        }

        if (ebuf->buf == NULL)
                return -ENOMEM;

        ebuf->start = pos;

        return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
        int page;

        if (obj == NULL)
                return;

        for (page = 0; page < obj->page_count; page++)
                free_page((unsigned long)obj->pages[page]);

        kfree(obj);
}

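/*
 * free_param() receives the stringified type of each module parameter;
 * the __builtin_strcmp() against "char *" folds to a constant at compile
 * time, so only the string-valued parameters are kfree'd.
 */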
static __always_inline void free_param(const char *type, void *x)
{
        if (!__builtin_strcmp(type, "char *"))
                kfree(*(void **)x);
}

static void cleanup_params(struct i915_gpu_state *error)
{
#define FREE(T, x, ...) free_param(#T, &error->params.x);
        I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}

static void cleanup_uc_state(struct i915_gpu_state *error)
{
        struct i915_error_uc *error_uc = &error->uc;

        kfree(error_uc->guc_fw.path);
        kfree(error_uc->huc_fw.path);
        i915_error_object_free(error_uc->guc_log);
}

void __i915_gpu_state_free(struct kref *error_ref)
{
        struct i915_gpu_state *error =
                container_of(error_ref, typeof(*error), ref);
        long i, j;

        for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
                struct drm_i915_error_engine *ee = &error->engine[i];

                for (j = 0; j < ee->user_bo_count; j++)
                        i915_error_object_free(ee->user_bo[j]);
                kfree(ee->user_bo);

                i915_error_object_free(ee->batchbuffer);
                i915_error_object_free(ee->wa_batchbuffer);
                i915_error_object_free(ee->ringbuffer);
                i915_error_object_free(ee->hws_page);
                i915_error_object_free(ee->ctx);
                i915_error_object_free(ee->wa_ctx);

                kfree(ee->requests);
                if (!IS_ERR_OR_NULL(ee->waiters))
                        kfree(ee->waiters);
        }

        for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
                kfree(error->active_bo[i]);
        kfree(error->pinned_bo);

        kfree(error->overlay);
        kfree(error->display);

        cleanup_params(error);
        cleanup_uc_state(error);

        kfree(error);
}

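/*
 * Snapshot an object's contents: each backing page is bound, one at a
 * time, into the GGTT slot reserved at ggtt->error_capture, read through
 * an atomic write-combining iomapping, and then compressed into freshly
 * allocated pages of dst.
 */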
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
                         struct i915_vma *vma)
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        const u64 slot = ggtt->error_capture.start;
        struct drm_i915_error_object *dst;
        struct compress compress;
        unsigned long num_pages;
        struct sgt_iter iter;
        dma_addr_t dma;
        int ret;

        if (!vma)
                return NULL;

        num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
        num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
        dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
                      GFP_ATOMIC | __GFP_NOWARN);
        if (!dst)
                return NULL;

        dst->gtt_offset = vma->node.start;
        dst->gtt_size = vma->node.size;
        dst->num_pages = num_pages;
        dst->page_count = 0;
        dst->unused = 0;

        if (!compress_init(&compress)) {
                kfree(dst);
                return NULL;
        }

        ret = -EINVAL;
        for_each_sgt_dma(dma, iter, vma->pages) {
                void __iomem *s;

                ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

                s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void __force *)s, dst);
                io_mapping_unmap_atomic(s);
                if (ret)
                        break;
        }

        if (ret || compress_flush(&compress, dst)) {
                while (dst->page_count--)
                        free_page((unsigned long)dst->pages[dst->page_count]);
                kfree(dst);
                dst = NULL;
        }

        compress_fini(&compress, dst);
        ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
}

/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
        struct i915_request *request;

        request = __i915_gem_active_peek(active);
        return request ? request->global_seqno : 0;
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
        struct i915_request *request;

        request = __i915_gem_active_peek(active);
        return request ? request->engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
                       struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        err->size = obj->base.size;
        err->name = obj->base.name;

        err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
        err->engine = __active_get_engine_id(&obj->frontbuffer_write);

        err->gtt_offset = vma->node.start;
        err->read_domains = obj->read_domains;
        err->write_domain = obj->write_domain;
        err->fence_reg = vma->fence ? vma->fence->id : -1;
        err->tiling = i915_gem_object_get_tiling(obj);
        err->dirty = obj->mm.dirty;
        err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
        err->cache_level = obj->cache_level;
}

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
                            int count, struct list_head *head,
                            bool pinned_only)
{
        struct i915_vma *vma;
        int i = 0;

        list_for_each_entry(vma, head, vm_link) {
                if (!vma->obj)
                        continue;

                if (pinned_only && !i915_vma_is_pinned(vma))
                        continue;

                capture_bo(err++, vma);
                if (++i == count)
                        break;
        }

        return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent falsely duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
                                         struct i915_gpu_state *error,
                                         int *engine_id)
{
        uint32_t error_code = 0;
        int i;

        /* IPEHR would be an ideal way to detect errors, as it's the gross
         * measure of "the command that hung." However, it contains some very
         * common synchronization commands that almost always appear when the
         * hang is strictly a client bug. Use instdone to differentiate those
         * cases somewhat.
         */
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (error->engine[i].hangcheck_stalled) {
                        if (engine_id)
                                *engine_id = i;

                        return error->engine[i].ipehr ^
                               error->engine[i].instdone.instdone;
                }
        }

        return error_code;
}

static void gem_record_fences(struct i915_gpu_state *error)
{
        struct drm_i915_private *dev_priv = error->i915;
        int i;

        if (INTEL_GEN(dev_priv) >= 6) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
        } else if (INTEL_GEN(dev_priv) >= 4) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
        } else {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ(FENCE_REG(i));
        }
        error->nfence = i;
}

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
                                        struct drm_i915_error_engine *ee)
{
        struct drm_i915_private *dev_priv = engine->i915;

        ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
        ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
        if (HAS_VEBOX(dev_priv))
                ee->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(engine->mmio_base));
}

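/*
 * The breadcrumbs rb_lock may already be held when a hang is captured, so
 * only spin_trylock_irq() is used here: count the waiters under the lock,
 * allocate outside it (GFP_ATOMIC), then retake the lock to copy.  If the
 * lock cannot be taken, ERR_PTR(-EDEADLK) is recorded instead of the list.
 */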
static void error_record_engine_waiters(struct intel_engine_cs *engine,
                                        struct drm_i915_error_engine *ee)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_error_waiter *waiter;
        struct rb_node *rb;
        int count;

        ee->num_waiters = 0;
        ee->waiters = NULL;

        if (RB_EMPTY_ROOT(&b->waiters))
                return;

        if (!spin_trylock_irq(&b->rb_lock)) {
                ee->waiters = ERR_PTR(-EDEADLK);
                return;
        }

        count = 0;
        for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
                count++;
        spin_unlock_irq(&b->rb_lock);

        waiter = NULL;
        if (count)
                waiter = kmalloc_array(count,
                                       sizeof(struct drm_i915_error_waiter),
                                       GFP_ATOMIC);
        if (!waiter)
                return;

        if (!spin_trylock_irq(&b->rb_lock)) {
                kfree(waiter);
                ee->waiters = ERR_PTR(-EDEADLK);
                return;
        }

        ee->waiters = waiter;
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = rb_entry(rb, typeof(*w), node);

                strcpy(waiter->comm, w->tsk->comm);
                waiter->pid = w->tsk->pid;
                waiter->seqno = w->seqno;
                waiter++;

                if (++ee->num_waiters == count)
                        break;
        }
        spin_unlock_irq(&b->rb_lock);
}

static void error_record_engine_registers(struct i915_gpu_state *error,
                                          struct intel_engine_cs *engine,
                                          struct drm_i915_error_engine *ee)
{
        struct drm_i915_private *dev_priv = engine->i915;

        if (INTEL_GEN(dev_priv) >= 6) {
                ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
                if (INTEL_GEN(dev_priv) >= 8) {
                        ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
                } else {
                        gen6_record_semaphore_state(engine, ee);
                        ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
                }
        }

        if (INTEL_GEN(dev_priv) >= 4) {
                ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
                ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
                ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
                ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
                ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
                if (INTEL_GEN(dev_priv) >= 8) {
                        ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
                        ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
                }
                ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
        } else {
                ee->faddr = I915_READ(DMA_FADD_I8XX);
                ee->ipeir = I915_READ(IPEIR);
                ee->ipehr = I915_READ(IPEHR);
        }

        intel_engine_get_instdone(engine, &ee->instdone);

        ee->waiting = intel_engine_has_waiter(engine);
        ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
        ee->acthd = intel_engine_get_active_head(engine);
        ee->seqno = intel_engine_get_seqno(engine);
        ee->last_seqno = intel_engine_last_submit(engine);
        ee->start = I915_READ_START(engine);
        ee->head = I915_READ_HEAD(engine);
        ee->tail = I915_READ_TAIL(engine);
        ee->ctl = I915_READ_CTL(engine);
        if (INTEL_GEN(dev_priv) > 2)
                ee->mode = I915_READ_MODE(engine);

        if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
                i915_reg_t mmio;

                if (IS_GEN7(dev_priv)) {
                        switch (engine->id) {
                        default:
                        case RCS:
                                mmio = RENDER_HWS_PGA_GEN7;
                                break;
                        case BCS:
                                mmio = BLT_HWS_PGA_GEN7;
                                break;
                        case VCS:
                                mmio = BSD_HWS_PGA_GEN7;
                                break;
                        case VECS:
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
                } else if (IS_GEN6(engine->i915)) {
                        mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
                        mmio = RING_HWS_PGA(engine->mmio_base);
                }

                ee->hws = I915_READ(mmio);
        }

        ee->idle = intel_engine_is_idle(engine);
        ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
        ee->hangcheck_action = engine->hangcheck.action;
        ee->hangcheck_stalled = engine->hangcheck.stalled;
        ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
                                                  engine);

        if (USES_PPGTT(dev_priv)) {
                int i;

                ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

                if (IS_GEN6(dev_priv))
                        ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(engine));
                else if (IS_GEN7(dev_priv))
                        ee->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(engine));
                else if (INTEL_GEN(dev_priv) >= 8)
                        for (i = 0; i < 4; i++) {
                                ee->vm_info.pdp[i] =
                                        I915_READ(GEN8_RING_PDP_UDW(engine, i));
                                ee->vm_info.pdp[i] <<= 32;
                                ee->vm_info.pdp[i] |=
                                        I915_READ(GEN8_RING_PDP_LDW(engine, i));
                        }
        }
}

static void record_request(struct i915_request *request,
                           struct drm_i915_error_request *erq)
{
        struct i915_gem_context *ctx = request->gem_context;

        erq->context = ctx->hw_id;
        erq->sched_attr = request->sched.attr;
        erq->ban_score = atomic_read(&ctx->ban_score);
        erq->seqno = request->global_seqno;
        erq->jiffies = request->emitted_jiffies;
        erq->start = i915_ggtt_offset(request->ring->vma);
        erq->head = request->head;
        erq->tail = request->tail;

        rcu_read_lock();
        erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
        rcu_read_unlock();
}

static void engine_record_requests(struct intel_engine_cs *engine,
                                   struct i915_request *first,
                                   struct drm_i915_error_engine *ee)
{
        struct i915_request *request;
        int count;

        count = 0;
        request = first;
        list_for_each_entry_from(request, &engine->timeline.requests, link)
                count++;
        if (!count)
                return;

        ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
        if (!ee->requests)
                return;

        ee->num_requests = count;

        count = 0;
        request = first;
        list_for_each_entry_from(request, &engine->timeline.requests, link) {
                if (count >= ee->num_requests) {
                        /*
                         * If the ring request list was changed in
                         * between the point where the error request
                         * list was created and dimensioned and this
                         * point then just exit early to avoid crashes.
                         *
                         * We don't need to communicate that the
                         * request list changed state during error
                         * state capture and that the error state is
                         * slightly incorrect as a consequence since we
                         * are typically only interested in the request
                         * list state at the point of error state
                         * capture, not in any changes happening during
                         * the capture.
                         */
                        break;
                }

                record_request(request, &ee->requests[count++]);
        }
        ee->num_requests = count;
}

static void error_record_engine_execlists(struct intel_engine_cs *engine,
                                          struct drm_i915_error_engine *ee)
{
        const struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned int n;

        for (n = 0; n < execlists_num_ports(execlists); n++) {
                struct i915_request *rq = port_request(&execlists->port[n]);

                if (!rq)
                        break;

                record_request(rq, &ee->execlist[n]);
        }

        ee->num_ports = n;
}

static void record_context(struct drm_i915_error_context *e,
                           struct i915_gem_context *ctx)
{
        if (ctx->pid) {
                struct task_struct *task;

                rcu_read_lock();
                task = pid_task(ctx->pid, PIDTYPE_PID);
                if (task) {
                        strcpy(e->comm, task->comm);
                        e->pid = task->pid;
                }
                rcu_read_unlock();
        }

        e->handle = ctx->user_handle;
        e->hw_id = ctx->hw_id;
        e->sched_attr = ctx->sched;
        e->ban_score = atomic_read(&ctx->ban_score);
        e->bannable = i915_gem_context_is_bannable(ctx);
        e->guilty = atomic_read(&ctx->guilty_count);
        e->active = atomic_read(&ctx->active_count);
}

static void request_record_user_bo(struct i915_request *request,
                                   struct drm_i915_error_engine *ee)
{
        struct i915_capture_list *c;
        struct drm_i915_error_object **bo;
        long count;

        count = 0;
        for (c = request->capture_list; c; c = c->next)
                count++;

        bo = NULL;
        if (count)
                bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
        if (!bo)
                return;

        count = 0;
        for (c = request->capture_list; c; c = c->next) {
                bo[count] = i915_error_object_create(request->i915, c->vma);
                if (!bo[count])
                        break;
                count++;
        }

        ee->user_bo = bo;
        ee->user_bo_count = count;
}

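/*
 * Wrap a bare GEM object in an on-stack fake vma (node.start is a U64_MAX
 * placeholder since there is no GGTT binding) so that
 * i915_error_object_create() can be reused for objects such as the
 * engine's default context state.
 */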
static struct drm_i915_error_object *
capture_object(struct drm_i915_private *dev_priv,
               struct drm_i915_gem_object *obj)
{
        if (obj && i915_gem_object_has_pages(obj)) {
                struct i915_vma fake = {
                        .node = { .start = U64_MAX, .size = obj->base.size },
                        .size = obj->base.size,
                        .pages = obj->mm.pages,
                        .obj = obj,
                };

                return i915_error_object_create(dev_priv, &fake);
        } else {
                return NULL;
        }
}

static void gem_record_rings(struct i915_gpu_state *error)
{
        struct drm_i915_private *i915 = error->i915;
        struct i915_ggtt *ggtt = &i915->ggtt;
        int i;

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct intel_engine_cs *engine = i915->engine[i];
                struct drm_i915_error_engine *ee = &error->engine[i];
                struct i915_request *request;

                ee->engine_id = -1;

                if (!engine)
                        continue;

                ee->engine_id = i;

                error_record_engine_registers(error, engine, ee);
                error_record_engine_waiters(engine, ee);
                error_record_engine_execlists(engine, ee);

                request = i915_gem_find_active_request(engine);
                if (request) {
                        struct i915_gem_context *ctx = request->gem_context;
                        struct intel_ring *ring;

                        ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;

                        record_context(&ee->context, ctx);

1482                         /* Copy these into an anonymous buffer,
1483                          * the simplest way to avoid them being
1484                          * overwritten by userspace.
1485                          */
1486                         ee->batchbuffer =
1487                                 i915_error_object_create(i915, request->batch);
1488
1489                         if (HAS_BROKEN_CS_TLB(i915))
1490                                 ee->wa_batchbuffer =
1491                                         i915_error_object_create(i915,
1492                                                                  engine->scratch);
1493                         request_record_user_bo(request, ee);
1494
1495                         ee->ctx =
1496                                 i915_error_object_create(i915,
1497                                                          request->hw_context->state);
1498
1499                         error->simulated |=
1500                                 i915_gem_context_no_error_capture(ctx);
1501
1502                         ee->rq_head = request->head;
1503                         ee->rq_post = request->postfix;
1504                         ee->rq_tail = request->tail;
1505
1506                         ring = request->ring;
1507                         ee->cpu_ring_head = ring->head;
1508                         ee->cpu_ring_tail = ring->tail;
1509                         ee->ringbuffer =
1510                                 i915_error_object_create(i915, ring->vma);
1511
1512                         engine_record_requests(engine, request, ee);
1513                 }
1514
1515                 ee->hws_page =
1516                         i915_error_object_create(i915,
1517                                                  engine->status_page.vma);
1518
1519                 ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
1520
1521                 ee->default_state = capture_object(i915, engine->default_state);
1522         }
1523 }
1524
1525 static void gem_capture_vm(struct i915_gpu_state *error,
1526                            struct i915_address_space *vm,
1527                            int idx)
1528 {
1529         struct drm_i915_error_buffer *active_bo;
1530         struct i915_vma *vma;
1531         int count;
1532
1533         count = 0;
1534         list_for_each_entry(vma, &vm->active_list, vm_link)
1535                 count++;
1536
1537         active_bo = NULL;
1538         if (count)
1539                 active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
1540         if (active_bo)
1541                 count = capture_error_bo(active_bo, count, &vm->active_list, false);
1542         else
1543                 count = 0;
1544
1545         error->active_vm[idx] = vm;
1546         error->active_bo[idx] = active_bo;
1547         error->active_bo_count[idx] = count;
1548 }
1549
1550 static void capture_active_buffers(struct i915_gpu_state *error)
1551 {
1552         int cnt = 0, i, j;
1553
1554         BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
1555         BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
1556         BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
1557
1558         /* Scan each engine looking for unique active contexts/vm */
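        /*
         * E.g. two engines executing contexts that share one ppgtt result
         * in a single active_vm/active_bo slot, not one slot per engine.
         */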
1559         for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
1560                 struct drm_i915_error_engine *ee = &error->engine[i];
1561                 bool found;
1562
1563                 if (!ee->vm)
1564                         continue;
1565
1566                 found = false;
1567                 for (j = 0; j < i && !found; j++)
1568                         found = error->engine[j].vm == ee->vm;
1569                 if (!found)
1570                         gem_capture_vm(error, ee->vm, cnt++);
1571         }
1572 }
1573
1574 static void capture_pinned_buffers(struct i915_gpu_state *error)
1575 {
1576         struct i915_address_space *vm = &error->i915->ggtt.vm;
1577         struct drm_i915_error_buffer *bo;
1578         struct i915_vma *vma;
1579         int count_inactive, count_active;
1580
1581         count_inactive = 0;
1582         list_for_each_entry(vma, &vm->inactive_list, vm_link)
1583                 count_inactive++;
1584
1585         count_active = 0;
1586         list_for_each_entry(vma, &vm->active_list, vm_link)
1587                 count_active++;
1588
1589         bo = NULL;
1590         if (count_inactive + count_active)
1591                 bo = kcalloc(count_inactive + count_active,
1592                              sizeof(*bo), GFP_ATOMIC);
1593         if (!bo)
1594                 return;
1595
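        /* Fill the array with the active vmas first, then the inactive. */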
1596         count_active = capture_error_bo(bo, count_active,
1597                                         &vm->active_list, true);
1598         count_inactive = capture_error_bo(bo + count_active, count_inactive,
1599                                           &vm->inactive_list, true);
1600         error->pinned_bo_count = count_inactive + count_active;
1601         error->pinned_bo = bo;
1602 }
1603
1604 static void capture_uc_state(struct i915_gpu_state *error)
1605 {
1606         struct drm_i915_private *i915 = error->i915;
1607         struct i915_error_uc *error_uc = &error->uc;
1608
1609         /* Capturing uC state won't be useful if there is no GuC */
1610         if (!error->device_info.has_guc)
1611                 return;
1612
1613         error_uc->guc_fw = i915->guc.fw;
1614         error_uc->huc_fw = i915->huc.fw;
1615
1616         /* Non-default firmware paths will be specified by the modparam.
1617          * As modparams are generally accessible from userspace, make
1618          * explicit copies of the firmware paths.
1619          */
1620         error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
1621         error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
1622         error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
1623 }
1624
1625 /* Capture all registers which don't fit into another category. */
1626 static void capture_reg_state(struct i915_gpu_state *error)
1627 {
1628         struct drm_i915_private *dev_priv = error->i915;
1629         int i;
1630
1631         /* General organization
1632          * 1. Registers specific to a single generation
1633          * 2. Registers which belong to multiple generations
1634          * 3. Feature-specific registers
1635          * 4. Everything else
1636          * Please try to follow the order.
1637          */
1638
1639         /* 1: Registers specific to a single generation */
1640         if (IS_VALLEYVIEW(dev_priv)) {
1641                 error->gtier[0] = I915_READ(GTIER);
1642                 error->ier = I915_READ(VLV_IER);
1643                 error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
1644         }
1645
1646         if (IS_GEN7(dev_priv))
1647                 error->err_int = I915_READ(GEN7_ERR_INT);
1648
1649         if (INTEL_GEN(dev_priv) >= 8) {
1650                 error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
1651                 error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
1652         }
1653
1654         if (IS_GEN6(dev_priv)) {
1655                 error->forcewake = I915_READ_FW(FORCEWAKE);
1656                 error->gab_ctl = I915_READ(GAB_CTL);
1657                 error->gfx_mode = I915_READ(GFX_MODE);
1658         }
1659
1660         /* 2: Registers which belong to multiple generations */
1661         if (INTEL_GEN(dev_priv) >= 7)
1662                 error->forcewake = I915_READ_FW(FORCEWAKE_MT);
1663
1664         if (INTEL_GEN(dev_priv) >= 6) {
1665                 error->derrmr = I915_READ(DERRMR);
1666                 error->error = I915_READ(ERROR_GEN6);
1667                 error->done_reg = I915_READ(DONE_REG);
1668         }
1669
1670         if (INTEL_GEN(dev_priv) >= 5)
1671                 error->ccid = I915_READ(CCID);
1672
1673         /* 3: Feature specific registers */
1674         if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
1675                 error->gam_ecochk = I915_READ(GAM_ECOCHK);
1676                 error->gac_eco = I915_READ(GAC_ECO_BITS);
1677         }
1678
1679         /* 4: Everything else */
1680         if (INTEL_GEN(dev_priv) >= 11) {
1681                 error->ier = I915_READ(GEN8_DE_MISC_IER);
1682                 error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
1683                 error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
1684                 error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
1685                 error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1686                 error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
1687                 error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
1688                 error->ngtier = 6;
1689         } else if (INTEL_GEN(dev_priv) >= 8) {
1690                 error->ier = I915_READ(GEN8_DE_MISC_IER);
1691                 for (i = 0; i < 4; i++)
1692                         error->gtier[i] = I915_READ(GEN8_GT_IER(i));
1693                 error->ngtier = 4;
1694         } else if (HAS_PCH_SPLIT(dev_priv)) {
1695                 error->ier = I915_READ(DEIER);
1696                 error->gtier[0] = I915_READ(GTIER);
1697                 error->ngtier = 1;
1698         } else if (IS_GEN2(dev_priv)) {
1699                 error->ier = I915_READ16(IER);
1700         } else if (!IS_VALLEYVIEW(dev_priv)) {
1701                 error->ier = I915_READ(IER);
1702         }
1703         error->eir = I915_READ(EIR);
1704         error->pgtbl_er = I915_READ(PGTBL_ER);
1705 }
1706
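/*
 * Compose the one-line summary printed to the log; with made-up values
 * it looks roughly like:
 *
 *      GPU HANG: ecode 9:0:0x8ed9f1f2, in Xorg [1234], reason: hang on rcs0, action: reset
 *
 * i.e. gen:engine:ecode, the guilty process when known, the reason given
 * by the caller, and whether a reset follows.
 */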
1707 static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
1708                                    struct i915_gpu_state *error,
1709                                    u32 engine_mask,
1710                                    const char *error_msg)
1711 {
1712         u32 ecode;
1713         int engine_id = -1, len;
1714
1715         ecode = i915_error_generate_code(dev_priv, error, &engine_id);
1716
1717         len = scnprintf(error->error_msg, sizeof(error->error_msg),
1718                         "GPU HANG: ecode %d:%d:0x%08x",
1719                         INTEL_GEN(dev_priv), engine_id, ecode);
1720
1721         if (engine_id != -1 && error->engine[engine_id].context.pid)
1722                 len += scnprintf(error->error_msg + len,
1723                                  sizeof(error->error_msg) - len,
1724                                  ", in %s [%d]",
1725                                  error->engine[engine_id].context.comm,
1726                                  error->engine[engine_id].context.pid);
1727
1728         scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
1729                   ", reason: %s, action: %s",
1730                   error_msg,
1731                   engine_mask ? "reset" : "continue");
1732 }
1733
1734 static void capture_gen_state(struct i915_gpu_state *error)
1735 {
1736         struct drm_i915_private *i915 = error->i915;
1737
1738         error->awake = i915->gt.awake;
1739         error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1740         error->suspended = i915->runtime_pm.suspended;
1741
1742         error->iommu = -1;
1743 #ifdef CONFIG_INTEL_IOMMU
1744         error->iommu = intel_iommu_gfx_mapped;
1745 #endif
1746         error->reset_count = i915_reset_count(&i915->gpu_error);
1747         error->suspend_count = i915->suspend_count;
1748
1749         memcpy(&error->device_info,
1750                INTEL_INFO(i915),
1751                sizeof(error->device_info));
1752         error->driver_caps = i915->caps;
1753 }
1754
1755 static __always_inline void dup_param(const char *type, void *x)
1756 {
1757         if (!__builtin_strcmp(type, "char *"))
1758                 *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
1759 }
1760
1761 static void capture_params(struct i915_gpu_state *error)
1762 {
1763         error->params = i915_modparams;
1764 #define DUP(T, x, ...) dup_param(#T, &error->params.x);
1765         I915_PARAMS_FOR_EACH(DUP);
1766 #undef DUP
1767 }
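/*
 * A sketch of the expansion above: for a hypothetical "char *fw_path"
 * modparam, I915_PARAMS_FOR_EACH(DUP) emits
 *
 *      dup_param("char *", &error->params.fw_path);
 *
 * so only pointer-typed parameters are deep-copied via kstrdup(), while
 * scalar parameters are kept from the struct assignment.
 */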
1768
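/*
 * The epoch is the oldest hangcheck stall observed across the engines
 * (defaulting to the capture time itself) and serves as the reference
 * point for the relative timestamps in the error report.
 */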
1769 static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
1770 {
1771         unsigned long epoch = error->capture;
1772         int i;
1773
1774         for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
1775                 const struct drm_i915_error_engine *ee = &error->engine[i];
1776
1777                 if (ee->hangcheck_stalled &&
1778                     time_before(ee->hangcheck_timestamp, epoch))
1779                         epoch = ee->hangcheck_timestamp;
1780         }
1781
1782         return epoch;
1783 }
1784
1785 static int capture(void *data)
1786 {
1787         struct i915_gpu_state *error = data;
1788
1789         error->time = ktime_get_real();
1790         error->boottime = ktime_get_boottime();
1791         error->uptime = ktime_sub(ktime_get(),
1792                                   error->i915->gt.last_init_time);
1793         error->capture = jiffies;
1794
1795         capture_params(error);
1796         capture_gen_state(error);
1797         capture_uc_state(error);
1798         capture_reg_state(error);
1799         gem_record_fences(error);
1800         gem_record_rings(error);
1801         capture_active_buffers(error);
1802         capture_pinned_buffers(error);
1803
1804         error->overlay = intel_overlay_capture_error_state(error->i915);
1805         error->display = intel_display_capture_error_state(error->i915);
1806
1807         error->epoch = capture_find_epoch(error);
1808
1809         return 0;
1810 }
1811
1812 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
1813
1814 struct i915_gpu_state *
1815 i915_capture_gpu_state(struct drm_i915_private *i915)
1816 {
1817         struct i915_gpu_state *error;
1818
1819         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1820         if (!error)
1821                 return NULL;
1822
1823         kref_init(&error->ref);
1824         error->i915 = i915;
1825
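        /*
         * stop_machine() runs capture() with every other CPU quiesced,
         * giving us an effectively atomic snapshot of GPU and driver
         * state; this is also why the capture path is restricted to
         * GFP_ATOMIC allocations.
         */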
1826         stop_machine(capture, error, NULL);
1827
1828         return error;
1829 }
1830
1831 /**
1832  * i915_capture_error_state - capture an error record for later analysis
1833  * @i915: i915 device
1834  * @engine_mask: the mask of engines triggering the hang
1835  * @error_msg: a message to insert into the error capture header
1836  *
1837  * Should be called when an error is detected (either a hang or an error
1838  * interrupt) to capture error state from the time of the error.  Fills
1839  * out a structure which becomes available in debugfs for user level tools
1840  * to pick up.
1841  */
1842 void i915_capture_error_state(struct drm_i915_private *i915,
1843                               u32 engine_mask,
1844                               const char *error_msg)
1845 {
1846         static bool warned;
1847         struct i915_gpu_state *error;
1848         unsigned long flags;
1849
1850         if (!i915_modparams.error_capture)
1851                 return;
1852
1853         if (READ_ONCE(i915->gpu_error.first_error))
1854                 return;
1855
1856         error = i915_capture_gpu_state(i915);
1857         if (!error) {
1858                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1859                 return;
1860         }
1861
1862         i915_error_capture_msg(i915, error, engine_mask, error_msg);
1863         DRM_INFO("%s\n", error->error_msg);
1864
1865         if (!error->simulated) {
1866                 spin_lock_irqsave(&i915->gpu_error.lock, flags);
1867                 if (!i915->gpu_error.first_error) {
1868                         i915->gpu_error.first_error = error;
1869                         error = NULL;
1870                 }
1871                 spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
1872         }
1873
1874         if (error) {
1875                 __i915_gpu_state_free(&error->ref);
1876                 return;
1877         }
1878
1879         if (!warned &&
1880             ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
1881                 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1882                 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1883                 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1884                 DRM_INFO("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
1885                 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1886                          i915->drm.primary->index);
1887                 warned = true;
1888         }
1889 }
1890
1891 struct i915_gpu_state *
1892 i915_first_error_state(struct drm_i915_private *i915)
1893 {
1894         struct i915_gpu_state *error;
1895
1896         spin_lock_irq(&i915->gpu_error.lock);
1897         error = i915->gpu_error.first_error;
1898         if (error)
1899                 i915_gpu_state_get(error);
1900         spin_unlock_irq(&i915->gpu_error.lock);
1901
1902         return error;
1903 }
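/*
 * A minimal (hypothetical) usage sketch; the reference returned above
 * must be dropped with i915_gpu_state_put() once the caller is done:
 *
 *      struct i915_gpu_state *error = i915_first_error_state(i915);
 *      if (error) {
 *              ... inspect or serialise the error state ...
 *              i915_gpu_state_put(error);
 *      }
 */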
1904
1905 void i915_reset_error_state(struct drm_i915_private *i915)
1906 {
1907         struct i915_gpu_state *error;
1908
1909         spin_lock_irq(&i915->gpu_error.lock);
1910         error = i915->gpu_error.first_error;
1911         i915->gpu_error.first_error = NULL;
1912         spin_unlock_irq(&i915->gpu_error.lock);
1913
1914         i915_gpu_state_put(error);
1915 }