1e1001332Skettenis /* 2e1001332Skettenis * Copyright (c) 2008 Intel Corporation 3e1001332Skettenis * 4e1001332Skettenis * Permission is hereby granted, free of charge, to any person obtaining a 5e1001332Skettenis * copy of this software and associated documentation files (the "Software"), 6e1001332Skettenis * to deal in the Software without restriction, including without limitation 7e1001332Skettenis * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8e1001332Skettenis * and/or sell copies of the Software, and to permit persons to whom the 9e1001332Skettenis * Software is furnished to do so, subject to the following conditions: 10e1001332Skettenis * 11e1001332Skettenis * The above copyright notice and this permission notice (including the next 12e1001332Skettenis * paragraph) shall be included in all copies or substantial portions of the 13e1001332Skettenis * Software. 14e1001332Skettenis * 15e1001332Skettenis * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16e1001332Skettenis * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17e1001332Skettenis * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18e1001332Skettenis * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19e1001332Skettenis * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20e1001332Skettenis * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21e1001332Skettenis * IN THE SOFTWARE. 22e1001332Skettenis * 23e1001332Skettenis * Authors: 24e1001332Skettenis * Eric Anholt <eric@anholt.net> 25e1001332Skettenis * Keith Packard <keithp@keithp.com> 26e1001332Skettenis * Mika Kuoppala <mika.kuoppala@intel.com> 27e1001332Skettenis * 28e1001332Skettenis */ 29e1001332Skettenis 307f4dd379Sjsg #include <linux/ascii85.h> 311bb76ff1Sjsg #include <linux/highmem.h> 32c349dbc7Sjsg #include <linux/nmi.h> 33c349dbc7Sjsg #include <linux/pagevec.h> 34c349dbc7Sjsg #include <linux/scatterlist.h> 351bb76ff1Sjsg #include <linux/string_helpers.h> 36c349dbc7Sjsg #include <linux/utsname.h> 37c349dbc7Sjsg #include <linux/zlib.h> 38c349dbc7Sjsg 391bb76ff1Sjsg #include <drm/drm_cache.h> 40c349dbc7Sjsg #include <drm/drm_print.h> 41c349dbc7Sjsg 425ca02815Sjsg #include "display/intel_dmc.h" 43c349dbc7Sjsg #include "display/intel_overlay.h" 44c349dbc7Sjsg 45c349dbc7Sjsg #include "gem/i915_gem_context.h" 46c349dbc7Sjsg #include "gem/i915_gem_lmem.h" 471bb76ff1Sjsg #include "gt/intel_engine_regs.h" 48ad8b1aafSjsg #include "gt/intel_gt.h" 491bb76ff1Sjsg #include "gt/intel_gt_mcr.h" 50c349dbc7Sjsg #include "gt/intel_gt_pm.h" 511bb76ff1Sjsg #include "gt/intel_gt_regs.h" 521bb76ff1Sjsg #include "gt/uc/intel_guc_capture.h" 537f4dd379Sjsg 541bb76ff1Sjsg #include "i915_driver.h" 55e1001332Skettenis #include "i915_drv.h" 567f4dd379Sjsg #include "i915_gpu_error.h" 57c349dbc7Sjsg #include "i915_memcpy.h" 58f005ef32Sjsg #include "i915_reg.h" 59c349dbc7Sjsg #include "i915_scatterlist.h" 601bb76ff1Sjsg #include "i915_utils.h" 61e1001332Skettenis 621bb76ff1Sjsg #define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) 63c349dbc7Sjsg #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) 64c349dbc7Sjsg 65c349dbc7Sjsg static void __sg_set_buf(struct scatterlist *sg, 66c349dbc7Sjsg void *addr, unsigned int len, loff_t it) 67e1001332Skettenis { 68c349dbc7Sjsg STUB(); 69c349dbc7Sjsg #ifdef notyet 70c349dbc7Sjsg sg->page_link = (unsigned long)virt_to_page(addr); 71c349dbc7Sjsg sg->offset = 
offset_in_page(addr); 72c349dbc7Sjsg sg->length = len; 73c349dbc7Sjsg sg->dma_address = it; 74c349dbc7Sjsg #endif 75e1001332Skettenis } 76e1001332Skettenis 77c349dbc7Sjsg static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) 78e1001332Skettenis { 79c349dbc7Sjsg STUB(); 80e1001332Skettenis return false; 81c349dbc7Sjsg #ifdef notyet 82c349dbc7Sjsg if (!len) 83e1001332Skettenis return false; 84e1001332Skettenis 85c349dbc7Sjsg if (e->bytes + len + 1 <= e->size) 86e1001332Skettenis return true; 87c349dbc7Sjsg 88c349dbc7Sjsg if (e->bytes) { 89c349dbc7Sjsg __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter); 90c349dbc7Sjsg e->iter += e->bytes; 91c349dbc7Sjsg e->buf = NULL; 92c349dbc7Sjsg e->bytes = 0; 93e1001332Skettenis } 94e1001332Skettenis 95c349dbc7Sjsg if (e->cur == e->end) { 96c349dbc7Sjsg struct scatterlist *sgl; 97c349dbc7Sjsg 98c349dbc7Sjsg sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL); 99c349dbc7Sjsg if (!sgl) { 100c349dbc7Sjsg e->err = -ENOMEM; 101e1001332Skettenis return false; 102e1001332Skettenis } 103e1001332Skettenis 104c349dbc7Sjsg if (e->cur) { 105c349dbc7Sjsg e->cur->offset = 0; 106c349dbc7Sjsg e->cur->length = 0; 107c349dbc7Sjsg e->cur->page_link = 108c349dbc7Sjsg (unsigned long)sgl | SG_CHAIN; 109c349dbc7Sjsg } else { 110c349dbc7Sjsg e->sgl = sgl; 111c349dbc7Sjsg } 112c349dbc7Sjsg 113c349dbc7Sjsg e->cur = sgl; 114c349dbc7Sjsg e->end = sgl + SG_MAX_SINGLE_ALLOC - 1; 115c349dbc7Sjsg } 116c349dbc7Sjsg 117f005ef32Sjsg e->size = ALIGN(len + 1, SZ_64K); 118c349dbc7Sjsg e->buf = kmalloc(e->size, ALLOW_FAIL); 119c349dbc7Sjsg if (!e->buf) { 120c349dbc7Sjsg e->size = PAGE_ALIGN(len + 1); 121c349dbc7Sjsg e->buf = kmalloc(e->size, GFP_KERNEL); 122c349dbc7Sjsg } 123c349dbc7Sjsg if (!e->buf) { 124c349dbc7Sjsg e->err = -ENOMEM; 125e1001332Skettenis return false; 126e1001332Skettenis } 127e1001332Skettenis 128e1001332Skettenis return true; 129c349dbc7Sjsg #endif 130e1001332Skettenis } 131e1001332Skettenis 1327f4dd379Sjsg __printf(2, 0) 133e1001332Skettenis static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 134c349dbc7Sjsg const char *fmt, va_list args) 135e1001332Skettenis { 136c349dbc7Sjsg va_list ap; 137c349dbc7Sjsg int len; 138e1001332Skettenis 139c349dbc7Sjsg if (e->err) 140e1001332Skettenis return; 141e1001332Skettenis 142c349dbc7Sjsg va_copy(ap, args); 143c349dbc7Sjsg len = vsnprintf(NULL, 0, fmt, ap); 144c349dbc7Sjsg va_end(ap); 145c349dbc7Sjsg if (len <= 0) { 146c349dbc7Sjsg e->err = len; 147e1001332Skettenis return; 148e1001332Skettenis } 149e1001332Skettenis 150c349dbc7Sjsg if (!__i915_error_grow(e, len)) 151c349dbc7Sjsg return; 152e1001332Skettenis 153c349dbc7Sjsg GEM_BUG_ON(e->bytes >= e->size); 154c349dbc7Sjsg len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args); 155c349dbc7Sjsg if (len < 0) { 156c349dbc7Sjsg e->err = len; 157c349dbc7Sjsg return; 158c349dbc7Sjsg } 159c349dbc7Sjsg e->bytes += len; 160e1001332Skettenis } 161e1001332Skettenis 162c349dbc7Sjsg static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) 163e1001332Skettenis { 164e1001332Skettenis unsigned len; 165e1001332Skettenis 166c349dbc7Sjsg if (e->err || !str) 167e1001332Skettenis return; 168e1001332Skettenis 169e1001332Skettenis len = strlen(str); 170c349dbc7Sjsg if (!__i915_error_grow(e, len)) 171e1001332Skettenis return; 172e1001332Skettenis 173c349dbc7Sjsg GEM_BUG_ON(e->bytes + len > e->size); 174e1001332Skettenis memcpy(e->buf + e->bytes, str, len); 175c349dbc7Sjsg e->bytes += len; 176e1001332Skettenis } 177e1001332Skettenis 
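/*
 * Descriptive sketch of the mechanism implemented above and consumed by
 * i915_gpu_coredump_copy_to_buffer() below: the error state is streamed
 * into kmalloc'd buffers tracked by a chained scatterlist.
 * __i915_error_grow() retires each filled buffer into an sg entry,
 * reusing sg->dma_address to record the buffer's logical offset within
 * the dump, and links a fresh sg table page with SG_CHAIN when the
 * current table fills; the reader then walks this chain to serve reads
 * at arbitrary offsets.
 */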
178e1001332Skettenis #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 179e1001332Skettenis #define err_puts(e, s) i915_error_puts(e, s) 180e1001332Skettenis 1817f4dd379Sjsg static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf) 1827f4dd379Sjsg { 1837f4dd379Sjsg i915_error_vprintf(p->arg, vaf->fmt, *vaf->va); 1847f4dd379Sjsg } 1857f4dd379Sjsg 1867f4dd379Sjsg static inline struct drm_printer 1877f4dd379Sjsg i915_error_printer(struct drm_i915_error_state_buf *e) 1887f4dd379Sjsg { 1897f4dd379Sjsg struct drm_printer p = { 1907f4dd379Sjsg .printfn = __i915_printfn_error, 1917f4dd379Sjsg .arg = e, 1927f4dd379Sjsg }; 1937f4dd379Sjsg return p; 1947f4dd379Sjsg } 1957f4dd379Sjsg 196c349dbc7Sjsg /* single threaded page allocator with a reserved stash for emergencies */ 197f005ef32Sjsg static void pool_fini(struct folio_batch *fbatch) 198c349dbc7Sjsg { 199c349dbc7Sjsg STUB(); 200c349dbc7Sjsg #ifdef notyet 201f005ef32Sjsg folio_batch_release(fbatch); 202c349dbc7Sjsg #endif 203c349dbc7Sjsg } 204c349dbc7Sjsg 205f005ef32Sjsg static int pool_refill(struct folio_batch *fbatch, gfp_t gfp) 206c349dbc7Sjsg { 207f005ef32Sjsg STUB(); 208f005ef32Sjsg return -ENOSYS; 209f005ef32Sjsg #ifdef notyet 210f005ef32Sjsg while (folio_batch_space(fbatch)) { 211f005ef32Sjsg struct folio *folio; 212c349dbc7Sjsg 213f005ef32Sjsg folio = folio_alloc(gfp, 0); 214f005ef32Sjsg if (!folio) 215c349dbc7Sjsg return -ENOMEM; 216c349dbc7Sjsg 217f005ef32Sjsg folio_batch_add(fbatch, folio); 218c349dbc7Sjsg } 219c349dbc7Sjsg 220c349dbc7Sjsg return 0; 221f005ef32Sjsg #endif 222c349dbc7Sjsg } 223c349dbc7Sjsg 224f005ef32Sjsg static int intel_pool_init(struct folio_batch *fbatch, gfp_t gfp) 225c349dbc7Sjsg { 226c349dbc7Sjsg int err; 227c349dbc7Sjsg 228f005ef32Sjsg STUB(); 229f005ef32Sjsg return -ENOSYS; 230f005ef32Sjsg #ifdef notyet 231f005ef32Sjsg folio_batch_init(fbatch); 232c349dbc7Sjsg 233f005ef32Sjsg err = pool_refill(fbatch, gfp); 234c349dbc7Sjsg if (err) 235f005ef32Sjsg pool_fini(fbatch); 236c349dbc7Sjsg 237c349dbc7Sjsg return err; 238f005ef32Sjsg #endif 239c349dbc7Sjsg } 240c349dbc7Sjsg 241f005ef32Sjsg static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp) 242c349dbc7Sjsg { 243c349dbc7Sjsg STUB(); 244c349dbc7Sjsg return NULL; 245c349dbc7Sjsg #ifdef notyet 246f005ef32Sjsg struct folio *folio; 247c349dbc7Sjsg 248f005ef32Sjsg folio = folio_alloc(gfp, 0); 249f005ef32Sjsg if (!folio && folio_batch_count(fbatch)) 250f005ef32Sjsg folio = fbatch->folios[--fbatch->nr]; 251c349dbc7Sjsg 252f005ef32Sjsg return folio ? 
folio_address(folio) : NULL; 253c349dbc7Sjsg #endif 254c349dbc7Sjsg } 255c349dbc7Sjsg 256f005ef32Sjsg static void pool_free(struct folio_batch *fbatch, void *addr) 257c349dbc7Sjsg { 258c349dbc7Sjsg STUB(); 259c349dbc7Sjsg #ifdef notyet 260f005ef32Sjsg struct folio *folio = virt_to_folio(addr); 261c349dbc7Sjsg 262f005ef32Sjsg if (folio_batch_space(fbatch)) 263f005ef32Sjsg folio_batch_add(fbatch, folio); 264c349dbc7Sjsg else 265f005ef32Sjsg folio_put(folio); 266c349dbc7Sjsg #endif 267c349dbc7Sjsg } 268c349dbc7Sjsg 2697f4dd379Sjsg #ifdef CONFIG_DRM_I915_COMPRESS_ERROR 2707f4dd379Sjsg 271c349dbc7Sjsg struct i915_vma_compress { 272f005ef32Sjsg struct folio_batch pool; 2737f4dd379Sjsg struct z_stream_s zstream; 2747f4dd379Sjsg void *tmp; 2757f4dd379Sjsg }; 2767f4dd379Sjsg 277c349dbc7Sjsg static bool compress_init(struct i915_vma_compress *c) 2787f4dd379Sjsg { 279c349dbc7Sjsg struct z_stream_s *zstream = &c->zstream; 280c349dbc7Sjsg 281c349dbc7Sjsg if (intel_pool_init(&c->pool, ALLOW_FAIL)) 282c349dbc7Sjsg return false; 2837f4dd379Sjsg 2847f4dd379Sjsg zstream->workspace = 2857f4dd379Sjsg kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), 286c349dbc7Sjsg ALLOW_FAIL); 287c349dbc7Sjsg if (!zstream->workspace) { 288c349dbc7Sjsg pool_fini(&c->pool); 2897f4dd379Sjsg return false; 2907f4dd379Sjsg } 2917f4dd379Sjsg 2927f4dd379Sjsg c->tmp = NULL; 2937f4dd379Sjsg if (i915_has_memcpy_from_wc()) 294c349dbc7Sjsg c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); 2957f4dd379Sjsg 2967f4dd379Sjsg return true; 2977f4dd379Sjsg } 2987f4dd379Sjsg 299c349dbc7Sjsg static bool compress_start(struct i915_vma_compress *c) 3007f4dd379Sjsg { 301c349dbc7Sjsg struct z_stream_s *zstream = &c->zstream; 302c349dbc7Sjsg void *workspace = zstream->workspace; 303c349dbc7Sjsg 304c349dbc7Sjsg memset(zstream, 0, sizeof(*zstream)); 305c349dbc7Sjsg zstream->workspace = workspace; 306c349dbc7Sjsg 307c349dbc7Sjsg return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; 308c349dbc7Sjsg } 309c349dbc7Sjsg 310c349dbc7Sjsg static void *compress_next_page(struct i915_vma_compress *c, 311c349dbc7Sjsg struct i915_vma_coredump *dst) 312c349dbc7Sjsg { 3131bb76ff1Sjsg void *page_addr; 3141bb76ff1Sjsg struct vm_page *page; 3157f4dd379Sjsg 3161bb76ff1Sjsg page_addr = pool_alloc(&c->pool, ALLOW_FAIL); 3171bb76ff1Sjsg if (!page_addr) 3187f4dd379Sjsg return ERR_PTR(-ENOMEM); 3197f4dd379Sjsg 3201bb76ff1Sjsg page = virt_to_page(page_addr); 3211bb76ff1Sjsg list_add_tail(&page->lru, &dst->page_list); 3221bb76ff1Sjsg return page_addr; 3237f4dd379Sjsg } 3247f4dd379Sjsg 325c349dbc7Sjsg static int compress_page(struct i915_vma_compress *c, 3267f4dd379Sjsg void *src, 327c349dbc7Sjsg struct i915_vma_coredump *dst, 328c349dbc7Sjsg bool wc) 3297f4dd379Sjsg { 3307f4dd379Sjsg struct z_stream_s *zstream = &c->zstream; 3317f4dd379Sjsg 3327f4dd379Sjsg zstream->next_in = src; 333c349dbc7Sjsg if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) 3347f4dd379Sjsg zstream->next_in = c->tmp; 3357f4dd379Sjsg zstream->avail_in = PAGE_SIZE; 3367f4dd379Sjsg 3377f4dd379Sjsg do { 3387f4dd379Sjsg if (zstream->avail_out == 0) { 339c349dbc7Sjsg zstream->next_out = compress_next_page(c, dst); 3407f4dd379Sjsg if (IS_ERR(zstream->next_out)) 3417f4dd379Sjsg return PTR_ERR(zstream->next_out); 3427f4dd379Sjsg 3437f4dd379Sjsg zstream->avail_out = PAGE_SIZE; 3447f4dd379Sjsg } 3457f4dd379Sjsg 3467f4dd379Sjsg if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) 3477f4dd379Sjsg return -EIO; 348ad8b1aafSjsg 349ad8b1aafSjsg cond_resched(); 3507f4dd379Sjsg } while 
(zstream->avail_in); 3517f4dd379Sjsg 3527f4dd379Sjsg /* Fallback to uncompressed if we increase size? */ 3537f4dd379Sjsg if (0 && zstream->total_out > zstream->total_in) 3547f4dd379Sjsg return -E2BIG; 3557f4dd379Sjsg 3567f4dd379Sjsg return 0; 3577f4dd379Sjsg } 3587f4dd379Sjsg 359c349dbc7Sjsg static int compress_flush(struct i915_vma_compress *c, 360c349dbc7Sjsg struct i915_vma_coredump *dst) 3617f4dd379Sjsg { 3627f4dd379Sjsg struct z_stream_s *zstream = &c->zstream; 3637f4dd379Sjsg 3647f4dd379Sjsg do { 3657f4dd379Sjsg switch (zlib_deflate(zstream, Z_FINISH)) { 3667f4dd379Sjsg case Z_OK: /* more space requested */ 367c349dbc7Sjsg zstream->next_out = compress_next_page(c, dst); 3687f4dd379Sjsg if (IS_ERR(zstream->next_out)) 3697f4dd379Sjsg return PTR_ERR(zstream->next_out); 3707f4dd379Sjsg 3717f4dd379Sjsg zstream->avail_out = PAGE_SIZE; 3727f4dd379Sjsg break; 3737f4dd379Sjsg 3747f4dd379Sjsg case Z_STREAM_END: 3757f4dd379Sjsg goto end; 3767f4dd379Sjsg 3777f4dd379Sjsg default: /* any error */ 3787f4dd379Sjsg return -EIO; 3797f4dd379Sjsg } 3807f4dd379Sjsg } while (1); 3817f4dd379Sjsg 3827f4dd379Sjsg end: 3837f4dd379Sjsg memset(zstream->next_out, 0, zstream->avail_out); 3847f4dd379Sjsg dst->unused = zstream->avail_out; 3857f4dd379Sjsg return 0; 3867f4dd379Sjsg } 3877f4dd379Sjsg 388c349dbc7Sjsg static void compress_finish(struct i915_vma_compress *c) 3897f4dd379Sjsg { 390c349dbc7Sjsg zlib_deflateEnd(&c->zstream); 391c349dbc7Sjsg } 3927f4dd379Sjsg 393c349dbc7Sjsg static void compress_fini(struct i915_vma_compress *c) 394c349dbc7Sjsg { 395c349dbc7Sjsg kfree(c->zstream.workspace); 3967f4dd379Sjsg if (c->tmp) 397c349dbc7Sjsg pool_free(&c->pool, c->tmp); 398c349dbc7Sjsg pool_fini(&c->pool); 3997f4dd379Sjsg } 4007f4dd379Sjsg 4017f4dd379Sjsg static void err_compression_marker(struct drm_i915_error_state_buf *m) 4027f4dd379Sjsg { 4037f4dd379Sjsg err_puts(m, ":"); 4047f4dd379Sjsg } 4057f4dd379Sjsg 4067f4dd379Sjsg #else 4077f4dd379Sjsg 408c349dbc7Sjsg struct i915_vma_compress { 409f005ef32Sjsg struct folio_batch pool; 4107f4dd379Sjsg }; 4117f4dd379Sjsg 412c349dbc7Sjsg static bool compress_init(struct i915_vma_compress *c) 413c349dbc7Sjsg { 414c349dbc7Sjsg return intel_pool_init(&c->pool, ALLOW_FAIL) == 0; 415c349dbc7Sjsg } 416c349dbc7Sjsg 417c349dbc7Sjsg static bool compress_start(struct i915_vma_compress *c) 4187f4dd379Sjsg { 4197f4dd379Sjsg return true; 4207f4dd379Sjsg } 4217f4dd379Sjsg 422c349dbc7Sjsg static int compress_page(struct i915_vma_compress *c, 4237f4dd379Sjsg void *src, 424c349dbc7Sjsg struct i915_vma_coredump *dst, 425c349dbc7Sjsg bool wc) 4267f4dd379Sjsg { 4271bb76ff1Sjsg STUB(); 4281bb76ff1Sjsg return -ENOSYS; 4291bb76ff1Sjsg #ifdef notyet 4307f4dd379Sjsg void *ptr; 4317f4dd379Sjsg 432c349dbc7Sjsg ptr = pool_alloc(&c->pool, ALLOW_FAIL); 433c349dbc7Sjsg if (!ptr) 4347f4dd379Sjsg return -ENOMEM; 4357f4dd379Sjsg 436c349dbc7Sjsg if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) 4377f4dd379Sjsg memcpy(ptr, src, PAGE_SIZE); 4381bb76ff1Sjsg list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list); 439ad8b1aafSjsg cond_resched(); 4407f4dd379Sjsg 4417f4dd379Sjsg return 0; 4421bb76ff1Sjsg #endif 4437f4dd379Sjsg } 4447f4dd379Sjsg 445c349dbc7Sjsg static int compress_flush(struct i915_vma_compress *c, 446c349dbc7Sjsg struct i915_vma_coredump *dst) 4477f4dd379Sjsg { 4487f4dd379Sjsg return 0; 4497f4dd379Sjsg } 4507f4dd379Sjsg 451c349dbc7Sjsg static void compress_finish(struct i915_vma_compress *c) 4527f4dd379Sjsg { 4537f4dd379Sjsg } 4547f4dd379Sjsg 455c349dbc7Sjsg static void compress_fini(struct 
i915_vma_compress *c) 456c349dbc7Sjsg { 457c349dbc7Sjsg pool_fini(&c->pool); 458c349dbc7Sjsg } 459c349dbc7Sjsg 4607f4dd379Sjsg static void err_compression_marker(struct drm_i915_error_state_buf *m) 4617f4dd379Sjsg { 4627f4dd379Sjsg err_puts(m, "~"); 4637f4dd379Sjsg } 4647f4dd379Sjsg 4657f4dd379Sjsg #endif 4667f4dd379Sjsg 4677f4dd379Sjsg static void error_print_instdone(struct drm_i915_error_state_buf *m, 468c349dbc7Sjsg const struct intel_engine_coredump *ee) 469e1001332Skettenis { 4707f4dd379Sjsg int slice; 4717f4dd379Sjsg int subslice; 4721bb76ff1Sjsg int iter; 473e1001332Skettenis 4747f4dd379Sjsg err_printf(m, " INSTDONE: 0x%08x\n", 4757f4dd379Sjsg ee->instdone.instdone); 476e1001332Skettenis 4775ca02815Sjsg if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3) 478e1001332Skettenis return; 479e1001332Skettenis 4807f4dd379Sjsg err_printf(m, " SC_INSTDONE: 0x%08x\n", 4817f4dd379Sjsg ee->instdone.slice_common); 4823253c27bSkettenis 4835ca02815Sjsg if (GRAPHICS_VER(m->i915) <= 6) 4847f4dd379Sjsg return; 4857f4dd379Sjsg 4861bb76ff1Sjsg for_each_ss_steering(iter, ee->engine->gt, slice, subslice) 4877f4dd379Sjsg err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 4887f4dd379Sjsg slice, subslice, 4897f4dd379Sjsg ee->instdone.sampler[slice][subslice]); 4907f4dd379Sjsg 4911bb76ff1Sjsg for_each_ss_steering(iter, ee->engine->gt, slice, subslice) 4927f4dd379Sjsg err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", 4937f4dd379Sjsg slice, subslice, 4947f4dd379Sjsg ee->instdone.row[slice][subslice]); 4957f4dd379Sjsg 4965ca02815Sjsg if (GRAPHICS_VER(m->i915) < 12) 497c349dbc7Sjsg return; 498c349dbc7Sjsg 4991bb76ff1Sjsg if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) { 5001bb76ff1Sjsg for_each_ss_steering(iter, ee->engine->gt, slice, subslice) 5011bb76ff1Sjsg err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n", 5021bb76ff1Sjsg slice, subslice, 5031bb76ff1Sjsg ee->instdone.geom_svg[slice][subslice]); 5041bb76ff1Sjsg } 5051bb76ff1Sjsg 506c349dbc7Sjsg err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", 507c349dbc7Sjsg ee->instdone.slice_common_extra[0]); 508c349dbc7Sjsg err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", 509c349dbc7Sjsg ee->instdone.slice_common_extra[1]); 5107f4dd379Sjsg } 5117f4dd379Sjsg 5127f4dd379Sjsg static void error_print_request(struct drm_i915_error_state_buf *m, 5137f4dd379Sjsg const char *prefix, 514c349dbc7Sjsg const struct i915_request_coredump *erq) 5157f4dd379Sjsg { 5167f4dd379Sjsg if (!erq->seqno) 5177f4dd379Sjsg return; 5187f4dd379Sjsg 519ad8b1aafSjsg err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n", 520c349dbc7Sjsg prefix, erq->pid, erq->context, erq->seqno, 521c349dbc7Sjsg test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 522c349dbc7Sjsg &erq->flags) ? "!" : "", 523c349dbc7Sjsg test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, 524c349dbc7Sjsg &erq->flags) ? 
"+" : "", 525c349dbc7Sjsg erq->sched_attr.priority, 526ad8b1aafSjsg erq->head, erq->tail); 5277f4dd379Sjsg } 5287f4dd379Sjsg 5297f4dd379Sjsg static void error_print_context(struct drm_i915_error_state_buf *m, 5307f4dd379Sjsg const char *header, 531c349dbc7Sjsg const struct i915_gem_context_coredump *ctx) 5327f4dd379Sjsg { 533c349dbc7Sjsg err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n", 534c349dbc7Sjsg header, ctx->comm, ctx->pid, ctx->sched_attr.priority, 535c349dbc7Sjsg ctx->guilty, ctx->active, 5361bb76ff1Sjsg ctx->total_runtime, ctx->avg_runtime); 537f005ef32Sjsg err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno); 538c349dbc7Sjsg } 539c349dbc7Sjsg 540c349dbc7Sjsg static struct i915_vma_coredump * 541c349dbc7Sjsg __find_vma(struct i915_vma_coredump *vma, const char *name) 542c349dbc7Sjsg { 543c349dbc7Sjsg while (vma) { 544c349dbc7Sjsg if (strcmp(vma->name, name) == 0) 545c349dbc7Sjsg return vma; 546c349dbc7Sjsg vma = vma->next; 547c349dbc7Sjsg } 548c349dbc7Sjsg 549c349dbc7Sjsg return NULL; 550c349dbc7Sjsg } 551c349dbc7Sjsg 5521bb76ff1Sjsg struct i915_vma_coredump * 5531bb76ff1Sjsg intel_gpu_error_find_batch(const struct intel_engine_coredump *ee) 554c349dbc7Sjsg { 555c349dbc7Sjsg return __find_vma(ee->vma, "batch"); 5567f4dd379Sjsg } 5577f4dd379Sjsg 5587f4dd379Sjsg static void error_print_engine(struct drm_i915_error_state_buf *m, 559c349dbc7Sjsg const struct intel_engine_coredump *ee) 5607f4dd379Sjsg { 561c349dbc7Sjsg struct i915_vma_coredump *batch; 5627f4dd379Sjsg int n; 5637f4dd379Sjsg 564c349dbc7Sjsg err_printf(m, "%s command stream:\n", ee->engine->name); 565c349dbc7Sjsg err_printf(m, " CCID: 0x%08x\n", ee->ccid); 5667f4dd379Sjsg err_printf(m, " START: 0x%08x\n", ee->start); 5677f4dd379Sjsg err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); 5687f4dd379Sjsg err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", 5697f4dd379Sjsg ee->tail, ee->rq_post, ee->rq_tail); 5707f4dd379Sjsg err_printf(m, " CTL: 0x%08x\n", ee->ctl); 5717f4dd379Sjsg err_printf(m, " MODE: 0x%08x\n", ee->mode); 5727f4dd379Sjsg err_printf(m, " HWS: 0x%08x\n", ee->hws); 5737f4dd379Sjsg err_printf(m, " ACTHD: 0x%08x %08x\n", 5747f4dd379Sjsg (u32)(ee->acthd>>32), (u32)ee->acthd); 5757f4dd379Sjsg err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir); 5767f4dd379Sjsg err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr); 577c349dbc7Sjsg err_printf(m, " ESR: 0x%08x\n", ee->esr); 5787f4dd379Sjsg 5797f4dd379Sjsg error_print_instdone(m, ee); 5807f4dd379Sjsg 5811bb76ff1Sjsg batch = intel_gpu_error_find_batch(ee); 582c349dbc7Sjsg if (batch) { 583c349dbc7Sjsg u64 start = batch->gtt_offset; 584c349dbc7Sjsg u64 end = start + batch->gtt_size; 5857f4dd379Sjsg 5867f4dd379Sjsg err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", 5877f4dd379Sjsg upper_32_bits(start), lower_32_bits(start), 5887f4dd379Sjsg upper_32_bits(end), lower_32_bits(end)); 5897f4dd379Sjsg } 5905ca02815Sjsg if (GRAPHICS_VER(m->i915) >= 4) { 5917f4dd379Sjsg err_printf(m, " BBADDR: 0x%08x_%08x\n", 5927f4dd379Sjsg (u32)(ee->bbaddr>>32), (u32)ee->bbaddr); 5937f4dd379Sjsg err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate); 5947f4dd379Sjsg err_printf(m, " INSTPS: 0x%08x\n", ee->instps); 5957f4dd379Sjsg } 5967f4dd379Sjsg err_printf(m, " INSTPM: 0x%08x\n", ee->instpm); 5977f4dd379Sjsg err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr), 5987f4dd379Sjsg lower_32_bits(ee->faddr)); 5995ca02815Sjsg if (GRAPHICS_VER(m->i915) >= 6) { 6007f4dd379Sjsg err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi); 6017f4dd379Sjsg 
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg); 6027f4dd379Sjsg } 6031bb76ff1Sjsg if (GRAPHICS_VER(m->i915) >= 11) { 6041bb76ff1Sjsg err_printf(m, " NOPID: 0x%08x\n", ee->nopid); 6051bb76ff1Sjsg err_printf(m, " EXCC: 0x%08x\n", ee->excc); 6061bb76ff1Sjsg err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl); 6071bb76ff1Sjsg err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop); 6081bb76ff1Sjsg err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl); 6091bb76ff1Sjsg err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi); 6101bb76ff1Sjsg err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo); 6111bb76ff1Sjsg } 612c349dbc7Sjsg if (HAS_PPGTT(m->i915)) { 6137f4dd379Sjsg err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); 6147f4dd379Sjsg 6155ca02815Sjsg if (GRAPHICS_VER(m->i915) >= 8) { 6163253c27bSkettenis int i; 6173253c27bSkettenis for (i = 0; i < 4; i++) 6183253c27bSkettenis err_printf(m, " PDP%d: 0x%016llx\n", 6197f4dd379Sjsg i, ee->vm_info.pdp[i]); 6203253c27bSkettenis } else { 6213253c27bSkettenis err_printf(m, " PP_DIR_BASE: 0x%08x\n", 6227f4dd379Sjsg ee->vm_info.pp_dir_base); 6233253c27bSkettenis } 6243253c27bSkettenis } 6257f4dd379Sjsg 6267f4dd379Sjsg for (n = 0; n < ee->num_ports; n++) { 6277f4dd379Sjsg err_printf(m, " ELSP[%d]:", n); 628c349dbc7Sjsg error_print_request(m, " ", &ee->execlist[n]); 6297f4dd379Sjsg } 630e1001332Skettenis } 631e1001332Skettenis 632e1001332Skettenis void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 633e1001332Skettenis { 634e1001332Skettenis va_list args; 635e1001332Skettenis 636e1001332Skettenis va_start(args, f); 637e1001332Skettenis i915_error_vprintf(e, f, args); 638e1001332Skettenis va_end(args); 639e1001332Skettenis } 640e1001332Skettenis 6411bb76ff1Sjsg void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m, 642c349dbc7Sjsg const struct intel_engine_cs *engine, 643c349dbc7Sjsg const struct i915_vma_coredump *vma) 6443253c27bSkettenis { 6457f4dd379Sjsg STUB(); 6467f4dd379Sjsg #ifdef notyet 6477f4dd379Sjsg char out[ASCII85_BUFSZ]; 6481bb76ff1Sjsg struct vm_page *page; 6493253c27bSkettenis 650c349dbc7Sjsg if (!vma) 6517f4dd379Sjsg return; 6527f4dd379Sjsg 6537f4dd379Sjsg err_printf(m, "%s --- %s = 0x%08x %08x\n", 654c349dbc7Sjsg engine ? 
engine->name : "global", vma->name, 655c349dbc7Sjsg upper_32_bits(vma->gtt_offset), 656c349dbc7Sjsg lower_32_bits(vma->gtt_offset)); 657c349dbc7Sjsg 658c349dbc7Sjsg if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) 659c349dbc7Sjsg err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); 6603253c27bSkettenis 6617f4dd379Sjsg err_compression_marker(m); 6621bb76ff1Sjsg list_for_each_entry(page, &vma->page_list, lru) { 6637f4dd379Sjsg int i, len; 6641bb76ff1Sjsg const u32 *addr = page_address(page); 6657f4dd379Sjsg 6667f4dd379Sjsg len = PAGE_SIZE; 6671bb76ff1Sjsg if (page == list_last_entry(&vma->page_list, typeof(*page), lru)) 668c349dbc7Sjsg len -= vma->unused; 6697f4dd379Sjsg len = ascii85_encode_len(len); 6707f4dd379Sjsg 6717f4dd379Sjsg for (i = 0; i < len; i++) 6721bb76ff1Sjsg err_puts(m, ascii85_encode(addr[i], out)); 6737f4dd379Sjsg } 6747f4dd379Sjsg err_puts(m, "\n"); 6757f4dd379Sjsg #endif 6767f4dd379Sjsg } 6777f4dd379Sjsg 6787f4dd379Sjsg static void err_print_capabilities(struct drm_i915_error_state_buf *m, 679ad8b1aafSjsg struct i915_gpu_coredump *error) 6803253c27bSkettenis { 6817f4dd379Sjsg struct drm_printer p = i915_error_printer(m); 6823253c27bSkettenis 6831bb76ff1Sjsg intel_device_info_print(&error->device_info, &error->runtime_info, &p); 684f005ef32Sjsg intel_display_device_info_print(&error->display_device_info, 685f005ef32Sjsg &error->display_runtime_info, &p); 686ad8b1aafSjsg intel_driver_caps_print(&error->driver_caps, &p); 6873253c27bSkettenis } 6887f4dd379Sjsg 6897f4dd379Sjsg static void err_print_params(struct drm_i915_error_state_buf *m, 6907f4dd379Sjsg const struct i915_params *params) 6917f4dd379Sjsg { 6927f4dd379Sjsg struct drm_printer p = i915_error_printer(m); 6937f4dd379Sjsg 6947f4dd379Sjsg i915_params_dump(params, &p); 6953253c27bSkettenis } 6967f4dd379Sjsg 6977f4dd379Sjsg static void err_print_pciid(struct drm_i915_error_state_buf *m, 6987f4dd379Sjsg struct drm_i915_private *i915) 6997f4dd379Sjsg { 7007f4dd379Sjsg struct pci_dev *pdev = i915->drm.pdev; 7017f4dd379Sjsg 7027f4dd379Sjsg err_printf(m, "PCI ID: 0x%04x\n", pdev->device); 7037f4dd379Sjsg err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); 7047f4dd379Sjsg err_printf(m, "PCI Subsystem: %04x:%04x\n", 7057f4dd379Sjsg pdev->subsystem_vendor, 7067f4dd379Sjsg pdev->subsystem_device); 7077f4dd379Sjsg } 7087f4dd379Sjsg 7091bb76ff1Sjsg static void err_print_guc_ctb(struct drm_i915_error_state_buf *m, 7101bb76ff1Sjsg const char *name, 7111bb76ff1Sjsg const struct intel_ctb_coredump *ctb) 7121bb76ff1Sjsg { 7131bb76ff1Sjsg if (!ctb->size) 7141bb76ff1Sjsg return; 7151bb76ff1Sjsg 7161bb76ff1Sjsg err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n", 7171bb76ff1Sjsg name, ctb->raw_status, ctb->raw_head, ctb->raw_tail, 7181bb76ff1Sjsg ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size); 7191bb76ff1Sjsg } 7201bb76ff1Sjsg 7217f4dd379Sjsg static void err_print_uc(struct drm_i915_error_state_buf *m, 722c349dbc7Sjsg const struct intel_uc_coredump *error_uc) 7237f4dd379Sjsg { 7247f4dd379Sjsg struct drm_printer p = i915_error_printer(m); 7257f4dd379Sjsg 7267f4dd379Sjsg intel_uc_fw_dump(&error_uc->guc_fw, &p); 7277f4dd379Sjsg intel_uc_fw_dump(&error_uc->huc_fw, &p); 7281bb76ff1Sjsg err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp); 7291bb76ff1Sjsg intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log); 7301bb76ff1Sjsg err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence); 7311bb76ff1Sjsg err_print_guc_ctb(m, 
"Send", error_uc->guc.ctb + 0); 7321bb76ff1Sjsg err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1); 7331bb76ff1Sjsg intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb); 7343253c27bSkettenis } 7353253c27bSkettenis 736c349dbc7Sjsg static void err_free_sgl(struct scatterlist *sgl) 737e1001332Skettenis { 738c349dbc7Sjsg STUB(); 739c349dbc7Sjsg #ifdef notyet 740c349dbc7Sjsg while (sgl) { 741c349dbc7Sjsg struct scatterlist *sg; 742e1001332Skettenis 743c349dbc7Sjsg for (sg = sgl; !sg_is_chain(sg); sg++) { 744c349dbc7Sjsg kfree(sg_virt(sg)); 745c349dbc7Sjsg if (sg_is_last(sg)) 746c349dbc7Sjsg break; 747e1001332Skettenis } 748e1001332Skettenis 749c349dbc7Sjsg sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg); 750c349dbc7Sjsg free_page((unsigned long)sgl); 751c349dbc7Sjsg sgl = sg; 752c349dbc7Sjsg } 753c349dbc7Sjsg #endif 754c349dbc7Sjsg } 755c349dbc7Sjsg 756ad8b1aafSjsg static void err_print_gt_info(struct drm_i915_error_state_buf *m, 757ad8b1aafSjsg struct intel_gt_coredump *gt) 758ad8b1aafSjsg { 759ad8b1aafSjsg struct drm_printer p = i915_error_printer(m); 760ad8b1aafSjsg 761ad8b1aafSjsg intel_gt_info_print(>->info, &p); 7621bb76ff1Sjsg intel_sseu_print_topology(gt->_gt->i915, >->info.sseu, &p); 763ad8b1aafSjsg } 764ad8b1aafSjsg 7651bb76ff1Sjsg static void err_print_gt_display(struct drm_i915_error_state_buf *m, 766c349dbc7Sjsg struct intel_gt_coredump *gt) 767c349dbc7Sjsg { 7681bb76ff1Sjsg err_printf(m, "IER: 0x%08x\n", gt->ier); 7691bb76ff1Sjsg err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr); 7701bb76ff1Sjsg } 7711bb76ff1Sjsg 7721bb76ff1Sjsg static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m, 7731bb76ff1Sjsg struct intel_gt_coredump *gt) 7741bb76ff1Sjsg { 775c349dbc7Sjsg int i; 776c349dbc7Sjsg 7771bb76ff1Sjsg err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake)); 7781bb76ff1Sjsg err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n", 7791bb76ff1Sjsg gt->clock_frequency, gt->clock_period_ns); 780c349dbc7Sjsg err_printf(m, "EIR: 0x%08x\n", gt->eir); 7811bb76ff1Sjsg err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er); 7821bb76ff1Sjsg 783c349dbc7Sjsg for (i = 0; i < gt->ngtier; i++) 784c349dbc7Sjsg err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]); 7851bb76ff1Sjsg } 786c349dbc7Sjsg 7871bb76ff1Sjsg static void err_print_gt_global(struct drm_i915_error_state_buf *m, 7881bb76ff1Sjsg struct intel_gt_coredump *gt) 7891bb76ff1Sjsg { 7901bb76ff1Sjsg err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake); 791c349dbc7Sjsg 7925ca02815Sjsg if (IS_GRAPHICS_VER(m->i915, 6, 11)) { 793c349dbc7Sjsg err_printf(m, "ERROR: 0x%08x\n", gt->error); 794c349dbc7Sjsg err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg); 795c349dbc7Sjsg } 796c349dbc7Sjsg 7975ca02815Sjsg if (GRAPHICS_VER(m->i915) >= 8) 798c349dbc7Sjsg err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", 799c349dbc7Sjsg gt->fault_data1, gt->fault_data0); 800c349dbc7Sjsg 8015ca02815Sjsg if (GRAPHICS_VER(m->i915) == 7) 802c349dbc7Sjsg err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int); 803c349dbc7Sjsg 8045ca02815Sjsg if (IS_GRAPHICS_VER(m->i915, 8, 11)) 805c349dbc7Sjsg err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache); 806c349dbc7Sjsg 8075ca02815Sjsg if (GRAPHICS_VER(m->i915) == 12) 808c349dbc7Sjsg err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err); 809c349dbc7Sjsg 8105ca02815Sjsg if (GRAPHICS_VER(m->i915) >= 12) { 811c349dbc7Sjsg int i; 812c349dbc7Sjsg 8131bb76ff1Sjsg for (i = 0; i < I915_MAX_SFC; i++) { 81450b0eff2Sjsg /* 81550b0eff2Sjsg * SFC_DONE resides in the VD forcewake domain, so it 81650b0eff2Sjsg * only exists if the corresponding 
VCS engine is 81750b0eff2Sjsg * present. 81850b0eff2Sjsg */ 8191bb76ff1Sjsg if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 || 8201bb76ff1Sjsg !HAS_ENGINE(gt->_gt, _VCS(i * 2))) 82150b0eff2Sjsg continue; 82250b0eff2Sjsg 823c349dbc7Sjsg err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, 824c349dbc7Sjsg gt->sfc_done[i]); 82550b0eff2Sjsg } 826c349dbc7Sjsg 827c349dbc7Sjsg err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done); 828c349dbc7Sjsg } 8291bb76ff1Sjsg } 8301bb76ff1Sjsg 8311bb76ff1Sjsg static void err_print_gt_fences(struct drm_i915_error_state_buf *m, 8321bb76ff1Sjsg struct intel_gt_coredump *gt) 8331bb76ff1Sjsg { 8341bb76ff1Sjsg int i; 8351bb76ff1Sjsg 8361bb76ff1Sjsg for (i = 0; i < gt->nfence; i++) 8371bb76ff1Sjsg err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]); 8381bb76ff1Sjsg } 8391bb76ff1Sjsg 8401bb76ff1Sjsg static void err_print_gt_engines(struct drm_i915_error_state_buf *m, 8411bb76ff1Sjsg struct intel_gt_coredump *gt) 8421bb76ff1Sjsg { 8431bb76ff1Sjsg const struct intel_engine_coredump *ee; 844c349dbc7Sjsg 845c349dbc7Sjsg for (ee = gt->engine; ee; ee = ee->next) { 846c349dbc7Sjsg const struct i915_vma_coredump *vma; 847c349dbc7Sjsg 848f005ef32Sjsg if (gt->uc && gt->uc->guc.is_guc_capture) { 8491bb76ff1Sjsg if (ee->guc_capture_node) 8501bb76ff1Sjsg intel_guc_capture_print_engine_node(m, ee); 8511bb76ff1Sjsg else 852f005ef32Sjsg err_printf(m, " Missing GuC capture node for %s\n", 853f005ef32Sjsg ee->engine->name); 854f005ef32Sjsg } else { 855c349dbc7Sjsg error_print_engine(m, ee); 856f005ef32Sjsg } 8571bb76ff1Sjsg 8581bb76ff1Sjsg err_printf(m, " hung: %u\n", ee->hung); 8591bb76ff1Sjsg err_printf(m, " engine reset count: %u\n", ee->reset_count); 8601bb76ff1Sjsg error_print_context(m, " Active context: ", &ee->context); 8611bb76ff1Sjsg 862c349dbc7Sjsg for (vma = ee->vma; vma; vma = vma->next) 8631bb76ff1Sjsg intel_gpu_error_print_vma(m, ee->engine, vma); 864c349dbc7Sjsg } 865c349dbc7Sjsg 866c349dbc7Sjsg } 867c349dbc7Sjsg 868c349dbc7Sjsg static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, 869c349dbc7Sjsg struct i915_gpu_coredump *error) 870c349dbc7Sjsg { 871c349dbc7Sjsg const struct intel_engine_coredump *ee; 872c349dbc7Sjsg struct timespec64 ts; 873c349dbc7Sjsg 8747f4dd379Sjsg if (*error->error_msg) 8753253c27bSkettenis err_printf(m, "%s\n", error->error_msg); 876c349dbc7Sjsg #ifdef __linux__ 877c349dbc7Sjsg err_printf(m, "Kernel: %s %s\n", 878c349dbc7Sjsg init_utsname()->release, 879c349dbc7Sjsg init_utsname()->machine); 880c349dbc7Sjsg #else 881c349dbc7Sjsg extern char machine[]; 882c349dbc7Sjsg err_printf(m, "Kernel: %s %s\n", 883c349dbc7Sjsg osrelease, 884c349dbc7Sjsg machine); 885c349dbc7Sjsg #endif 886c349dbc7Sjsg err_printf(m, "Driver: %s\n", DRIVER_DATE); 8877f4dd379Sjsg ts = ktime_to_timespec64(error->time); 8887f4dd379Sjsg err_printf(m, "Time: %lld s %ld us\n", 8897f4dd379Sjsg (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 8907f4dd379Sjsg ts = ktime_to_timespec64(error->boottime); 8917f4dd379Sjsg err_printf(m, "Boottime: %lld s %ld us\n", 8927f4dd379Sjsg (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 8937f4dd379Sjsg ts = ktime_to_timespec64(error->uptime); 8947f4dd379Sjsg err_printf(m, "Uptime: %lld s %ld us\n", 8957f4dd379Sjsg (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); 896c349dbc7Sjsg err_printf(m, "Capture: %lu jiffies; %d ms ago\n", 897c349dbc7Sjsg error->capture, jiffies_to_msecs(jiffies - error->capture)); 8987f4dd379Sjsg 899c349dbc7Sjsg for (ee = error->gt ? 
error->gt->engine : NULL; ee; ee = ee->next) 900c349dbc7Sjsg err_printf(m, "Active process (on ring %s): %s [%d]\n", 901c349dbc7Sjsg ee->engine->name, 902c349dbc7Sjsg ee->context.comm, 903c349dbc7Sjsg ee->context.pid); 904c349dbc7Sjsg 9053253c27bSkettenis err_printf(m, "Reset count: %u\n", error->reset_count); 9063253c27bSkettenis err_printf(m, "Suspend count: %u\n", error->suspend_count); 9077f4dd379Sjsg err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); 908c349dbc7Sjsg err_printf(m, "Subplatform: 0x%x\n", 909c349dbc7Sjsg intel_subplatform(&error->runtime_info, 910c349dbc7Sjsg error->device_info.platform)); 911c349dbc7Sjsg err_print_pciid(m, m->i915); 9127f4dd379Sjsg 9133253c27bSkettenis err_printf(m, "IOMMU enabled?: %d\n", error->iommu); 9147f4dd379Sjsg 9151bb76ff1Sjsg intel_dmc_print_error_state(m, m->i915); 9167f4dd379Sjsg 9171bb76ff1Sjsg err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock)); 9181bb76ff1Sjsg err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended)); 9191bb76ff1Sjsg 9201bb76ff1Sjsg if (error->gt) { 9211bb76ff1Sjsg bool print_guc_capture = false; 9221bb76ff1Sjsg 9231bb76ff1Sjsg if (error->gt->uc && error->gt->uc->guc.is_guc_capture) 9241bb76ff1Sjsg print_guc_capture = true; 9251bb76ff1Sjsg 9261bb76ff1Sjsg err_print_gt_display(m, error->gt); 9271bb76ff1Sjsg err_print_gt_global_nonguc(m, error->gt); 9281bb76ff1Sjsg err_print_gt_fences(m, error->gt); 9291bb76ff1Sjsg 9301bb76ff1Sjsg /* 9311bb76ff1Sjsg * GuC dumped global, eng-class and eng-instance registers together 9321bb76ff1Sjsg * as part of engine state dump so we print in err_print_gt_engines 9331bb76ff1Sjsg */ 9341bb76ff1Sjsg if (!print_guc_capture) 9351bb76ff1Sjsg err_print_gt_global(m, error->gt); 9361bb76ff1Sjsg 9371bb76ff1Sjsg err_print_gt_engines(m, error->gt); 9381bb76ff1Sjsg 9391bb76ff1Sjsg if (error->gt->uc) 9401bb76ff1Sjsg err_print_uc(m, error->gt->uc); 9411bb76ff1Sjsg 9421bb76ff1Sjsg err_print_gt_info(m, error->gt); 9437f4dd379Sjsg } 9447f4dd379Sjsg 945e1001332Skettenis if (error->overlay) 946e1001332Skettenis intel_overlay_print_error_state(m, error->overlay); 947e1001332Skettenis 948ad8b1aafSjsg err_print_capabilities(m, error); 9497f4dd379Sjsg err_print_params(m, &error->params); 950c349dbc7Sjsg } 9517f4dd379Sjsg 952c349dbc7Sjsg static int err_print_to_sgl(struct i915_gpu_coredump *error) 953c349dbc7Sjsg { 954c349dbc7Sjsg struct drm_i915_error_state_buf m; 955c349dbc7Sjsg 956c349dbc7Sjsg if (IS_ERR(error)) 957c349dbc7Sjsg return PTR_ERR(error); 958c349dbc7Sjsg 959c349dbc7Sjsg if (READ_ONCE(error->sgl)) 960c349dbc7Sjsg return 0; 961c349dbc7Sjsg 962c349dbc7Sjsg memset(&m, 0, sizeof(m)); 963c349dbc7Sjsg m.i915 = error->i915; 964c349dbc7Sjsg 965c349dbc7Sjsg __err_print_to_sgl(&m, error); 966c349dbc7Sjsg 967c349dbc7Sjsg if (m.buf) { 968c349dbc7Sjsg __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter); 969c349dbc7Sjsg m.bytes = 0; 970c349dbc7Sjsg m.buf = NULL; 971c349dbc7Sjsg } 972c349dbc7Sjsg if (m.cur) { 973c349dbc7Sjsg GEM_BUG_ON(m.end < m.cur); 974c349dbc7Sjsg sg_mark_end(m.cur - 1); 975c349dbc7Sjsg } 976c349dbc7Sjsg GEM_BUG_ON(m.sgl && !m.cur); 977c349dbc7Sjsg 978c349dbc7Sjsg if (m.err) { 979c349dbc7Sjsg err_free_sgl(m.sgl); 980c349dbc7Sjsg return m.err; 981c349dbc7Sjsg } 982c349dbc7Sjsg 983c349dbc7Sjsg if (cmpxchg(&error->sgl, NULL, m.sgl)) 984c349dbc7Sjsg err_free_sgl(m.sgl); 985e1001332Skettenis 986e1001332Skettenis return 0; 987e1001332Skettenis } 988e1001332Skettenis 989c349dbc7Sjsg ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, 
990c349dbc7Sjsg char *buf, loff_t off, size_t rem) 991e1001332Skettenis { 992c349dbc7Sjsg STUB(); 993c349dbc7Sjsg return -ENOSYS; 994c349dbc7Sjsg #ifdef notyet 995c349dbc7Sjsg struct scatterlist *sg; 996c349dbc7Sjsg size_t count; 997c349dbc7Sjsg loff_t pos; 998c349dbc7Sjsg int err; 999e1001332Skettenis 1000c349dbc7Sjsg if (!error || !rem) 1001e1001332Skettenis return 0; 1002c349dbc7Sjsg 1003c349dbc7Sjsg err = err_print_to_sgl(error); 1004c349dbc7Sjsg if (err) 1005c349dbc7Sjsg return err; 1006c349dbc7Sjsg 1007c349dbc7Sjsg sg = READ_ONCE(error->fit); 1008c349dbc7Sjsg if (!sg || off < sg->dma_address) 1009c349dbc7Sjsg sg = error->sgl; 1010c349dbc7Sjsg if (!sg) 1011c349dbc7Sjsg return 0; 1012c349dbc7Sjsg 1013c349dbc7Sjsg pos = sg->dma_address; 1014c349dbc7Sjsg count = 0; 1015c349dbc7Sjsg do { 1016c349dbc7Sjsg size_t len, start; 1017c349dbc7Sjsg 1018c349dbc7Sjsg if (sg_is_chain(sg)) { 1019c349dbc7Sjsg sg = sg_chain_ptr(sg); 1020c349dbc7Sjsg GEM_BUG_ON(sg_is_chain(sg)); 1021e1001332Skettenis } 1022e1001332Skettenis 1023c349dbc7Sjsg len = sg->length; 1024c349dbc7Sjsg if (pos + len <= off) { 1025c349dbc7Sjsg pos += len; 1026c349dbc7Sjsg continue; 1027c349dbc7Sjsg } 1028c349dbc7Sjsg 1029c349dbc7Sjsg start = sg->offset; 1030c349dbc7Sjsg if (pos < off) { 1031c349dbc7Sjsg GEM_BUG_ON(off - pos > len); 1032c349dbc7Sjsg len -= off - pos; 1033c349dbc7Sjsg start += off - pos; 1034c349dbc7Sjsg pos = off; 1035c349dbc7Sjsg } 1036c349dbc7Sjsg 1037c349dbc7Sjsg len = min(len, rem); 1038c349dbc7Sjsg GEM_BUG_ON(!len || len > sg->length); 1039c349dbc7Sjsg 1040c349dbc7Sjsg memcpy(buf, page_address(sg_page(sg)) + start, len); 1041c349dbc7Sjsg 1042c349dbc7Sjsg count += len; 1043c349dbc7Sjsg pos += len; 1044c349dbc7Sjsg 1045c349dbc7Sjsg buf += len; 1046c349dbc7Sjsg rem -= len; 1047c349dbc7Sjsg if (!rem) { 1048c349dbc7Sjsg WRITE_ONCE(error->fit, sg); 1049c349dbc7Sjsg break; 1050c349dbc7Sjsg } 1051c349dbc7Sjsg } while (!sg_is_last(sg++)); 1052c349dbc7Sjsg 1053c349dbc7Sjsg return count; 1054c349dbc7Sjsg #endif 1055c349dbc7Sjsg } 1056c349dbc7Sjsg 1057c349dbc7Sjsg static void i915_vma_coredump_free(struct i915_vma_coredump *vma) 1058e1001332Skettenis { 10591bb76ff1Sjsg STUB(); 10601bb76ff1Sjsg #ifdef notyet 1061c349dbc7Sjsg while (vma) { 1062c349dbc7Sjsg struct i915_vma_coredump *next = vma->next; 10631bb76ff1Sjsg struct vm_page *page, *n; 1064e1001332Skettenis 10651bb76ff1Sjsg list_for_each_entry_safe(page, n, &vma->page_list, lru) { 10661bb76ff1Sjsg list_del_init(&page->lru); 10671bb76ff1Sjsg __free_page(page); 10681bb76ff1Sjsg } 1069e1001332Skettenis 1070c349dbc7Sjsg kfree(vma); 1071c349dbc7Sjsg vma = next; 1072c349dbc7Sjsg } 10731bb76ff1Sjsg #endif 1074e1001332Skettenis } 1075e1001332Skettenis 1076c349dbc7Sjsg static void cleanup_params(struct i915_gpu_coredump *error) 1077e1001332Skettenis { 1078c349dbc7Sjsg i915_params_free(&error->params); 1079e1001332Skettenis } 1080e1001332Skettenis 1081c349dbc7Sjsg static void cleanup_uc(struct intel_uc_coredump *uc) 10827f4dd379Sjsg { 10831bb76ff1Sjsg kfree(uc->guc_fw.file_selected.path); 10841bb76ff1Sjsg kfree(uc->huc_fw.file_selected.path); 10851bb76ff1Sjsg kfree(uc->guc_fw.file_wanted.path); 10861bb76ff1Sjsg kfree(uc->huc_fw.file_wanted.path); 10871bb76ff1Sjsg i915_vma_coredump_free(uc->guc.vma_log); 10881bb76ff1Sjsg i915_vma_coredump_free(uc->guc.vma_ctb); 1089c349dbc7Sjsg 1090c349dbc7Sjsg kfree(uc); 10917f4dd379Sjsg } 10923253c27bSkettenis 1093c349dbc7Sjsg static void cleanup_gt(struct intel_gt_coredump *gt) 10947f4dd379Sjsg { 1095c349dbc7Sjsg while (gt->engine) { 
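/* Unlink each engine coredump, then free its vma captures and GuC capture node. */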
1096c349dbc7Sjsg struct intel_engine_coredump *ee = gt->engine; 10977f4dd379Sjsg 1098c349dbc7Sjsg gt->engine = ee->next; 1099c349dbc7Sjsg 1100c349dbc7Sjsg i915_vma_coredump_free(ee->vma); 11011bb76ff1Sjsg intel_guc_capture_free_node(ee); 1102c349dbc7Sjsg kfree(ee); 11037f4dd379Sjsg } 11047f4dd379Sjsg 1105c349dbc7Sjsg if (gt->uc) 1106c349dbc7Sjsg cleanup_uc(gt->uc); 1107c349dbc7Sjsg 1108c349dbc7Sjsg kfree(gt); 1109c349dbc7Sjsg } 1110c349dbc7Sjsg 1111c349dbc7Sjsg void __i915_gpu_coredump_free(struct kref *error_ref) 11127f4dd379Sjsg { 1113c349dbc7Sjsg struct i915_gpu_coredump *error = 11147f4dd379Sjsg container_of(error_ref, typeof(*error), ref); 11157f4dd379Sjsg 1116c349dbc7Sjsg while (error->gt) { 1117c349dbc7Sjsg struct intel_gt_coredump *gt = error->gt; 11187f4dd379Sjsg 1119c349dbc7Sjsg error->gt = gt->next; 1120c349dbc7Sjsg cleanup_gt(gt); 11217f4dd379Sjsg } 11227f4dd379Sjsg 1123e1001332Skettenis kfree(error->overlay); 11247f4dd379Sjsg 11257f4dd379Sjsg cleanup_params(error); 11267f4dd379Sjsg 1127c349dbc7Sjsg err_free_sgl(error->sgl); 1128e1001332Skettenis kfree(error); 1129e1001332Skettenis } 1130e1001332Skettenis 1131c349dbc7Sjsg static struct i915_vma_coredump * 1132c349dbc7Sjsg i915_vma_coredump_create(const struct intel_gt *gt, 11331bb76ff1Sjsg const struct i915_vma_resource *vma_res, 11341bb76ff1Sjsg struct i915_vma_compress *compress, 11351bb76ff1Sjsg const char *name) 11361bb76ff1Sjsg 1137e1001332Skettenis { 1138c349dbc7Sjsg STUB(); 1139c349dbc7Sjsg return NULL; 1140c349dbc7Sjsg #ifdef notyet 1141c349dbc7Sjsg struct i915_ggtt *ggtt = gt->ggtt; 11427f4dd379Sjsg const u64 slot = ggtt->error_capture.start; 1143c349dbc7Sjsg struct i915_vma_coredump *dst; 11447f4dd379Sjsg struct sgt_iter iter; 11457f4dd379Sjsg int ret; 1146e1001332Skettenis 1147c349dbc7Sjsg might_sleep(); 1148c349dbc7Sjsg 11491bb76ff1Sjsg if (!vma_res || !vma_res->bi.pages || !compress) 11507f4dd379Sjsg return NULL; 1151e1001332Skettenis 11521bb76ff1Sjsg dst = kmalloc(sizeof(*dst), ALLOW_FAIL); 11537f4dd379Sjsg if (!dst) 11547f4dd379Sjsg return NULL; 1155e1001332Skettenis 1156c349dbc7Sjsg if (!compress_start(compress)) { 1157c349dbc7Sjsg kfree(dst); 1158c349dbc7Sjsg return NULL; 1159c349dbc7Sjsg } 1160c349dbc7Sjsg 11611bb76ff1Sjsg INIT_LIST_HEAD(&dst->page_list); 1162c349dbc7Sjsg strlcpy(dst->name, name, sizeof(dst->name)); 1163c349dbc7Sjsg dst->next = NULL; 1164c349dbc7Sjsg 11651bb76ff1Sjsg dst->gtt_offset = vma_res->start; 11661bb76ff1Sjsg dst->gtt_size = vma_res->node_size; 11671bb76ff1Sjsg dst->gtt_page_sizes = vma_res->page_sizes_gtt; 11687f4dd379Sjsg dst->unused = 0; 1169e1001332Skettenis 11707f4dd379Sjsg ret = -EINVAL; 1171c349dbc7Sjsg if (drm_mm_node_allocated(&ggtt->error_capture)) { 11727f4dd379Sjsg void __iomem *s; 1173c349dbc7Sjsg dma_addr_t dma; 11747f4dd379Sjsg 11751bb76ff1Sjsg for_each_sgt_daddr(dma, iter, vma_res->bi.pages) { 11765ca02815Sjsg mutex_lock(&ggtt->error_mutex); 11771bb76ff1Sjsg if (ggtt->vm.raw_insert_page) 11781bb76ff1Sjsg ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot, 1179f005ef32Sjsg i915_gem_get_pat_index(gt->i915, 1180f005ef32Sjsg I915_CACHE_NONE), 1181f005ef32Sjsg 0); 11821bb76ff1Sjsg else 1183c349dbc7Sjsg ggtt->vm.insert_page(&ggtt->vm, dma, slot, 1184f005ef32Sjsg i915_gem_get_pat_index(gt->i915, 1185f005ef32Sjsg I915_CACHE_NONE), 1186f005ef32Sjsg 0); 1187c349dbc7Sjsg mb(); 11887f4dd379Sjsg 1189c349dbc7Sjsg s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); 1190c349dbc7Sjsg ret = compress_page(compress, 1191c349dbc7Sjsg (void __force *)s, dst, 1192c349dbc7Sjsg true); 
1193c349dbc7Sjsg io_mapping_unmap(s); 11945ca02815Sjsg 11955ca02815Sjsg mb(); 11965ca02815Sjsg ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); 11975ca02815Sjsg mutex_unlock(&ggtt->error_mutex); 11987f4dd379Sjsg if (ret) 11997f4dd379Sjsg break; 12007f4dd379Sjsg } 12011bb76ff1Sjsg } else if (vma_res->bi.lmem) { 12021bb76ff1Sjsg struct intel_memory_region *mem = vma_res->mr; 1203c349dbc7Sjsg dma_addr_t dma; 12047f4dd379Sjsg 12051bb76ff1Sjsg for_each_sgt_daddr(dma, iter, vma_res->bi.pages) { 12061bb76ff1Sjsg dma_addr_t offset = dma - mem->region.start; 1207c349dbc7Sjsg void __iomem *s; 1208c349dbc7Sjsg 120952571687Sjsg if (offset + PAGE_SIZE > resource_size(&mem->io)) { 12101bb76ff1Sjsg ret = -EINVAL; 12111bb76ff1Sjsg break; 12121bb76ff1Sjsg } 12131bb76ff1Sjsg 12141bb76ff1Sjsg s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE); 1215c349dbc7Sjsg ret = compress_page(compress, 1216c349dbc7Sjsg (void __force *)s, dst, 1217c349dbc7Sjsg true); 1218c349dbc7Sjsg io_mapping_unmap(s); 1219c349dbc7Sjsg if (ret) 1220c349dbc7Sjsg break; 1221c349dbc7Sjsg } 1222c349dbc7Sjsg } else { 1223c349dbc7Sjsg struct vm_page *page; 1224c349dbc7Sjsg 12251bb76ff1Sjsg for_each_sgt_page(page, iter, vma_res->bi.pages) { 1226c349dbc7Sjsg void *s; 1227c349dbc7Sjsg 1228c349dbc7Sjsg drm_clflush_pages(&page, 1); 1229c349dbc7Sjsg 1230f005ef32Sjsg s = kmap_local_page(page); 1231c349dbc7Sjsg ret = compress_page(compress, s, dst, false); 1232f005ef32Sjsg kunmap_local(s); 1233c349dbc7Sjsg 1234c349dbc7Sjsg drm_clflush_pages(&page, 1); 1235c349dbc7Sjsg 1236c349dbc7Sjsg if (ret) 1237c349dbc7Sjsg break; 1238c349dbc7Sjsg } 1239c349dbc7Sjsg } 1240c349dbc7Sjsg 1241c349dbc7Sjsg if (ret || compress_flush(compress, dst)) { 12421bb76ff1Sjsg struct vm_page *page, *n; 12431bb76ff1Sjsg 12441bb76ff1Sjsg list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) { 12451bb76ff1Sjsg list_del_init(&page->lru); 12461bb76ff1Sjsg pool_free(&compress->pool, page_address(page)); 12471bb76ff1Sjsg } 12481bb76ff1Sjsg 12497f4dd379Sjsg kfree(dst); 12507f4dd379Sjsg dst = NULL; 12517f4dd379Sjsg } 1252c349dbc7Sjsg compress_finish(compress); 12537f4dd379Sjsg 12547f4dd379Sjsg return dst; 12557f4dd379Sjsg #endif 12567f4dd379Sjsg } 12577f4dd379Sjsg 1258c349dbc7Sjsg static void gt_record_fences(struct intel_gt_coredump *gt) 12593253c27bSkettenis { 1260c349dbc7Sjsg struct i915_ggtt *ggtt = gt->_gt->ggtt; 1261c349dbc7Sjsg struct intel_uncore *uncore = gt->_gt->uncore; 1262c349dbc7Sjsg int i; 12633253c27bSkettenis 12645ca02815Sjsg if (GRAPHICS_VER(uncore->i915) >= 6) { 1265c349dbc7Sjsg for (i = 0; i < ggtt->num_fences; i++) 1266c349dbc7Sjsg gt->fence[i] = 1267c349dbc7Sjsg intel_uncore_read64(uncore, 1268c349dbc7Sjsg FENCE_REG_GEN6_LO(i)); 12695ca02815Sjsg } else if (GRAPHICS_VER(uncore->i915) >= 4) { 1270c349dbc7Sjsg for (i = 0; i < ggtt->num_fences; i++) 1271c349dbc7Sjsg gt->fence[i] = 1272c349dbc7Sjsg intel_uncore_read64(uncore, 1273c349dbc7Sjsg FENCE_REG_965_LO(i)); 1274e1001332Skettenis } else { 1275c349dbc7Sjsg for (i = 0; i < ggtt->num_fences; i++) 1276c349dbc7Sjsg gt->fence[i] = 1277c349dbc7Sjsg intel_uncore_read(uncore, FENCE_REG(i)); 12787f4dd379Sjsg } 1279c349dbc7Sjsg gt->nfence = i; 1280e1001332Skettenis } 1281e1001332Skettenis 1282c349dbc7Sjsg static void engine_record_registers(struct intel_engine_coredump *ee) 1283c349dbc7Sjsg { 1284c349dbc7Sjsg const struct intel_engine_cs *engine = ee->engine; 1285c349dbc7Sjsg struct drm_i915_private *i915 = engine->i915; 1286c349dbc7Sjsg 12875ca02815Sjsg if (GRAPHICS_VER(i915) >= 6) { 1288c349dbc7Sjsg ee->rc_psmi 
= ENGINE_READ(engine, RING_PSMI_CTL); 1289c349dbc7Sjsg 1290f005ef32Sjsg if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) 1291f005ef32Sjsg ee->fault_reg = intel_gt_mcr_read_any(engine->gt, 1292f005ef32Sjsg XEHP_RING_FAULT_REG); 1293f005ef32Sjsg else if (GRAPHICS_VER(i915) >= 12) 1294c349dbc7Sjsg ee->fault_reg = intel_uncore_read(engine->uncore, 1295c349dbc7Sjsg GEN12_RING_FAULT_REG); 12965ca02815Sjsg else if (GRAPHICS_VER(i915) >= 8) 1297c349dbc7Sjsg ee->fault_reg = intel_uncore_read(engine->uncore, 1298c349dbc7Sjsg GEN8_RING_FAULT_REG); 1299c349dbc7Sjsg else 1300c349dbc7Sjsg ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); 13017f4dd379Sjsg } 1302c349dbc7Sjsg 13035ca02815Sjsg if (GRAPHICS_VER(i915) >= 4) { 1304c349dbc7Sjsg ee->esr = ENGINE_READ(engine, RING_ESR); 1305c349dbc7Sjsg ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); 1306c349dbc7Sjsg ee->ipeir = ENGINE_READ(engine, RING_IPEIR); 1307c349dbc7Sjsg ee->ipehr = ENGINE_READ(engine, RING_IPEHR); 1308c349dbc7Sjsg ee->instps = ENGINE_READ(engine, RING_INSTPS); 1309c349dbc7Sjsg ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); 1310c349dbc7Sjsg ee->ccid = ENGINE_READ(engine, CCID); 13115ca02815Sjsg if (GRAPHICS_VER(i915) >= 8) { 1312c349dbc7Sjsg ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32; 1313c349dbc7Sjsg ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; 1314c349dbc7Sjsg } 1315c349dbc7Sjsg ee->bbstate = ENGINE_READ(engine, RING_BBSTATE); 13167f4dd379Sjsg } else { 1317c349dbc7Sjsg ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX); 1318c349dbc7Sjsg ee->ipeir = ENGINE_READ(engine, IPEIR); 1319c349dbc7Sjsg ee->ipehr = ENGINE_READ(engine, IPEHR); 13207f4dd379Sjsg } 1321e1001332Skettenis 13221bb76ff1Sjsg if (GRAPHICS_VER(i915) >= 11) { 13231bb76ff1Sjsg ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL); 13241bb76ff1Sjsg ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP); 13251bb76ff1Sjsg ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL); 13261bb76ff1Sjsg ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW); 13271bb76ff1Sjsg ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD); 13281bb76ff1Sjsg ee->nopid = ENGINE_READ(engine, RING_NOPID); 13291bb76ff1Sjsg ee->excc = ENGINE_READ(engine, RING_EXCC); 13301bb76ff1Sjsg } 13311bb76ff1Sjsg 13327f4dd379Sjsg intel_engine_get_instdone(engine, &ee->instdone); 1333e1001332Skettenis 1334c349dbc7Sjsg ee->instpm = ENGINE_READ(engine, RING_INSTPM); 13357f4dd379Sjsg ee->acthd = intel_engine_get_active_head(engine); 1336c349dbc7Sjsg ee->start = ENGINE_READ(engine, RING_START); 1337c349dbc7Sjsg ee->head = ENGINE_READ(engine, RING_HEAD); 1338c349dbc7Sjsg ee->tail = ENGINE_READ(engine, RING_TAIL); 1339c349dbc7Sjsg ee->ctl = ENGINE_READ(engine, RING_CTL); 13405ca02815Sjsg if (GRAPHICS_VER(i915) > 2) 1341c349dbc7Sjsg ee->mode = ENGINE_READ(engine, RING_MI_MODE); 13427f4dd379Sjsg 1343c349dbc7Sjsg if (!HWS_NEEDS_PHYSICAL(i915)) { 13447f4dd379Sjsg i915_reg_t mmio; 13457f4dd379Sjsg 13465ca02815Sjsg if (GRAPHICS_VER(i915) == 7) { 13477f4dd379Sjsg switch (engine->id) { 13483253c27bSkettenis default: 1349c349dbc7Sjsg MISSING_CASE(engine->id); 1350ad8b1aafSjsg fallthrough; 1351c349dbc7Sjsg case RCS0: 13523253c27bSkettenis mmio = RENDER_HWS_PGA_GEN7; 13533253c27bSkettenis break; 1354c349dbc7Sjsg case BCS0: 13553253c27bSkettenis mmio = BLT_HWS_PGA_GEN7; 13563253c27bSkettenis break; 1357c349dbc7Sjsg case VCS0: 13583253c27bSkettenis mmio = BSD_HWS_PGA_GEN7; 13593253c27bSkettenis break; 1360c349dbc7Sjsg case VECS0: 13613253c27bSkettenis mmio = VEBOX_HWS_PGA_GEN7; 13623253c27bSkettenis break; 
13633253c27bSkettenis } 13645ca02815Sjsg } else if (GRAPHICS_VER(engine->i915) == 6) { 13657f4dd379Sjsg mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 13663253c27bSkettenis } else { 13673253c27bSkettenis /* XXX: gen8 returns to sanity */ 13687f4dd379Sjsg mmio = RING_HWS_PGA(engine->mmio_base); 13693253c27bSkettenis } 13703253c27bSkettenis 1371c349dbc7Sjsg ee->hws = intel_uncore_read(engine->uncore, mmio); 13723253c27bSkettenis } 13733253c27bSkettenis 1374c349dbc7Sjsg ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine); 13753253c27bSkettenis 1376c349dbc7Sjsg if (HAS_PPGTT(i915)) { 13773253c27bSkettenis int i; 13783253c27bSkettenis 1379c349dbc7Sjsg ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7); 13803253c27bSkettenis 13815ca02815Sjsg if (GRAPHICS_VER(i915) == 6) { 13827f4dd379Sjsg ee->vm_info.pp_dir_base = 1383c349dbc7Sjsg ENGINE_READ(engine, RING_PP_DIR_BASE_READ); 13845ca02815Sjsg } else if (GRAPHICS_VER(i915) == 7) { 13857f4dd379Sjsg ee->vm_info.pp_dir_base = 1386c349dbc7Sjsg ENGINE_READ(engine, RING_PP_DIR_BASE); 13875ca02815Sjsg } else if (GRAPHICS_VER(i915) >= 8) { 1388c349dbc7Sjsg u32 base = engine->mmio_base; 1389c349dbc7Sjsg 13903253c27bSkettenis for (i = 0; i < 4; i++) { 13917f4dd379Sjsg ee->vm_info.pdp[i] = 1392c349dbc7Sjsg intel_uncore_read(engine->uncore, 1393c349dbc7Sjsg GEN8_RING_PDP_UDW(base, i)); 13947f4dd379Sjsg ee->vm_info.pdp[i] <<= 32; 13957f4dd379Sjsg ee->vm_info.pdp[i] |= 1396c349dbc7Sjsg intel_uncore_read(engine->uncore, 1397c349dbc7Sjsg GEN8_RING_PDP_LDW(base, i)); 1398c349dbc7Sjsg } 13993253c27bSkettenis } 14003253c27bSkettenis } 1401e1001332Skettenis } 1402e1001332Skettenis 1403c349dbc7Sjsg static void record_request(const struct i915_request *request, 1404c349dbc7Sjsg struct i915_request_coredump *erq) 1405e1001332Skettenis { 1406c349dbc7Sjsg erq->flags = request->fence.flags; 1407c349dbc7Sjsg erq->context = request->fence.context; 1408c349dbc7Sjsg erq->seqno = request->fence.seqno; 14097f4dd379Sjsg erq->sched_attr = request->sched.attr; 14107f4dd379Sjsg erq->head = request->head; 14117f4dd379Sjsg erq->tail = request->tail; 14127f4dd379Sjsg 1413c349dbc7Sjsg erq->pid = 0; 14147f4dd379Sjsg rcu_read_lock(); 1415c349dbc7Sjsg if (!intel_context_is_closed(request->context)) { 1416c349dbc7Sjsg const struct i915_gem_context *ctx; 1417c349dbc7Sjsg 1418c349dbc7Sjsg ctx = rcu_dereference(request->context->gem_context); 1419c349dbc7Sjsg if (ctx) 14207f4dd379Sjsg #ifdef __linux__ 1421c349dbc7Sjsg erq->pid = pid_nr(ctx->pid); 14227f4dd379Sjsg #else 14237f4dd379Sjsg erq->pid = ctx->pid; 14247f4dd379Sjsg #endif 1425c349dbc7Sjsg } 14267f4dd379Sjsg rcu_read_unlock(); 14277f4dd379Sjsg } 14287f4dd379Sjsg 1429c349dbc7Sjsg static void engine_record_execlists(struct intel_engine_coredump *ee) 14307f4dd379Sjsg { 1431c349dbc7Sjsg const struct intel_engine_execlists * const el = &ee->engine->execlists; 1432c349dbc7Sjsg struct i915_request * const *port = el->active; 1433c349dbc7Sjsg unsigned int n = 0; 14347f4dd379Sjsg 1435c349dbc7Sjsg while (*port) 1436c349dbc7Sjsg record_request(*port++, &ee->execlist[n++]); 14377f4dd379Sjsg 14387f4dd379Sjsg ee->num_ports = n; 14397f4dd379Sjsg } 14407f4dd379Sjsg 1441c349dbc7Sjsg static bool record_context(struct i915_gem_context_coredump *e, 1442f005ef32Sjsg struct intel_context *ce) 14437f4dd379Sjsg { 1444c349dbc7Sjsg struct i915_gem_context *ctx; 14457f4dd379Sjsg struct task_struct *task; 1446c349dbc7Sjsg bool simulated; 14477f4dd379Sjsg 14487f4dd379Sjsg rcu_read_lock(); 1449f005ef32Sjsg ctx = rcu_dereference(ce->gem_context); 
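/*
 * The GEM context is only stable under RCU here: kref_get_unless_zero()
 * fails if the final reference has already been dropped, so a context
 * that is mid-teardown is treated as absent.
 */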
1450c349dbc7Sjsg if (ctx && !kref_get_unless_zero(&ctx->ref)) 1451c349dbc7Sjsg ctx = NULL; 1452c349dbc7Sjsg rcu_read_unlock(); 1453c349dbc7Sjsg if (!ctx) 1454c349dbc7Sjsg return true; 1455c349dbc7Sjsg 1456c349dbc7Sjsg #ifdef __linux__ 1457c349dbc7Sjsg rcu_read_lock(); 14587f4dd379Sjsg task = pid_task(ctx->pid, PIDTYPE_PID); 14597f4dd379Sjsg if (task) { 14607f4dd379Sjsg strcpy(e->comm, task->comm); 14617f4dd379Sjsg e->pid = task->pid; 14627f4dd379Sjsg } 14637f4dd379Sjsg rcu_read_unlock(); 14647f4dd379Sjsg #endif 14657f4dd379Sjsg 14667f4dd379Sjsg e->sched_attr = ctx->sched; 14677f4dd379Sjsg e->guilty = atomic_read(&ctx->guilty_count); 14687f4dd379Sjsg e->active = atomic_read(&ctx->active_count); 1469f005ef32Sjsg e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ? 1470f005ef32Sjsg *ce->timeline->hwsp_seqno : ~0U; 1471c349dbc7Sjsg 1472f005ef32Sjsg e->total_runtime = intel_context_get_total_runtime_ns(ce); 1473f005ef32Sjsg e->avg_runtime = intel_context_get_avg_runtime_ns(ce); 1474c349dbc7Sjsg 1475c349dbc7Sjsg simulated = i915_gem_context_no_error_capture(ctx); 1476c349dbc7Sjsg 1477c349dbc7Sjsg i915_gem_context_put(ctx); 1478c349dbc7Sjsg return simulated; 14797f4dd379Sjsg } 14807f4dd379Sjsg 1481c349dbc7Sjsg struct intel_engine_capture_vma { 1482c349dbc7Sjsg struct intel_engine_capture_vma *next; 14831bb76ff1Sjsg struct i915_vma_resource *vma_res; 1484c349dbc7Sjsg char name[16]; 14851bb76ff1Sjsg bool lockdep_cookie; 1486c349dbc7Sjsg }; 1487c349dbc7Sjsg 1488c349dbc7Sjsg static struct intel_engine_capture_vma * 14891bb76ff1Sjsg capture_vma_snapshot(struct intel_engine_capture_vma *next, 14901bb76ff1Sjsg struct i915_vma_resource *vma_res, 14911bb76ff1Sjsg gfp_t gfp, const char *name) 14921bb76ff1Sjsg { 14931bb76ff1Sjsg struct intel_engine_capture_vma *c; 14941bb76ff1Sjsg 14951bb76ff1Sjsg if (!vma_res) 14961bb76ff1Sjsg return next; 14971bb76ff1Sjsg 14981bb76ff1Sjsg c = kmalloc(sizeof(*c), gfp); 14991bb76ff1Sjsg if (!c) 15001bb76ff1Sjsg return next; 15011bb76ff1Sjsg 15021bb76ff1Sjsg if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) { 15031bb76ff1Sjsg kfree(c); 15041bb76ff1Sjsg return next; 15051bb76ff1Sjsg } 15061bb76ff1Sjsg 15071bb76ff1Sjsg strlcpy(c->name, name, sizeof(c->name)); 15081bb76ff1Sjsg c->vma_res = i915_vma_resource_get(vma_res); 15091bb76ff1Sjsg 15101bb76ff1Sjsg c->next = next; 15111bb76ff1Sjsg return c; 15121bb76ff1Sjsg } 15131bb76ff1Sjsg 15141bb76ff1Sjsg static struct intel_engine_capture_vma * 1515c349dbc7Sjsg capture_vma(struct intel_engine_capture_vma *next, 1516c349dbc7Sjsg struct i915_vma *vma, 1517c349dbc7Sjsg const char *name, 1518c349dbc7Sjsg gfp_t gfp) 1519c349dbc7Sjsg { 1520c349dbc7Sjsg if (!vma) 1521c349dbc7Sjsg return next; 1522c349dbc7Sjsg 15231bb76ff1Sjsg /* 15241bb76ff1Sjsg * If the vma isn't pinned, then the vma should be snapshotted 15251bb76ff1Sjsg * to a struct i915_vma_snapshot at command submission time. 15261bb76ff1Sjsg * Not here. 
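 * (Those submission-time snapshots travel on the request's capture list
 * and are collected by capture_user() below.)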
15271bb76ff1Sjsg */ 15281bb76ff1Sjsg if (GEM_WARN_ON(!i915_vma_is_pinned(vma))) 1529c349dbc7Sjsg return next; 1530c349dbc7Sjsg 15311bb76ff1Sjsg next = capture_vma_snapshot(next, vma->resource, gfp, name); 15321bb76ff1Sjsg 1533c349dbc7Sjsg return next; 1534c349dbc7Sjsg } 1535c349dbc7Sjsg 1536c349dbc7Sjsg static struct intel_engine_capture_vma * 1537c349dbc7Sjsg capture_user(struct intel_engine_capture_vma *capture, 1538c349dbc7Sjsg const struct i915_request *rq, 1539c349dbc7Sjsg gfp_t gfp) 15407f4dd379Sjsg { 15417f4dd379Sjsg struct i915_capture_list *c; 15427f4dd379Sjsg 1543c349dbc7Sjsg for (c = rq->capture_list; c; c = c->next) 15441bb76ff1Sjsg capture = capture_vma_snapshot(capture, c->vma_res, gfp, 15451bb76ff1Sjsg "user"); 15467f4dd379Sjsg 1547c349dbc7Sjsg return capture; 15487f4dd379Sjsg } 15497f4dd379Sjsg 1550c349dbc7Sjsg static void add_vma(struct intel_engine_coredump *ee, 1551c349dbc7Sjsg struct i915_vma_coredump *vma) 15527f4dd379Sjsg { 1553c349dbc7Sjsg if (vma) { 1554c349dbc7Sjsg vma->next = ee->vma; 1555c349dbc7Sjsg ee->vma = vma; 1556c349dbc7Sjsg } 1557c349dbc7Sjsg } 15587f4dd379Sjsg 15591bb76ff1Sjsg static struct i915_vma_coredump * 15601bb76ff1Sjsg create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma, 15611bb76ff1Sjsg const char *name, struct i915_vma_compress *compress) 15621bb76ff1Sjsg { 15631bb76ff1Sjsg struct i915_vma_coredump *ret = NULL; 15641bb76ff1Sjsg struct i915_vma_resource *vma_res; 15651bb76ff1Sjsg bool lockdep_cookie; 15661bb76ff1Sjsg 15671bb76ff1Sjsg if (!vma) 15681bb76ff1Sjsg return NULL; 15691bb76ff1Sjsg 15701bb76ff1Sjsg vma_res = vma->resource; 15711bb76ff1Sjsg 15721bb76ff1Sjsg if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) { 15731bb76ff1Sjsg ret = i915_vma_coredump_create(gt, vma_res, compress, name); 15741bb76ff1Sjsg i915_vma_resource_unhold(vma_res, lockdep_cookie); 15751bb76ff1Sjsg } 15761bb76ff1Sjsg 15771bb76ff1Sjsg return ret; 15781bb76ff1Sjsg } 15791bb76ff1Sjsg 15801bb76ff1Sjsg static void add_vma_coredump(struct intel_engine_coredump *ee, 15811bb76ff1Sjsg const struct intel_gt *gt, 15821bb76ff1Sjsg struct i915_vma *vma, 15831bb76ff1Sjsg const char *name, 15841bb76ff1Sjsg struct i915_vma_compress *compress) 15851bb76ff1Sjsg { 15861bb76ff1Sjsg add_vma(ee, create_vma_coredump(gt, vma, name, compress)); 15871bb76ff1Sjsg } 15881bb76ff1Sjsg 1589c349dbc7Sjsg struct intel_engine_coredump * 15901bb76ff1Sjsg intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags) 1591c349dbc7Sjsg { 1592c349dbc7Sjsg struct intel_engine_coredump *ee; 15937f4dd379Sjsg 1594c349dbc7Sjsg ee = kzalloc(sizeof(*ee), gfp); 1595c349dbc7Sjsg if (!ee) 1596c349dbc7Sjsg return NULL; 15977f4dd379Sjsg 1598c349dbc7Sjsg ee->engine = engine; 1599e1001332Skettenis 16001bb76ff1Sjsg if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) { 1601c349dbc7Sjsg engine_record_registers(ee); 1602c349dbc7Sjsg engine_record_execlists(ee); 16031bb76ff1Sjsg } 1604e1001332Skettenis 1605c349dbc7Sjsg return ee; 1606c349dbc7Sjsg } 1607e1001332Skettenis 1608f005ef32Sjsg static struct intel_engine_capture_vma * 1609f005ef32Sjsg engine_coredump_add_context(struct intel_engine_coredump *ee, 1610f005ef32Sjsg struct intel_context *ce, 1611f005ef32Sjsg gfp_t gfp) 1612f005ef32Sjsg { 1613f005ef32Sjsg struct intel_engine_capture_vma *vma = NULL; 1614f005ef32Sjsg 1615f005ef32Sjsg ee->simulated |= record_context(&ee->context, ce); 1616f005ef32Sjsg if (ee->simulated) 1617f005ef32Sjsg return NULL; 1618f005ef32Sjsg 1619f005ef32Sjsg /* 1620f005ef32Sjsg * We need to copy these to an 
anonymous buffer 1621f005ef32Sjsg * as the simplest method to avoid being overwritten 1622f005ef32Sjsg * by userspace. 1623f005ef32Sjsg */ 1624f005ef32Sjsg vma = capture_vma(vma, ce->ring->vma, "ring", gfp); 1625f005ef32Sjsg vma = capture_vma(vma, ce->state, "HW context", gfp); 1626f005ef32Sjsg 1627f005ef32Sjsg return vma; 1628f005ef32Sjsg } 1629f005ef32Sjsg 1630c349dbc7Sjsg struct intel_engine_capture_vma * 1631c349dbc7Sjsg intel_engine_coredump_add_request(struct intel_engine_coredump *ee, 1632c349dbc7Sjsg struct i915_request *rq, 1633c349dbc7Sjsg gfp_t gfp) 1634c349dbc7Sjsg { 1635f005ef32Sjsg struct intel_engine_capture_vma *vma; 16363253c27bSkettenis 1637f005ef32Sjsg vma = engine_coredump_add_context(ee, rq->context, gfp); 1638f005ef32Sjsg if (!vma) 1639c349dbc7Sjsg return NULL; 16407f4dd379Sjsg 1641c349dbc7Sjsg /* 1642c349dbc7Sjsg * We need to copy these to an anonymous buffer 16433253c27bSkettenis * as the simplest method to avoid being overwritten 16443253c27bSkettenis * by userspace. 16453253c27bSkettenis */ 16461bb76ff1Sjsg vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch"); 1647c349dbc7Sjsg vma = capture_user(vma, rq, gfp); 16483253c27bSkettenis 1649c349dbc7Sjsg ee->rq_head = rq->head; 1650c349dbc7Sjsg ee->rq_post = rq->postfix; 1651c349dbc7Sjsg ee->rq_tail = rq->tail; 16523253c27bSkettenis 1653c349dbc7Sjsg return vma; 16543253c27bSkettenis } 16553253c27bSkettenis 1656c349dbc7Sjsg void 1657c349dbc7Sjsg intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, 1658c349dbc7Sjsg struct intel_engine_capture_vma *capture, 1659c349dbc7Sjsg struct i915_vma_compress *compress) 16607f4dd379Sjsg { 1661c349dbc7Sjsg const struct intel_engine_cs *engine = ee->engine; 1662e1001332Skettenis 1663c349dbc7Sjsg while (capture) { 1664c349dbc7Sjsg struct intel_engine_capture_vma *this = capture; 16651bb76ff1Sjsg struct i915_vma_resource *vma_res = this->vma_res; 1666e1001332Skettenis 1667c349dbc7Sjsg add_vma(ee, 16681bb76ff1Sjsg i915_vma_coredump_create(engine->gt, vma_res, 16691bb76ff1Sjsg compress, this->name)); 1670e1001332Skettenis 16711bb76ff1Sjsg i915_vma_resource_unhold(vma_res, this->lockdep_cookie); 16721bb76ff1Sjsg i915_vma_resource_put(vma_res); 1673c349dbc7Sjsg 1674c349dbc7Sjsg capture = this->next; 1675c349dbc7Sjsg kfree(this); 1676e1001332Skettenis } 1677e1001332Skettenis 16781bb76ff1Sjsg add_vma_coredump(ee, engine->gt, engine->status_page.vma, 16791bb76ff1Sjsg "HW Status", compress); 1680c349dbc7Sjsg 16811bb76ff1Sjsg add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma, 16821bb76ff1Sjsg "WA context", compress); 1683c349dbc7Sjsg } 1684c349dbc7Sjsg 1685c349dbc7Sjsg static struct intel_engine_coredump * 1686c349dbc7Sjsg capture_engine(struct intel_engine_cs *engine, 16871bb76ff1Sjsg struct i915_vma_compress *compress, 16881bb76ff1Sjsg u32 dump_flags) 1689e1001332Skettenis { 1690c349dbc7Sjsg struct intel_engine_capture_vma *capture = NULL; 1691c349dbc7Sjsg struct intel_engine_coredump *ee; 16925507fcfaSjsg struct intel_context *ce = NULL; 16935ca02815Sjsg struct i915_request *rq = NULL; 16947f4dd379Sjsg 16951bb76ff1Sjsg ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags); 1696c349dbc7Sjsg if (!ee) 1697c349dbc7Sjsg return NULL; 16987f4dd379Sjsg 16995507fcfaSjsg intel_engine_get_hung_entity(engine, &ce, &rq); 1700*eade7d98Sjsg if (rq && !i915_request_started(rq)) { 1701*eade7d98Sjsg /* 1702*eade7d98Sjsg * We also want to know the guc_id of the context, 1703*eade7d98Sjsg * but if we don't have the context reference, then skip 1704*eade7d98Sjsg * printing
it. 1705*eade7d98Sjsg */ 1706*eade7d98Sjsg if (ce) 1707*eade7d98Sjsg drm_info(&engine->gt->i915->drm, 1708*eade7d98Sjsg "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n", 1709f005ef32Sjsg engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id); 1710*eade7d98Sjsg else 1711*eade7d98Sjsg drm_info(&engine->gt->i915->drm, 1712*eade7d98Sjsg "Got hung context on %s with active request %lld:%lld not yet started\n", 1713*eade7d98Sjsg engine->name, rq->fence.context, rq->fence.seqno); 1714*eade7d98Sjsg } 17151bb76ff1Sjsg 1716f005ef32Sjsg if (rq) { 17171bb76ff1Sjsg capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); 1718f005ef32Sjsg i915_request_put(rq); 1719f005ef32Sjsg } else if (ce) { 1720f005ef32Sjsg capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL); 1721f005ef32Sjsg } 1722f005ef32Sjsg 1723f005ef32Sjsg if (capture) { 1724f005ef32Sjsg intel_engine_coredump_add_vma(ee, capture, compress); 1725f005ef32Sjsg 17261bb76ff1Sjsg if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE) 17271bb76ff1Sjsg intel_guc_capture_get_matching_node(engine->gt, ee, ce); 1728f005ef32Sjsg } else { 1729f005ef32Sjsg kfree(ee); 1730f005ef32Sjsg ee = NULL; 1731f005ef32Sjsg } 17321bb76ff1Sjsg 17331bb76ff1Sjsg return ee; 1734c349dbc7Sjsg } 17357f4dd379Sjsg 1736c349dbc7Sjsg static void 1737c349dbc7Sjsg gt_record_engines(struct intel_gt_coredump *gt, 17385ca02815Sjsg intel_engine_mask_t engine_mask, 17391bb76ff1Sjsg struct i915_vma_compress *compress, 17401bb76ff1Sjsg u32 dump_flags) 1741c349dbc7Sjsg { 1742c349dbc7Sjsg struct intel_engine_cs *engine; 1743c349dbc7Sjsg enum intel_engine_id id; 1744c349dbc7Sjsg 1745c349dbc7Sjsg for_each_engine(engine, gt->_gt, id) { 1746c349dbc7Sjsg struct intel_engine_coredump *ee; 1747c349dbc7Sjsg 1748c349dbc7Sjsg /* Refill our page pool before entering atomic section */ 1749c349dbc7Sjsg pool_refill(&compress->pool, ALLOW_FAIL); 1750c349dbc7Sjsg 17511bb76ff1Sjsg ee = capture_engine(engine, compress, dump_flags); 1752c349dbc7Sjsg if (!ee) 17537f4dd379Sjsg continue; 17547f4dd379Sjsg 17555ca02815Sjsg ee->hung = engine->mask & engine_mask; 17565ca02815Sjsg 1757c349dbc7Sjsg gt->simulated |= ee->simulated; 1758c349dbc7Sjsg if (ee->simulated) { 17591bb76ff1Sjsg if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE) 17601bb76ff1Sjsg intel_guc_capture_free_node(ee); 1761c349dbc7Sjsg kfree(ee); 1762c349dbc7Sjsg continue; 1763c349dbc7Sjsg } 1764c349dbc7Sjsg 1765c349dbc7Sjsg ee->next = gt->engine; 1766c349dbc7Sjsg gt->engine = ee; 17677f4dd379Sjsg } 17687f4dd379Sjsg } 17697f4dd379Sjsg 17701bb76ff1Sjsg static void gt_record_guc_ctb(struct intel_ctb_coredump *saved, 17711bb76ff1Sjsg const struct intel_guc_ct_buffer *ctb, 17721bb76ff1Sjsg const void *blob_ptr, struct intel_guc *guc) 17731bb76ff1Sjsg { 17741bb76ff1Sjsg if (!ctb || !ctb->desc) 17751bb76ff1Sjsg return; 17761bb76ff1Sjsg 17771bb76ff1Sjsg saved->raw_status = ctb->desc->status; 17781bb76ff1Sjsg saved->raw_head = ctb->desc->head; 17791bb76ff1Sjsg saved->raw_tail = ctb->desc->tail; 17801bb76ff1Sjsg saved->head = ctb->head; 17811bb76ff1Sjsg saved->tail = ctb->tail; 17821bb76ff1Sjsg saved->size = ctb->size; 17831bb76ff1Sjsg saved->desc_offset = ((void *)ctb->desc) - blob_ptr; 17841bb76ff1Sjsg saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr; 17851bb76ff1Sjsg } 17861bb76ff1Sjsg 1787c349dbc7Sjsg static struct intel_uc_coredump * 1788c349dbc7Sjsg gt_record_uc(struct intel_gt_coredump *gt, 1789c349dbc7Sjsg struct i915_vma_compress *compress) 17907f4dd379Sjsg { 1791c349dbc7Sjsg const struct intel_uc *uc 
= &gt->_gt->uc; 1792c349dbc7Sjsg struct intel_uc_coredump *error_uc; 1793e1001332Skettenis 1794c349dbc7Sjsg error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL); 1795c349dbc7Sjsg if (!error_uc) 1796c349dbc7Sjsg return NULL; 17973253c27bSkettenis 1798c349dbc7Sjsg memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw)); 1799c349dbc7Sjsg memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw)); 1800e1001332Skettenis 18011bb76ff1Sjsg error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL); 18021bb76ff1Sjsg error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL); 18031bb76ff1Sjsg error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL); 18041bb76ff1Sjsg error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL); 18051bb76ff1Sjsg 18061bb76ff1Sjsg /* 18071bb76ff1Sjsg * Save the GuC log and include a timestamp reference for converting the 18081bb76ff1Sjsg * log times to system times (in conjunction with the error->boottime and 18091bb76ff1Sjsg * gt->clock_frequency fields saved elsewhere). 18101bb76ff1Sjsg */ 18111bb76ff1Sjsg error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP); 18121bb76ff1Sjsg error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma, 18131bb76ff1Sjsg "GuC log buffer", compress); 18141bb76ff1Sjsg error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma, 18151bb76ff1Sjsg "GuC CT buffer", compress); 18161bb76ff1Sjsg error_uc->guc.last_fence = uc->guc.ct.requests.last_fence; 18171bb76ff1Sjsg gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send, 18181bb76ff1Sjsg uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc); 18191bb76ff1Sjsg gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv, 18201bb76ff1Sjsg uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc); 1821c349dbc7Sjsg 1822c349dbc7Sjsg return error_uc; 1823c349dbc7Sjsg } 1824c349dbc7Sjsg 18251bb76ff1Sjsg /* Capture display registers. */ 18261bb76ff1Sjsg static void gt_record_display_regs(struct intel_gt_coredump *gt) 18271bb76ff1Sjsg { 18281bb76ff1Sjsg struct intel_uncore *uncore = gt->_gt->uncore; 18291bb76ff1Sjsg struct drm_i915_private *i915 = uncore->i915; 18301bb76ff1Sjsg 18311bb76ff1Sjsg if (GRAPHICS_VER(i915) >= 6) 18321bb76ff1Sjsg gt->derrmr = intel_uncore_read(uncore, DERRMR); 18331bb76ff1Sjsg 18341bb76ff1Sjsg if (GRAPHICS_VER(i915) >= 8) 18351bb76ff1Sjsg gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); 18361bb76ff1Sjsg else if (IS_VALLEYVIEW(i915)) 18371bb76ff1Sjsg gt->ier = intel_uncore_read(uncore, VLV_IER); 18381bb76ff1Sjsg else if (HAS_PCH_SPLIT(i915)) 18391bb76ff1Sjsg gt->ier = intel_uncore_read(uncore, DEIER); 18401bb76ff1Sjsg else if (GRAPHICS_VER(i915) == 2) 18411bb76ff1Sjsg gt->ier = intel_uncore_read16(uncore, GEN2_IER); 18421bb76ff1Sjsg else 18431bb76ff1Sjsg gt->ier = intel_uncore_read(uncore, GEN2_IER); 18441bb76ff1Sjsg } 18451bb76ff1Sjsg 18461bb76ff1Sjsg /* Capture all other registers that GuC doesn't capture.
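 * Registers that GuC does capture are retrieved instead via
 * intel_guc_capture_get_matching_node() in capture_engine() when the
 * dump was triggered by a GuC engine reset.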
*/ 18471bb76ff1Sjsg static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt) 18481bb76ff1Sjsg { 18491bb76ff1Sjsg struct intel_uncore *uncore = gt->_gt->uncore; 18501bb76ff1Sjsg struct drm_i915_private *i915 = uncore->i915; 18511bb76ff1Sjsg int i; 18521bb76ff1Sjsg 18531bb76ff1Sjsg if (IS_VALLEYVIEW(i915)) { 18541bb76ff1Sjsg gt->gtier[0] = intel_uncore_read(uncore, GTIER); 18551bb76ff1Sjsg gt->ngtier = 1; 18561bb76ff1Sjsg } else if (GRAPHICS_VER(i915) >= 11) { 18571bb76ff1Sjsg gt->gtier[0] = 18581bb76ff1Sjsg intel_uncore_read(uncore, 18591bb76ff1Sjsg GEN11_RENDER_COPY_INTR_ENABLE); 18601bb76ff1Sjsg gt->gtier[1] = 18611bb76ff1Sjsg intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE); 18621bb76ff1Sjsg gt->gtier[2] = 18631bb76ff1Sjsg intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE); 18641bb76ff1Sjsg gt->gtier[3] = 18651bb76ff1Sjsg intel_uncore_read(uncore, 18661bb76ff1Sjsg GEN11_GPM_WGBOXPERF_INTR_ENABLE); 18671bb76ff1Sjsg gt->gtier[4] = 18681bb76ff1Sjsg intel_uncore_read(uncore, 18691bb76ff1Sjsg GEN11_CRYPTO_RSVD_INTR_ENABLE); 18701bb76ff1Sjsg gt->gtier[5] = 18711bb76ff1Sjsg intel_uncore_read(uncore, 18721bb76ff1Sjsg GEN11_GUNIT_CSME_INTR_ENABLE); 18731bb76ff1Sjsg gt->ngtier = 6; 18741bb76ff1Sjsg } else if (GRAPHICS_VER(i915) >= 8) { 18751bb76ff1Sjsg for (i = 0; i < 4; i++) 18761bb76ff1Sjsg gt->gtier[i] = 18771bb76ff1Sjsg intel_uncore_read(uncore, GEN8_GT_IER(i)); 18781bb76ff1Sjsg gt->ngtier = 4; 18791bb76ff1Sjsg } else if (HAS_PCH_SPLIT(i915)) { 18801bb76ff1Sjsg gt->gtier[0] = intel_uncore_read(uncore, GTIER); 18811bb76ff1Sjsg gt->ngtier = 1; 18821bb76ff1Sjsg } 18831bb76ff1Sjsg 18841bb76ff1Sjsg gt->eir = intel_uncore_read(uncore, EIR); 18851bb76ff1Sjsg gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); 18861bb76ff1Sjsg } 18871bb76ff1Sjsg 18881bb76ff1Sjsg /* 18891bb76ff1Sjsg * Capture all registers that relate to workload submission. 18901bb76ff1Sjsg * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us 18911bb76ff1Sjsg */ 18921bb76ff1Sjsg static void gt_record_global_regs(struct intel_gt_coredump *gt) 18933253c27bSkettenis { 1894c349dbc7Sjsg struct intel_uncore *uncore = gt->_gt->uncore; 1895c349dbc7Sjsg struct drm_i915_private *i915 = uncore->i915; 18963253c27bSkettenis int i; 18973253c27bSkettenis 1898c349dbc7Sjsg /* 1899c349dbc7Sjsg * General organization 19003253c27bSkettenis * 1. Registers specific to a single generation 19013253c27bSkettenis * 2. Registers which belong to multiple generations 19023253c27bSkettenis * 3. Feature specific registers. 19033253c27bSkettenis * 4. Everything else 19043253c27bSkettenis * Please try to follow the order. 
19053253c27bSkettenis */ 19063253c27bSkettenis 19073253c27bSkettenis /* 1: Registers specific to a single generation */ 19081bb76ff1Sjsg if (IS_VALLEYVIEW(i915)) 1909c349dbc7Sjsg gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); 19103253c27bSkettenis 19115ca02815Sjsg if (GRAPHICS_VER(i915) == 7) 1912c349dbc7Sjsg gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); 19133253c27bSkettenis 1914f005ef32Sjsg if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 1915f005ef32Sjsg gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, 1916f005ef32Sjsg XEHP_FAULT_TLB_DATA0); 1917f005ef32Sjsg gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, 1918f005ef32Sjsg XEHP_FAULT_TLB_DATA1); 1919f005ef32Sjsg } else if (GRAPHICS_VER(i915) >= 12) { 1920c349dbc7Sjsg gt->fault_data0 = intel_uncore_read(uncore, 1921c349dbc7Sjsg GEN12_FAULT_TLB_DATA0); 1922c349dbc7Sjsg gt->fault_data1 = intel_uncore_read(uncore, 1923c349dbc7Sjsg GEN12_FAULT_TLB_DATA1); 19245ca02815Sjsg } else if (GRAPHICS_VER(i915) >= 8) { 1925c349dbc7Sjsg gt->fault_data0 = intel_uncore_read(uncore, 1926c349dbc7Sjsg GEN8_FAULT_TLB_DATA0); 1927c349dbc7Sjsg gt->fault_data1 = intel_uncore_read(uncore, 1928c349dbc7Sjsg GEN8_FAULT_TLB_DATA1); 19293253c27bSkettenis } 19303253c27bSkettenis 19315ca02815Sjsg if (GRAPHICS_VER(i915) == 6) { 1932c349dbc7Sjsg gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); 1933c349dbc7Sjsg gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL); 1934c349dbc7Sjsg gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE); 19353253c27bSkettenis } 19363253c27bSkettenis 19373253c27bSkettenis /* 2: Registers which belong to multiple generations */ 19385ca02815Sjsg if (GRAPHICS_VER(i915) >= 7) 1939c349dbc7Sjsg gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); 19403253c27bSkettenis 19415ca02815Sjsg if (GRAPHICS_VER(i915) >= 6) { 19425ca02815Sjsg if (GRAPHICS_VER(i915) < 12) { 1943c349dbc7Sjsg gt->error = intel_uncore_read(uncore, ERROR_GEN6); 1944c349dbc7Sjsg gt->done_reg = intel_uncore_read(uncore, DONE_REG); 1945c349dbc7Sjsg } 19463253c27bSkettenis } 19473253c27bSkettenis 19483253c27bSkettenis /* 3: Feature specific registers */ 19495ca02815Sjsg if (IS_GRAPHICS_VER(i915, 6, 7)) { 1950c349dbc7Sjsg gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); 1951c349dbc7Sjsg gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); 1952c349dbc7Sjsg } 1953c349dbc7Sjsg 19545ca02815Sjsg if (IS_GRAPHICS_VER(i915, 8, 11)) 1955c349dbc7Sjsg gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); 1956c349dbc7Sjsg 19575ca02815Sjsg if (GRAPHICS_VER(i915) == 12) 1958c349dbc7Sjsg gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); 1959c349dbc7Sjsg 19605ca02815Sjsg if (GRAPHICS_VER(i915) >= 12) { 19611bb76ff1Sjsg for (i = 0; i < I915_MAX_SFC; i++) { 196250b0eff2Sjsg /* 196350b0eff2Sjsg * SFC_DONE resides in the VD forcewake domain, so it 196450b0eff2Sjsg * only exists if the corresponding VCS engine is 196550b0eff2Sjsg * present. 
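 * Each SFC unit is shared by a pair of video engines, which is why
 * only the even VCS instances (_VCS(i * 2)) are checked.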
196650b0eff2Sjsg */ 19671bb76ff1Sjsg if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 || 19681bb76ff1Sjsg !HAS_ENGINE(gt->_gt, _VCS(i * 2))) 196950b0eff2Sjsg continue; 197050b0eff2Sjsg 1971c349dbc7Sjsg gt->sfc_done[i] = 1972c349dbc7Sjsg intel_uncore_read(uncore, GEN12_SFC_DONE(i)); 1973c349dbc7Sjsg } 1974c349dbc7Sjsg 1975c349dbc7Sjsg gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); 19763253c27bSkettenis } 19773253c27bSkettenis } 19783253c27bSkettenis 1979ad8b1aafSjsg static void gt_record_info(struct intel_gt_coredump *gt) 1980ad8b1aafSjsg { 1981ad8b1aafSjsg memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info)); 19821bb76ff1Sjsg gt->clock_frequency = gt->_gt->clock_frequency; 19831bb76ff1Sjsg gt->clock_period_ns = gt->_gt->clock_period_ns; 1984ad8b1aafSjsg } 1985ad8b1aafSjsg 1986c349dbc7Sjsg /* 1987c349dbc7Sjsg * Generate a semi-unique error code. The code is not meant to have meaning; the 1988c349dbc7Sjsg * code's only purpose is to try to prevent false duplicated bug reports by 1989c349dbc7Sjsg * grossly estimating a GPU error state. 1990c349dbc7Sjsg * 1991c349dbc7Sjsg * TODO Ideally, hashing the batchbuffer would be a very nice way to determine 1992c349dbc7Sjsg * the hang if we could strip the GTT offset information from it. 1993c349dbc7Sjsg * 1994c349dbc7Sjsg * It's only a small step better than a random number in its current form. 1995c349dbc7Sjsg */ 1996c349dbc7Sjsg static u32 generate_ecode(const struct intel_engine_coredump *ee) 19973253c27bSkettenis { 1998c349dbc7Sjsg /* 1999c349dbc7Sjsg * IPEHR would be an ideal way to detect errors, as it's the gross 2000c349dbc7Sjsg * measure of "the command that hung." However, it contains some very common 2001c349dbc7Sjsg * synchronization commands which almost always appear when the hang is 2002c349dbc7Sjsg * strictly a client bug. Use instdone to differentiate some of those. 2003c349dbc7Sjsg */ 2004c349dbc7Sjsg return ee ?
ee->ipehr ^ ee->instdone.instdone : 0; 2005c349dbc7Sjsg } 20063253c27bSkettenis 2007c349dbc7Sjsg static const char *error_msg(struct i915_gpu_coredump *error) 2008c349dbc7Sjsg { 2009c349dbc7Sjsg struct intel_engine_coredump *first = NULL; 20105ca02815Sjsg unsigned int hung_classes = 0; 2011c349dbc7Sjsg struct intel_gt_coredump *gt; 2012c349dbc7Sjsg int len; 2013c349dbc7Sjsg 2014c349dbc7Sjsg for (gt = error->gt; gt; gt = gt->next) { 2015c349dbc7Sjsg struct intel_engine_coredump *cs; 2016c349dbc7Sjsg 20175ca02815Sjsg for (cs = gt->engine; cs; cs = cs->next) { 20185ca02815Sjsg if (cs->hung) { 20195ca02815Sjsg hung_classes |= BIT(cs->engine->uabi_class); 20205ca02815Sjsg if (!first) 20215ca02815Sjsg first = cs; 20225ca02815Sjsg } 20235ca02815Sjsg } 2024c349dbc7Sjsg } 20253253c27bSkettenis 20263253c27bSkettenis len = scnprintf(error->error_msg, sizeof(error->error_msg), 2027c349dbc7Sjsg "GPU HANG: ecode %d:%x:%08x", 20285ca02815Sjsg GRAPHICS_VER(error->i915), hung_classes, 2029c349dbc7Sjsg generate_ecode(first)); 2030c349dbc7Sjsg if (first && first->context.pid) { 2031c349dbc7Sjsg /* Just show the first executing process, more is confusing */ 20323253c27bSkettenis len += scnprintf(error->error_msg + len, 20333253c27bSkettenis sizeof(error->error_msg) - len, 20343253c27bSkettenis ", in %s [%d]", 2035c349dbc7Sjsg first->context.comm, first->context.pid); 20363253c27bSkettenis } 20373253c27bSkettenis 2038c349dbc7Sjsg return error->error_msg; 2039c349dbc7Sjsg } 2040c349dbc7Sjsg 2041c349dbc7Sjsg static void capture_gen(struct i915_gpu_coredump *error) 20423253c27bSkettenis { 20437f4dd379Sjsg struct drm_i915_private *i915 = error->i915; 20447f4dd379Sjsg 20457f4dd379Sjsg error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); 20467f4dd379Sjsg error->suspended = i915->runtime_pm.suspended; 20477f4dd379Sjsg 20481bb76ff1Sjsg error->iommu = i915_vtd_active(i915); 20497f4dd379Sjsg error->reset_count = i915_reset_count(&i915->gpu_error); 20507f4dd379Sjsg error->suspend_count = i915->suspend_count; 20517f4dd379Sjsg 2052ad8b1aafSjsg i915_params_copy(&error->params, &i915->params); 20537f4dd379Sjsg memcpy(&error->device_info, 20547f4dd379Sjsg INTEL_INFO(i915), 20557f4dd379Sjsg sizeof(error->device_info)); 2056c349dbc7Sjsg memcpy(&error->runtime_info, 2057c349dbc7Sjsg RUNTIME_INFO(i915), 2058c349dbc7Sjsg sizeof(error->runtime_info)); 2059f005ef32Sjsg memcpy(&error->display_device_info, DISPLAY_INFO(i915), 2060f005ef32Sjsg sizeof(error->display_device_info)); 2061f005ef32Sjsg memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915), 2062f005ef32Sjsg sizeof(error->display_runtime_info)); 20637f4dd379Sjsg error->driver_caps = i915->caps; 20647f4dd379Sjsg } 20657f4dd379Sjsg 2066c349dbc7Sjsg struct i915_gpu_coredump * 2067c349dbc7Sjsg i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) 20687f4dd379Sjsg { 2069c349dbc7Sjsg struct i915_gpu_coredump *error; 20707f4dd379Sjsg 2071ad8b1aafSjsg if (!i915->params.error_capture) 2072c349dbc7Sjsg return NULL; 20737f4dd379Sjsg 2074c349dbc7Sjsg error = kzalloc(sizeof(*error), gfp); 20757f4dd379Sjsg if (!error) 20767f4dd379Sjsg return NULL; 20777f4dd379Sjsg 20787f4dd379Sjsg kref_init(&error->ref); 20797f4dd379Sjsg error->i915 = i915; 20807f4dd379Sjsg 2081c349dbc7Sjsg error->time = ktime_get_real(); 2082c349dbc7Sjsg error->boottime = ktime_get_boottime(); 20831bb76ff1Sjsg error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time); 2084c349dbc7Sjsg error->capture = jiffies; 2085c349dbc7Sjsg 2086c349dbc7Sjsg capture_gen(error); 20877f4dd379Sjsg 
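/*
 * Only device-global state is recorded here; per-GT and per-engine
 * state is attached afterwards by the caller (see __i915_gpu_coredump()).
 */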
20887f4dd379Sjsg return error; 2089e1001332Skettenis } 2090e1001332Skettenis 2091c349dbc7Sjsg #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) 2092c349dbc7Sjsg 2093c349dbc7Sjsg struct intel_gt_coredump * 20941bb76ff1Sjsg intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags) 2095c349dbc7Sjsg { 2096c349dbc7Sjsg struct intel_gt_coredump *gc; 2097c349dbc7Sjsg 2098c349dbc7Sjsg gc = kzalloc(sizeof(*gc), gfp); 2099c349dbc7Sjsg if (!gc) 2100c349dbc7Sjsg return NULL; 2101c349dbc7Sjsg 2102c349dbc7Sjsg gc->_gt = gt; 2103c349dbc7Sjsg gc->awake = intel_gt_pm_is_awake(gt); 2104c349dbc7Sjsg 21051bb76ff1Sjsg gt_record_display_regs(gc); 21061bb76ff1Sjsg gt_record_global_nonguc_regs(gc); 21071bb76ff1Sjsg 21081bb76ff1Sjsg /* 21091bb76ff1Sjsg * GuC dumps global, eng-class and eng-instance registers 21101bb76ff1Sjsg * (that can change as part of engine state during execution) 21111bb76ff1Sjsg * before an engine is reset due to a hung context. 21121bb76ff1Sjsg * GuC captures and reports all three groups of registers 21131bb76ff1Sjsg * together as a single set before the engine is reset. 21141bb76ff1Sjsg * Thus, if GuC triggered the context reset we retrieve 21151bb76ff1Sjsg * the register values as part of gt_record_engines. 21161bb76ff1Sjsg */ 21171bb76ff1Sjsg if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) 21181bb76ff1Sjsg gt_record_global_regs(gc); 21191bb76ff1Sjsg 2120c349dbc7Sjsg gt_record_fences(gc); 2121c349dbc7Sjsg 2122c349dbc7Sjsg return gc; 2123c349dbc7Sjsg } 2124c349dbc7Sjsg 2125c349dbc7Sjsg struct i915_vma_compress * 2126c349dbc7Sjsg i915_vma_capture_prepare(struct intel_gt_coredump *gt) 2127c349dbc7Sjsg { 2128c349dbc7Sjsg struct i915_vma_compress *compress; 2129c349dbc7Sjsg 2130c349dbc7Sjsg compress = kmalloc(sizeof(*compress), ALLOW_FAIL); 2131c349dbc7Sjsg if (!compress) 2132c349dbc7Sjsg return NULL; 2133c349dbc7Sjsg 2134c349dbc7Sjsg if (!compress_init(compress)) { 2135c349dbc7Sjsg kfree(compress); 2136c349dbc7Sjsg return NULL; 2137c349dbc7Sjsg } 2138c349dbc7Sjsg 2139c349dbc7Sjsg return compress; 2140c349dbc7Sjsg } 2141c349dbc7Sjsg 2142c349dbc7Sjsg void i915_vma_capture_finish(struct intel_gt_coredump *gt, 2143c349dbc7Sjsg struct i915_vma_compress *compress) 2144c349dbc7Sjsg { 2145c349dbc7Sjsg if (!compress) 2146c349dbc7Sjsg return; 2147c349dbc7Sjsg 2148c349dbc7Sjsg compress_fini(compress); 2149c349dbc7Sjsg kfree(compress); 2150c349dbc7Sjsg } 2151c349dbc7Sjsg 21521bb76ff1Sjsg static struct i915_gpu_coredump * 21531bb76ff1Sjsg __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) 2154c349dbc7Sjsg { 21555ca02815Sjsg struct drm_i915_private *i915 = gt->i915; 2156c349dbc7Sjsg struct i915_gpu_coredump *error; 2157c349dbc7Sjsg 2158c349dbc7Sjsg /* Check if GPU capture has been disabled */ 2159c349dbc7Sjsg error = READ_ONCE(i915->gpu_error.first_error); 2160c349dbc7Sjsg if (IS_ERR(error)) 2161c349dbc7Sjsg return error; 2162c349dbc7Sjsg 2163c349dbc7Sjsg error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL); 2164c349dbc7Sjsg if (!error) 2165c349dbc7Sjsg return ERR_PTR(-ENOMEM); 2166c349dbc7Sjsg 21671bb76ff1Sjsg error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags); 2168c349dbc7Sjsg if (error->gt) { 2169c349dbc7Sjsg struct i915_vma_compress *compress; 2170c349dbc7Sjsg 2171c349dbc7Sjsg compress = i915_vma_capture_prepare(error->gt); 2172c349dbc7Sjsg if (!compress) { 2173c349dbc7Sjsg kfree(error->gt); 2174c349dbc7Sjsg kfree(error); 2175c349dbc7Sjsg return ERR_PTR(-ENOMEM); 2176c349dbc7Sjsg } 2177c349dbc7Sjsg 21781bb76ff1Sjsg if 
(INTEL_INFO(i915)->has_gt_uc) { 2179c349dbc7Sjsg error->gt->uc = gt_record_uc(error->gt, compress); 21801bb76ff1Sjsg if (error->gt->uc) { 21811bb76ff1Sjsg if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE) 21821bb76ff1Sjsg error->gt->uc->guc.is_guc_capture = true; 21831bb76ff1Sjsg else 21841bb76ff1Sjsg GEM_BUG_ON(error->gt->uc->guc.is_guc_capture); 21851bb76ff1Sjsg } 21861bb76ff1Sjsg } 21871bb76ff1Sjsg 21881bb76ff1Sjsg gt_record_info(error->gt); 21891bb76ff1Sjsg gt_record_engines(error->gt, engine_mask, compress, dump_flags); 21901bb76ff1Sjsg 2191c349dbc7Sjsg 2192c349dbc7Sjsg i915_vma_capture_finish(error->gt, compress); 2193c349dbc7Sjsg 2194c349dbc7Sjsg error->simulated |= error->gt->simulated; 2195c349dbc7Sjsg } 2196c349dbc7Sjsg 2197c349dbc7Sjsg error->overlay = intel_overlay_capture_error_state(i915); 2198c349dbc7Sjsg 2199c349dbc7Sjsg return error; 2200c349dbc7Sjsg } 2201c349dbc7Sjsg 22026155d736Sbluhm static DEFINE_MUTEX(capture_mutex); 22036155d736Sbluhm 22041bb76ff1Sjsg struct i915_gpu_coredump * 22051bb76ff1Sjsg i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) 22061bb76ff1Sjsg { 22071bb76ff1Sjsg int ret = mutex_lock_interruptible(&capture_mutex); 22081bb76ff1Sjsg struct i915_gpu_coredump *dump; 22091bb76ff1Sjsg 22101bb76ff1Sjsg if (ret) 22111bb76ff1Sjsg return ERR_PTR(ret); 22121bb76ff1Sjsg 22131bb76ff1Sjsg dump = __i915_gpu_coredump(gt, engine_mask, dump_flags); 22141bb76ff1Sjsg mutex_unlock(&capture_mutex); 22151bb76ff1Sjsg 22161bb76ff1Sjsg return dump; 22171bb76ff1Sjsg } 22181bb76ff1Sjsg 2219c349dbc7Sjsg void i915_error_state_store(struct i915_gpu_coredump *error) 2220c349dbc7Sjsg { 2221c349dbc7Sjsg struct drm_i915_private *i915; 2222c349dbc7Sjsg static bool warned; 2223c349dbc7Sjsg 2224c349dbc7Sjsg if (IS_ERR_OR_NULL(error)) 2225c349dbc7Sjsg return; 2226c349dbc7Sjsg 2227c349dbc7Sjsg i915 = error->i915; 2228ad8b1aafSjsg drm_info(&i915->drm, "%s\n", error_msg(error)); 2229c349dbc7Sjsg 2230c349dbc7Sjsg if (error->simulated || 2231c349dbc7Sjsg cmpxchg(&i915->gpu_error.first_error, NULL, error)) 2232c349dbc7Sjsg return; 2233c349dbc7Sjsg 2234c349dbc7Sjsg i915_gpu_coredump_get(error); 2235c349dbc7Sjsg 2236c349dbc7Sjsg if (!xchg(&warned, true) && 2237c349dbc7Sjsg ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { 2238c349dbc7Sjsg pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); 2239c349dbc7Sjsg pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); 2240c349dbc7Sjsg pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); 2241c349dbc7Sjsg pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 2242c349dbc7Sjsg pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); 2243c349dbc7Sjsg pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n", 2244c349dbc7Sjsg i915->drm.primary->index); 2245c349dbc7Sjsg } 2246c349dbc7Sjsg } 2247c349dbc7Sjsg 2248e1001332Skettenis /** 2249e1001332Skettenis * i915_capture_error_state - capture an error record for later analysis 22505ca02815Sjsg * @gt: intel_gt which originated the hang 22515ca02815Sjsg * @engine_mask: hung engines 2252f005ef32Sjsg * @dump_flags: dump flags 2253e1001332Skettenis * 2254e1001332Skettenis * Should be called when an error is detected (either a hang or an error 2255e1001332Skettenis * interrupt) to capture error state from the time of the error. 
Fills 2256e1001332Skettenis * out a structure which becomes available in debugfs for user level tools 2257e1001332Skettenis * to pick up. 2258e1001332Skettenis */ 22595ca02815Sjsg void i915_capture_error_state(struct intel_gt *gt, 22601bb76ff1Sjsg intel_engine_mask_t engine_mask, u32 dump_flags) 2261e1001332Skettenis { 2262c349dbc7Sjsg struct i915_gpu_coredump *error; 2263e1001332Skettenis 22641bb76ff1Sjsg error = i915_gpu_coredump(gt, engine_mask, dump_flags); 2265c349dbc7Sjsg if (IS_ERR(error)) { 22665ca02815Sjsg cmpxchg(&gt->i915->gpu_error.first_error, NULL, error); 2267e1001332Skettenis return; 2268e1001332Skettenis } 2269e1001332Skettenis 2270c349dbc7Sjsg i915_error_state_store(error); 2271c349dbc7Sjsg i915_gpu_coredump_put(error); 22727f4dd379Sjsg } 2273e1001332Skettenis 2274c349dbc7Sjsg struct i915_gpu_coredump * 22757f4dd379Sjsg i915_first_error_state(struct drm_i915_private *i915) 2276e1001332Skettenis { 2277c349dbc7Sjsg struct i915_gpu_coredump *error; 2278e1001332Skettenis 22797f4dd379Sjsg spin_lock_irq(&i915->gpu_error.lock); 22807f4dd379Sjsg error = i915->gpu_error.first_error; 2281c349dbc7Sjsg if (!IS_ERR_OR_NULL(error)) 2282c349dbc7Sjsg i915_gpu_coredump_get(error); 22837f4dd379Sjsg spin_unlock_irq(&i915->gpu_error.lock); 22847f4dd379Sjsg 22857f4dd379Sjsg return error; 2286e1001332Skettenis } 2287e1001332Skettenis 22887f4dd379Sjsg void i915_reset_error_state(struct drm_i915_private *i915) 2289e1001332Skettenis { 2290c349dbc7Sjsg struct i915_gpu_coredump *error; 2291e1001332Skettenis 22927f4dd379Sjsg spin_lock_irq(&i915->gpu_error.lock); 22937f4dd379Sjsg error = i915->gpu_error.first_error; 2294c349dbc7Sjsg if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ 22957f4dd379Sjsg i915->gpu_error.first_error = NULL; 22967f4dd379Sjsg spin_unlock_irq(&i915->gpu_error.lock); 2297e1001332Skettenis 2298c349dbc7Sjsg if (!IS_ERR_OR_NULL(error)) 2299c349dbc7Sjsg i915_gpu_coredump_put(error); 2300c349dbc7Sjsg } 2301c349dbc7Sjsg 2302c349dbc7Sjsg void i915_disable_error_state(struct drm_i915_private *i915, int err) 2303c349dbc7Sjsg { 2304c349dbc7Sjsg spin_lock_irq(&i915->gpu_error.lock); 2305c349dbc7Sjsg if (!i915->gpu_error.first_error) 2306c349dbc7Sjsg i915->gpu_error.first_error = ERR_PTR(err); 2307c349dbc7Sjsg spin_unlock_irq(&i915->gpu_error.lock); 2308e1001332Skettenis } 2309f005ef32Sjsg 2310f005ef32Sjsg #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 2311f005ef32Sjsg void intel_klog_error_capture(struct intel_gt *gt, 2312f005ef32Sjsg intel_engine_mask_t engine_mask) 2313f005ef32Sjsg { 2314f005ef32Sjsg static int g_count; 2315f005ef32Sjsg struct drm_i915_private *i915 = gt->i915; 2316f005ef32Sjsg struct i915_gpu_coredump *error; 2317f005ef32Sjsg intel_wakeref_t wakeref; 2318f005ef32Sjsg size_t buf_size = PAGE_SIZE * 128; 2319f005ef32Sjsg size_t pos_err; 2320f005ef32Sjsg char *buf, *ptr, *next; 2321f005ef32Sjsg int l_count = g_count++; 2322f005ef32Sjsg int line = 0; 2323f005ef32Sjsg 2324f005ef32Sjsg /* Can't allocate memory during a reset */ 2325f005ef32Sjsg if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { 2326f005ef32Sjsg drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n", 2327f005ef32Sjsg l_count, line++); 2328f005ef32Sjsg return; 2329f005ef32Sjsg } 2330f005ef32Sjsg 2331f005ef32Sjsg error = READ_ONCE(i915->gpu_error.first_error); 2332f005ef32Sjsg if (error) { 2333f005ef32Sjsg drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n", 2334f005ef32Sjsg l_count, line++); 2335f005ef32Sjsg i915_reset_error_state(i915);
2336f005ef32Sjsg } 2337f005ef32Sjsg 2338f005ef32Sjsg with_intel_runtime_pm(&i915->runtime_pm, wakeref) 2339f005ef32Sjsg error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE); 2340f005ef32Sjsg 2341f005ef32Sjsg if (IS_ERR(error)) { 2342f005ef32Sjsg drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error capture: %ld!\n", 2343f005ef32Sjsg l_count, line++, PTR_ERR(error)); 2344f005ef32Sjsg return; 2345f005ef32Sjsg } 2346f005ef32Sjsg 2347f005ef32Sjsg buf = kvmalloc(buf_size, GFP_KERNEL); 2348f005ef32Sjsg if (!buf) { 2349f005ef32Sjsg drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n", 2350f005ef32Sjsg l_count, line++); 2351f005ef32Sjsg i915_gpu_coredump_put(error); 2352f005ef32Sjsg return; 2353f005ef32Sjsg } 2354f005ef32Sjsg 2355f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n", 2356f005ef32Sjsg l_count, line++, __builtin_return_address(0)); 2357f005ef32Sjsg 2358f005ef32Sjsg /* Largest string length safe to print via dmesg */ 2359f005ef32Sjsg # define MAX_CHUNK 800 2360f005ef32Sjsg 2361f005ef32Sjsg pos_err = 0; 2362f005ef32Sjsg while (1) { 2363f005ef32Sjsg ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1); 2364f005ef32Sjsg 2365f005ef32Sjsg if (got <= 0) 2366f005ef32Sjsg break; 2367f005ef32Sjsg 2368f005ef32Sjsg buf[got] = 0; 2369f005ef32Sjsg pos_err += got; 2370f005ef32Sjsg 2371f005ef32Sjsg ptr = buf; 2372f005ef32Sjsg while (got > 0) { 2373f005ef32Sjsg size_t count; 2374f005ef32Sjsg char tag[2]; 2375f005ef32Sjsg 2376f005ef32Sjsg next = strnchr(ptr, got, '\n'); 2377f005ef32Sjsg if (next) { 2378f005ef32Sjsg count = next - ptr; 2379f005ef32Sjsg *next = 0; 2380f005ef32Sjsg tag[0] = '>'; 2381f005ef32Sjsg tag[1] = '<'; 2382f005ef32Sjsg } else { 2383f005ef32Sjsg count = got; 2384f005ef32Sjsg tag[0] = '}'; 2385f005ef32Sjsg tag[1] = '{'; 2386f005ef32Sjsg } 2387f005ef32Sjsg 2388f005ef32Sjsg if (count > MAX_CHUNK) { 2389f005ef32Sjsg size_t pos; 2390f005ef32Sjsg char *ptr2 = ptr; 2391f005ef32Sjsg 2392f005ef32Sjsg for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) { 2393f005ef32Sjsg char chr = ptr[pos]; 2394f005ef32Sjsg 2395f005ef32Sjsg ptr[pos] = 0; 2396f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n", 2397f005ef32Sjsg l_count, line++, ptr2); 2398f005ef32Sjsg ptr[pos] = chr; 2399f005ef32Sjsg ptr2 = ptr + pos; 2400f005ef32Sjsg 2401f005ef32Sjsg /* 2402f005ef32Sjsg * If spewing large amounts of data via a serial console, 2403f005ef32Sjsg * this can be a very slow process. So be friendly and try 2404f005ef32Sjsg * not to cause 'softlockup on CPU' problems. 2405f005ef32Sjsg */ 2406f005ef32Sjsg cond_resched(); 2407f005ef32Sjsg } 2408f005ef32Sjsg 2409f005ef32Sjsg if (ptr2 < (ptr + count)) 2410f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n", 2411f005ef32Sjsg l_count, line++, tag[0], ptr2, tag[1]); 2412f005ef32Sjsg else if (tag[0] == '>') 2413f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] ><\n", 2414f005ef32Sjsg l_count, line++); 2415f005ef32Sjsg } else { 2416f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n", 2417f005ef32Sjsg l_count, line++, tag[0], ptr, tag[1]); 2418f005ef32Sjsg } 2419f005ef32Sjsg 2420f005ef32Sjsg ptr = next; 2421f005ef32Sjsg got -= count; 2422f005ef32Sjsg if (next) { 2423f005ef32Sjsg ptr++; 2424f005ef32Sjsg got--; 2425f005ef32Sjsg } 2426f005ef32Sjsg 2427f005ef32Sjsg /* As above. 
*/ 2428f005ef32Sjsg cond_resched(); 2429f005ef32Sjsg } 2430f005ef32Sjsg 2431f005ef32Sjsg if (got) 2432f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n", 2433f005ef32Sjsg l_count, line++, got); 2434f005ef32Sjsg } 2435f005ef32Sjsg 2436f005ef32Sjsg kvfree(buf); 2437f005ef32Sjsg 2438f005ef32Sjsg drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err); 2439f005ef32Sjsg } 2440f005ef32Sjsg #endif