1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2014-2019 Intel Corporation 4 */ 5 6 #include <linux/debugfs.h> 7 8 #include "gt/intel_gt.h" 9 #include "i915_drv.h" 10 #include "i915_memcpy.h" 11 #include "intel_guc_log.h" 12 13 static void guc_log_capture_logs(struct intel_guc_log *log); 14 15 /** 16 * DOC: GuC firmware log 17 * 18 * Firmware log is enabled by setting i915.guc_log_level to the positive level. 19 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from 20 * i915_guc_load_status will print out firmware loading status and scratch 21 * registers value. 22 */ 23 24 static int guc_action_flush_log_complete(struct intel_guc *guc) 25 { 26 u32 action[] = { 27 INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE 28 }; 29 30 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 31 } 32 33 static int guc_action_flush_log(struct intel_guc *guc) 34 { 35 u32 action[] = { 36 INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, 37 0 38 }; 39 40 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 41 } 42 43 static int guc_action_control_log(struct intel_guc *guc, bool enable, 44 bool default_logging, u32 verbosity) 45 { 46 u32 action[] = { 47 INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, 48 (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) | 49 (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | 50 (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0) 51 }; 52 53 GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); 54 55 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 56 } 57 58 static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) 59 { 60 return container_of(log, struct intel_guc, log); 61 } 62 63 static void guc_log_enable_flush_events(struct intel_guc_log *log) 64 { 65 intel_guc_enable_msg(log_to_guc(log), 66 INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | 67 INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); 68 } 69 70 static void guc_log_disable_flush_events(struct intel_guc_log *log) 71 { 72 intel_guc_disable_msg(log_to_guc(log), 73 INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | 74 INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); 75 } 76 77 #ifdef __linux__ 78 79 /* 80 * Sub buffer switch callback. Called whenever relay has to switch to a new 81 * sub buffer, relay stays on the same sub buffer if 0 is returned. 82 */ 83 static int subbuf_start_callback(struct rchan_buf *buf, 84 void *subbuf, 85 void *prev_subbuf, 86 size_t prev_padding) 87 { 88 /* 89 * Use no-overwrite mode by default, where relay will stop accepting 90 * new data if there are no empty sub buffers left. 91 * There is no strict synchronization enforced by relay between Consumer 92 * and Producer. In overwrite mode, there is a possibility of getting 93 * inconsistent/garbled data, the producer could be writing on to the 94 * same sub buffer from which Consumer is reading. This can't be avoided 95 * unless Consumer is fast enough and can always run in tandem with 96 * Producer. 97 */ 98 if (relay_buf_full(buf)) 99 return 0; 100 101 return 1; 102 } 103 104 /* 105 * file_create() callback. Creates relay file in debugfs. 106 */ 107 static struct dentry *create_buf_file_callback(const char *filename, 108 struct dentry *parent, 109 umode_t mode, 110 struct rchan_buf *buf, 111 int *is_global) 112 { 113 struct dentry *buf_file; 114 115 /* 116 * This to enable the use of a single buffer for the relay channel and 117 * correspondingly have a single file exposed to User, through which 118 * it can collect the logs in order without any post-processing. 119 * Need to set 'is_global' even if parent is NULL for early logging. 120 */ 121 *is_global = 1; 122 123 if (!parent) 124 return NULL; 125 126 buf_file = debugfs_create_file(filename, mode, 127 parent, buf, &relay_file_operations); 128 if (IS_ERR(buf_file)) 129 return NULL; 130 131 return buf_file; 132 } 133 134 /* 135 * file_remove() default callback. Removes relay file in debugfs. 136 */ 137 static int remove_buf_file_callback(struct dentry *dentry) 138 { 139 debugfs_remove(dentry); 140 return 0; 141 } 142 143 /* relay channel callbacks */ 144 static struct rchan_callbacks relay_callbacks = { 145 .subbuf_start = subbuf_start_callback, 146 .create_buf_file = create_buf_file_callback, 147 .remove_buf_file = remove_buf_file_callback, 148 }; 149 150 #endif /* __linux__ */ 151 152 static void guc_move_to_next_buf(struct intel_guc_log *log) 153 { 154 STUB(); 155 #ifdef notyet 156 /* 157 * Make sure the updates made in the sub buffer are visible when 158 * Consumer sees the following update to offset inside the sub buffer. 159 */ 160 smp_wmb(); 161 162 /* All data has been written, so now move the offset of sub buffer. */ 163 relay_reserve(log->relay.channel, log->vma->obj->base.size); 164 165 /* Switch to the next sub buffer */ 166 relay_flush(log->relay.channel); 167 #endif 168 } 169 170 static void *guc_get_write_buffer(struct intel_guc_log *log) 171 { 172 STUB(); 173 return NULL; 174 #ifdef notyet 175 /* 176 * Just get the base address of a new sub buffer and copy data into it 177 * ourselves. NULL will be returned in no-overwrite mode, if all sub 178 * buffers are full. Could have used the relay_write() to indirectly 179 * copy the data, but that would have been bit convoluted, as we need to 180 * write to only certain locations inside a sub buffer which cannot be 181 * done without using relay_reserve() along with relay_write(). So its 182 * better to use relay_reserve() alone. 183 */ 184 return relay_reserve(log->relay.channel, 0); 185 #endif 186 } 187 188 static bool guc_check_log_buf_overflow(struct intel_guc_log *log, 189 enum guc_log_buffer_type type, 190 unsigned int full_cnt) 191 { 192 unsigned int prev_full_cnt = log->stats[type].sampled_overflow; 193 bool overflow = false; 194 195 if (full_cnt != prev_full_cnt) { 196 overflow = true; 197 198 log->stats[type].overflow = full_cnt; 199 log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; 200 201 if (full_cnt < prev_full_cnt) { 202 /* buffer_full_cnt is a 4 bit counter */ 203 log->stats[type].sampled_overflow += 16; 204 } 205 206 dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev, 207 "GuC log buffer overflow\n"); 208 } 209 210 return overflow; 211 } 212 213 static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) 214 { 215 switch (type) { 216 case GUC_ISR_LOG_BUFFER: 217 return ISR_BUFFER_SIZE; 218 case GUC_DPC_LOG_BUFFER: 219 return DPC_BUFFER_SIZE; 220 case GUC_CRASH_DUMP_LOG_BUFFER: 221 return CRASH_BUFFER_SIZE; 222 default: 223 MISSING_CASE(type); 224 } 225 226 return 0; 227 } 228 229 static void guc_read_update_log_buffer(struct intel_guc_log *log) 230 { 231 unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; 232 struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; 233 struct guc_log_buffer_state log_buf_state_local; 234 enum guc_log_buffer_type type; 235 void *src_data, *dst_data; 236 bool new_overflow; 237 238 mutex_lock(&log->relay.lock); 239 240 if (WARN_ON(!intel_guc_log_relay_created(log))) 241 goto out_unlock; 242 243 /* Get the pointer to shared GuC log buffer */ 244 log_buf_state = src_data = log->relay.buf_addr; 245 246 /* Get the pointer to local buffer to store the logs */ 247 log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); 248 249 if (unlikely(!log_buf_snapshot_state)) { 250 /* 251 * Used rate limited to avoid deluge of messages, logs might be 252 * getting consumed by User at a slow rate. 253 */ 254 DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n"); 255 log->relay.full_count++; 256 257 goto out_unlock; 258 } 259 260 /* Actual logs are present from the 2nd page */ 261 src_data += PAGE_SIZE; 262 dst_data += PAGE_SIZE; 263 264 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { 265 /* 266 * Make a copy of the state structure, inside GuC log buffer 267 * (which is uncached mapped), on the stack to avoid reading 268 * from it multiple times. 269 */ 270 memcpy(&log_buf_state_local, log_buf_state, 271 sizeof(struct guc_log_buffer_state)); 272 buffer_size = guc_get_log_buffer_size(type); 273 read_offset = log_buf_state_local.read_ptr; 274 write_offset = log_buf_state_local.sampled_write_ptr; 275 full_cnt = log_buf_state_local.buffer_full_cnt; 276 277 /* Bookkeeping stuff */ 278 log->stats[type].flush += log_buf_state_local.flush_to_file; 279 new_overflow = guc_check_log_buf_overflow(log, type, full_cnt); 280 281 /* Update the state of shared log buffer */ 282 log_buf_state->read_ptr = write_offset; 283 log_buf_state->flush_to_file = 0; 284 log_buf_state++; 285 286 /* First copy the state structure in snapshot buffer */ 287 memcpy(log_buf_snapshot_state, &log_buf_state_local, 288 sizeof(struct guc_log_buffer_state)); 289 290 /* 291 * The write pointer could have been updated by GuC firmware, 292 * after sending the flush interrupt to Host, for consistency 293 * set write pointer value to same value of sampled_write_ptr 294 * in the snapshot buffer. 295 */ 296 log_buf_snapshot_state->write_ptr = write_offset; 297 log_buf_snapshot_state++; 298 299 /* Now copy the actual logs. */ 300 if (unlikely(new_overflow)) { 301 /* copy the whole buffer in case of overflow */ 302 read_offset = 0; 303 write_offset = buffer_size; 304 } else if (unlikely((read_offset > buffer_size) || 305 (write_offset > buffer_size))) { 306 DRM_ERROR("invalid log buffer state\n"); 307 /* copy whole buffer as offsets are unreliable */ 308 read_offset = 0; 309 write_offset = buffer_size; 310 } 311 312 /* Just copy the newly written data */ 313 if (read_offset > write_offset) { 314 i915_memcpy_from_wc(dst_data, src_data, write_offset); 315 bytes_to_copy = buffer_size - read_offset; 316 } else { 317 bytes_to_copy = write_offset - read_offset; 318 } 319 i915_memcpy_from_wc(dst_data + read_offset, 320 src_data + read_offset, bytes_to_copy); 321 322 src_data += buffer_size; 323 dst_data += buffer_size; 324 } 325 326 guc_move_to_next_buf(log); 327 328 out_unlock: 329 mutex_unlock(&log->relay.lock); 330 } 331 332 static void capture_logs_work(struct work_struct *work) 333 { 334 struct intel_guc_log *log = 335 container_of(work, struct intel_guc_log, relay.flush_work); 336 337 guc_log_capture_logs(log); 338 } 339 340 static int guc_log_map(struct intel_guc_log *log) 341 { 342 void *vaddr; 343 344 lockdep_assert_held(&log->relay.lock); 345 346 if (!log->vma) 347 return -ENODEV; 348 349 /* 350 * Create a WC (Uncached for read) vmalloc mapping of log 351 * buffer pages, so that we can directly get the data 352 * (up-to-date) from memory. 353 */ 354 vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); 355 if (IS_ERR(vaddr)) 356 return PTR_ERR(vaddr); 357 358 log->relay.buf_addr = vaddr; 359 360 return 0; 361 } 362 363 static void guc_log_unmap(struct intel_guc_log *log) 364 { 365 lockdep_assert_held(&log->relay.lock); 366 367 i915_gem_object_unpin_map(log->vma->obj); 368 log->relay.buf_addr = NULL; 369 } 370 371 void intel_guc_log_init_early(struct intel_guc_log *log) 372 { 373 rw_init(&log->relay.lock, "rllk"); 374 INIT_WORK(&log->relay.flush_work, capture_logs_work); 375 log->relay.started = false; 376 } 377 378 static int guc_log_relay_create(struct intel_guc_log *log) 379 { 380 STUB(); 381 return -ENOSYS; 382 #ifdef notyet 383 struct intel_guc *guc = log_to_guc(log); 384 struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; 385 struct rchan *guc_log_relay_chan; 386 size_t n_subbufs, subbuf_size; 387 int ret; 388 389 lockdep_assert_held(&log->relay.lock); 390 GEM_BUG_ON(!log->vma); 391 392 /* Keep the size of sub buffers same as shared log buffer */ 393 subbuf_size = log->vma->size; 394 395 /* 396 * Store up to 8 snapshots, which is large enough to buffer sufficient 397 * boot time logs and provides enough leeway to User, in terms of 398 * latency, for consuming the logs from relay. Also doesn't take 399 * up too much memory. 400 */ 401 n_subbufs = 8; 402 403 guc_log_relay_chan = relay_open("guc_log", 404 dev_priv->drm.primary->debugfs_root, 405 subbuf_size, n_subbufs, 406 &relay_callbacks, dev_priv); 407 if (!guc_log_relay_chan) { 408 DRM_ERROR("Couldn't create relay chan for GuC logging\n"); 409 410 ret = -ENOMEM; 411 return ret; 412 } 413 414 GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); 415 log->relay.channel = guc_log_relay_chan; 416 417 return 0; 418 #endif 419 } 420 421 static void guc_log_relay_destroy(struct intel_guc_log *log) 422 { 423 STUB(); 424 #ifdef notyet 425 lockdep_assert_held(&log->relay.lock); 426 427 relay_close(log->relay.channel); 428 log->relay.channel = NULL; 429 #endif 430 } 431 432 static void guc_log_capture_logs(struct intel_guc_log *log) 433 { 434 struct intel_guc *guc = log_to_guc(log); 435 struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; 436 intel_wakeref_t wakeref; 437 438 guc_read_update_log_buffer(log); 439 440 /* 441 * Generally device is expected to be active only at this 442 * time, so get/put should be really quick. 443 */ 444 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) 445 guc_action_flush_log_complete(guc); 446 } 447 448 static u32 __get_default_log_level(struct intel_guc_log *log) 449 { 450 /* A negative value means "use platform/config default" */ 451 if (i915_modparams.guc_log_level < 0) { 452 return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || 453 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 454 GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE; 455 } 456 457 if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { 458 DRM_WARN("Incompatible option detected: %s=%d, %s!\n", 459 "guc_log_level", i915_modparams.guc_log_level, 460 "verbosity too high"); 461 return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || 462 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 463 GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED; 464 } 465 466 GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED); 467 GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX); 468 return i915_modparams.guc_log_level; 469 } 470 471 int intel_guc_log_create(struct intel_guc_log *log) 472 { 473 struct intel_guc *guc = log_to_guc(log); 474 struct i915_vma *vma; 475 u32 guc_log_size; 476 int ret; 477 478 GEM_BUG_ON(log->vma); 479 480 /* 481 * GuC Log buffer Layout 482 * 483 * +===============================+ 00B 484 * | Crash dump state header | 485 * +-------------------------------+ 32B 486 * | DPC state header | 487 * +-------------------------------+ 64B 488 * | ISR state header | 489 * +-------------------------------+ 96B 490 * | | 491 * +===============================+ PAGE_SIZE (4KB) 492 * | Crash Dump logs | 493 * +===============================+ + CRASH_SIZE 494 * | DPC logs | 495 * +===============================+ + DPC_SIZE 496 * | ISR logs | 497 * +===============================+ + ISR_SIZE 498 */ 499 guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + 500 ISR_BUFFER_SIZE; 501 502 vma = intel_guc_allocate_vma(guc, guc_log_size); 503 if (IS_ERR(vma)) { 504 ret = PTR_ERR(vma); 505 goto err; 506 } 507 508 log->vma = vma; 509 510 log->level = __get_default_log_level(log); 511 DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", 512 log->level, enableddisabled(log->level), 513 yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), 514 GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); 515 516 return 0; 517 518 err: 519 DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret); 520 return ret; 521 } 522 523 void intel_guc_log_destroy(struct intel_guc_log *log) 524 { 525 i915_vma_unpin_and_release(&log->vma, 0); 526 } 527 528 int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) 529 { 530 struct intel_guc *guc = log_to_guc(log); 531 struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; 532 intel_wakeref_t wakeref; 533 int ret = 0; 534 535 BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); 536 GEM_BUG_ON(!log->vma); 537 538 /* 539 * GuC is recognizing log levels starting from 0 to max, we're using 0 540 * as indication that logging should be disabled. 541 */ 542 if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) 543 return -EINVAL; 544 545 mutex_lock(&dev_priv->drm.struct_mutex); 546 547 if (log->level == level) 548 goto out_unlock; 549 550 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) 551 ret = guc_action_control_log(guc, 552 GUC_LOG_LEVEL_IS_VERBOSE(level), 553 GUC_LOG_LEVEL_IS_ENABLED(level), 554 GUC_LOG_LEVEL_TO_VERBOSITY(level)); 555 if (ret) { 556 DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); 557 goto out_unlock; 558 } 559 560 log->level = level; 561 562 out_unlock: 563 mutex_unlock(&dev_priv->drm.struct_mutex); 564 565 return ret; 566 } 567 568 bool intel_guc_log_relay_created(const struct intel_guc_log *log) 569 { 570 return log->relay.buf_addr; 571 } 572 573 int intel_guc_log_relay_open(struct intel_guc_log *log) 574 { 575 int ret; 576 577 if (!log->vma) 578 return -ENODEV; 579 580 mutex_lock(&log->relay.lock); 581 582 if (intel_guc_log_relay_created(log)) { 583 ret = -EEXIST; 584 goto out_unlock; 585 } 586 587 /* 588 * We require SSE 4.1 for fast reads from the GuC log buffer and 589 * it should be present on the chipsets supporting GuC based 590 * submisssions. 591 */ 592 if (!i915_has_memcpy_from_wc()) { 593 ret = -ENXIO; 594 goto out_unlock; 595 } 596 597 ret = guc_log_relay_create(log); 598 if (ret) 599 goto out_unlock; 600 601 ret = guc_log_map(log); 602 if (ret) 603 goto out_relay; 604 605 mutex_unlock(&log->relay.lock); 606 607 return 0; 608 609 out_relay: 610 guc_log_relay_destroy(log); 611 out_unlock: 612 mutex_unlock(&log->relay.lock); 613 614 return ret; 615 } 616 617 int intel_guc_log_relay_start(struct intel_guc_log *log) 618 { 619 if (log->relay.started) 620 return -EEXIST; 621 622 guc_log_enable_flush_events(log); 623 624 /* 625 * When GuC is logging without us relaying to userspace, we're ignoring 626 * the flush notification. This means that we need to unconditionally 627 * flush on relay enabling, since GuC only notifies us once. 628 */ 629 queue_work(system_highpri_wq, &log->relay.flush_work); 630 631 log->relay.started = true; 632 633 return 0; 634 } 635 636 void intel_guc_log_relay_flush(struct intel_guc_log *log) 637 { 638 struct intel_guc *guc = log_to_guc(log); 639 intel_wakeref_t wakeref; 640 641 if (!log->relay.started) 642 return; 643 644 /* 645 * Before initiating the forceful flush, wait for any pending/ongoing 646 * flush to complete otherwise forceful flush may not actually happen. 647 */ 648 flush_work(&log->relay.flush_work); 649 650 with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref) 651 guc_action_flush_log(guc); 652 653 /* GuC would have updated log buffer by now, so capture it */ 654 guc_log_capture_logs(log); 655 } 656 657 /* 658 * Stops the relay log. Called from intel_guc_log_relay_close(), so no 659 * possibility of race with start/flush since relay_write cannot race 660 * relay_close. 661 */ 662 static void guc_log_relay_stop(struct intel_guc_log *log) 663 { 664 struct intel_guc *guc = log_to_guc(log); 665 struct drm_i915_private *i915 = guc_to_gt(guc)->i915; 666 667 if (!log->relay.started) 668 return; 669 670 guc_log_disable_flush_events(log); 671 intel_synchronize_irq(i915); 672 673 flush_work(&log->relay.flush_work); 674 675 log->relay.started = false; 676 } 677 678 void intel_guc_log_relay_close(struct intel_guc_log *log) 679 { 680 guc_log_relay_stop(log); 681 682 mutex_lock(&log->relay.lock); 683 GEM_BUG_ON(!intel_guc_log_relay_created(log)); 684 guc_log_unmap(log); 685 guc_log_relay_destroy(log); 686 mutex_unlock(&log->relay.lock); 687 } 688 689 void intel_guc_log_handle_flush_event(struct intel_guc_log *log) 690 { 691 queue_work(system_highpri_wq, &log->relay.flush_work); 692 } 693