/*-
 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_shutdown.c,v 1.72.2.12 2002/02/21 19:15:10 dillon Exp $
 * $DragonFly: src/sys/kern/kern_shutdown.c,v 1.62 2008/01/05 13:23:48 corecode Exp $
 */

#include "opt_ddb.h"
#include "opt_ddb_trace.h"
#include "opt_hw_wdog.h"
#include "opt_panic.h"
#include "opt_show_busybufs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/diskslice.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/fcntl.h>		/* FREAD */
#include <sys/stat.h>		/* S_IFCHR */
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/conf.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/cons.h>
#include <sys/shm.h>
#include <sys/kern_syscall.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <sys/buf2.h>

#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/smp.h>		/* smp_active_mask, cpuid */
#include <machine/vmparam.h>
#include <machine/thread.h>

#include <sys/signalvar.h>

#include <sys/wdog.h>
#include <dev/misc/gpio/gpio.h>

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15	/* default to 15 seconds */
#endif

/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compilers.  We use the machine version to stay
 * within the confines of the kernel header files.
 */
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#ifdef DDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic, CTLFLAG_RW,
        &debugger_on_panic, 0, "Run debugger on kernel panic");

#ifdef DDB_TRACE
int trace_on_panic = 1;
#else
int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic, CTLFLAG_RW,
        &trace_on_panic, 0, "Print stack trace on kernel panic");
#endif

static int sync_on_panic = 0;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW,
        &sync_on_panic, 0, "Do a sync before rebooting from a panic");

SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");

#ifdef HW_WDOG
/*
 * If there is a hardware watchdog, point this at the function needed to
 * hold it off.  It's needed when the kernel needs to do some lengthy
 * operations, e.g. in wd.c when dumping core.  It's most annoying to have
 * your precious core-dump only half written because the wdog kicked in.
 */
watchdog_tickle_fn wdog_tickler = NULL;
#endif	/* HW_WDOG */

/*
 * Variable panicstr contains the argument to the first call to panic; used
 * as a flag to indicate that the kernel has already called panic.
 */
const char *panicstr;

int dumping;				/* system is dumping */
static struct dumperinfo dumper;	/* selected dumper */

#ifdef SMP
u_int panic_cpu_interlock;		/* panic interlock */
globaldata_t panic_cpu_gd;		/* which cpu took the panic */
#endif

int bootverbose = 0;			/* note: assignment to force non-bss */
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW,
        &bootverbose, 0, "Verbose kernel messages");

int cold = 1;				/* note: assignment to force non-bss */
int dumplo;				/* OBSOLETE - savecore compat */
u_int64_t dumplo64;

static void boot (int) __dead2;
static int setdumpdev (cdev_t dev);
static void poweroff_wait (void *, int);
static void print_uptime (void);
static void shutdown_halt (void *junk, int howto);
static void shutdown_panic (void *junk, int howto);
static void shutdown_reset (void *junk, int howto);
static int shutdown_busycount1(struct buf *bp, void *info);
static int shutdown_busycount2(struct buf *bp, void *info);
static void shutdown_cleanup_proc(struct proc *p);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{
        EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL, SHUTDOWN_PRI_FIRST);
        EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL, SHUTDOWN_PRI_LAST + 100);
        EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL, SHUTDOWN_PRI_LAST + 100);
        EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL, SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_BOOT2_MACHDEP, SI_ORDER_ANY, shutdown_conf, NULL)
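
/*
 * For illustration only: a driver or subsystem that needs to run code at
 * shutdown can hook the same events shutdown_conf() registers above.  The
 * handler below and its choice of event/priority are hypothetical, not part
 * of this file; the sketch only assumes the EVENTHANDLER_REGISTER()
 * interface from <sys/eventhandler.h> already used here:
 *
 *	static void
 *	mydrv_shutdown(void *arg, int howto)
 *	{
 *		// quiesce hypothetical hardware before reset/poweroff
 *	}
 *
 *	EVENTHANDLER_REGISTER(shutdown_post_sync, mydrv_shutdown, NULL,
 *			      SHUTDOWN_PRI_FIRST);
 */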

/* ARGSUSED */

/*
 * The system call that results in a reboot.
 */
int
sys_reboot(struct reboot_args *uap)
{
        struct thread *td = curthread;
        int error;

        if ((error = priv_check(td, PRIV_REBOOT)))
                return (error);

        boot(uap->opt);
        return (0);
}

/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
 */
static int shutdown_howto = 0;

void
shutdown_nice(int howto)
{
        shutdown_howto = howto;

        /* Send a signal to init(8) and have it shutdown the world */
        if (initproc != NULL) {
                ksignal(initproc, SIGINT);
        } else {
                /* No init(8) running, so simply reboot */
                boot(RB_NOSYNC);
        }
        return;
}
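
/*
 * For illustration only: a minimal sketch of how a button or keyboard
 * driver might use shutdown_nice() to request an orderly poweroff via
 * init(8).  The surrounding handler is hypothetical; RB_POWEROFF comes
 * from <sys/reboot.h>, which this file already includes.
 *
 *	static void
 *	mydrv_power_button(void *arg)
 *	{
 *		shutdown_nice(RB_POWEROFF);
 *	}
 */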

static int waittime = -1;
struct pcb dumppcb;
struct thread *dumpthread;

static void
print_uptime(void)
{
        int f;
        struct timespec ts;

        getnanouptime(&ts);
        kprintf("Uptime: ");
        f = 0;
        if (ts.tv_sec >= 86400) {
                kprintf("%ldd", ts.tv_sec / 86400);
                ts.tv_sec %= 86400;
                f = 1;
        }
        if (f || ts.tv_sec >= 3600) {
                kprintf("%ldh", ts.tv_sec / 3600);
                ts.tv_sec %= 3600;
                f = 1;
        }
        if (f || ts.tv_sec >= 60) {
                kprintf("%ldm", ts.tv_sec / 60);
                ts.tv_sec %= 60;
                f = 1;
        }
        kprintf("%lds\n", ts.tv_sec);
}

/*
 * Go through the rigmarole of shutting down...
 * This used to be in machdep.c but I'll be damned if I could see
 * anything machine dependent in it.
 */
static void
boot(int howto)
{
        /*
         * Get rid of any user scheduler baggage and then give
         * us a high priority.
         */
        if (curthread->td_release)
                curthread->td_release(curthread);
        lwkt_setpri_self(TDPRI_MAX);

        /* collect extra flags that shutdown_nice might have set */
        howto |= shutdown_howto;

#ifdef SMP
        /*
         * We really want to shut down on the BSP.  Subsystems such as ACPI
         * can't power-down the box otherwise.
         */
        if (smp_active_mask > 1) {
                kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
        }
        if (panicstr == NULL && mycpu->gd_cpuid != 0) {
                kprintf("Switching to cpu #0 for shutdown\n");
                lwkt_setcpu_self(globaldata_find(0));
        }
#endif
        /*
         * Do any callouts that should be done BEFORE syncing the filesystems.
         */
        EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

        /*
         * Try to get rid of any remaining FS references.  The calling
         * process, proc0, and init may still hold references.  The
         * VFS cache subsystem may still hold a root reference to root.
         *
         * XXX this needs work.  We really need to SIGSTOP all remaining
         * processes in order to avoid blowups due to proc0's filesystem
         * references going away.  For now just make sure that the init
         * process is stopped.
         */
        if (panicstr == NULL) {
                shutdown_cleanup_proc(curproc);
                shutdown_cleanup_proc(&proc0);
                if (initproc) {
                        if (initproc != curproc) {
                                ksignal(initproc, SIGSTOP);
                                tsleep(boot, 0, "shutdn", hz / 20);
                        }
                        shutdown_cleanup_proc(initproc);
                }
                vfs_cache_setroot(NULL, NULL);
        }

        /*
         * Now sync filesystems
         */
        if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
                int iter, nbusy, pbusy;

                waittime = 0;
                kprintf("\nsyncing disks... ");

                sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

                /*
                 * With soft updates, some buffers that are
                 * written will be remarked as dirty until other
                 * buffers are written.
                 */
                for (iter = pbusy = 0; iter < 20; iter++) {
                        nbusy = scan_all_buffers(shutdown_busycount1, NULL);
                        if (nbusy == 0)
                                break;
                        kprintf("%d ", nbusy);
                        if (nbusy < pbusy)
                                iter = 0;
                        pbusy = nbusy;
                        /*
                         * XXX:
                         * Process soft update work queue if buffers don't sync
                         * after 6 iterations by permitting the syncer to run.
                         */
                        if (iter > 5)
                                bio_ops_sync(NULL);

                        sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
                        tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
                }
                kprintf("\n");
                /*
                 * Count only busy local buffers to prevent forcing
                 * a fsck if we're just a client of a wedged NFS server
                 */
                nbusy = scan_all_buffers(shutdown_busycount2, NULL);
                if (nbusy) {
                        /*
                         * Failed to sync all blocks.  Indicate this and don't
                         * unmount filesystems (thus forcing an fsck on reboot).
                         */
                        kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
                        Debugger("busy buffer problem");
#endif /* DDB */
                        tsleep(boot, 0, "shutdn", hz * 5 + 1);
                } else {
                        kprintf("done\n");
                        /*
                         * Unmount filesystems
                         */
                        if (panicstr == NULL)
                                vfs_unmountall();
                }
                tsleep(boot, 0, "shutdn", hz / 10 + 1);
        }

        print_uptime();

        /*
         * Dump before doing post_sync shutdown ops
         */
        crit_enter();
        if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold &&
            dumper.dumper != NULL && !dumping) {
                dumping++;
                dumpsys(&dumper);
        }

        /*
         * Ok, now do things that assume all filesystem activity has
         * been completed.  This will also call the device shutdown
         * methods.
         */
        EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

        /* Now that we're going to really halt the system... */
        EVENTHANDLER_INVOKE(shutdown_final, howto);

        for (;;)
                ;	/* safety against shutdown_reset not working */
        /* NOTREACHED */
}

static int
shutdown_busycount1(struct buf *bp, void *info)
{
        if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
                return (1);
        if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
                return (1);
        return (0);
}

static int
shutdown_busycount2(struct buf *bp, void *info)
{
        if (((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp)) ||
            ((bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI)) {
                /*
                 * Only count buffers undergoing write I/O
                 * on the related vnode.
                 */
                if (bp->b_vp == NULL ||
                    bio_track_active(&bp->b_vp->v_track_write) == 0) {
                        return (0);
                }
#if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
                kprintf("%p dev:?, flags:%08x, loffset:%lld, doffset:%lld\n",
                        bp, bp->b_flags, bp->b_loffset,
                        bp->b_bio2.bio_offset);
#endif
                return (1);
        }
        return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{
        if (howto & RB_HALT) {
                kprintf("\n");
                kprintf("The operating system has halted.\n");
#ifdef _KERNEL_VIRTUAL
                cpu_halt();
#else
                kprintf("Please press any key to reboot.\n\n");
                switch (cngetc()) {
                case -1:		/* No console, just die */
                        cpu_halt();
                        /* NOTREACHED */
                default:
                        howto &= ~RB_HALT;
                        break;
                }
#endif
        }
}

/*
 * Check to see if the system panicked, pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
        int loop;

        if (howto & RB_DUMP) {
                if (PANIC_REBOOT_WAIT_TIME != 0) {
                        if (PANIC_REBOOT_WAIT_TIME != -1) {
                                kprintf("Automatic reboot in %d seconds - "
                                        "press a key on the console to abort\n",
                                        PANIC_REBOOT_WAIT_TIME);
                                for (loop = PANIC_REBOOT_WAIT_TIME * 10;
                                     loop > 0; --loop) {
                                        DELAY(1000 * 100); /* 1/10th second */
                                        /* Did user type a key? */
                                        if (cncheckc() != -1)
                                                break;
                                }
                                if (!loop)
                                        return;
                        }
                } else {	/* zero time specified - reboot NOW */
                        return;
                }
                kprintf("--> Press a key on the console to reboot,\n");
                kprintf("--> or switch off the system now.\n");
                cngetc();
        }
}

/*
 * Everything done, now reset.
 */
static void
shutdown_reset(void *junk, int howto)
{
        kprintf("Rebooting...\n");
        DELAY(1000000);	/* wait 1 sec for kprintf's to complete and be read */
        /* cpu_boot(howto); */ /* doesn't do anything at the moment */
        cpu_reset();
        /* NOTREACHED */ /* assuming reset worked */
}

/*
 * Try to remove FS references in the specified process.  This function
 * is used during shutdown.
 */
static
void
shutdown_cleanup_proc(struct proc *p)
{
        struct filedesc *fdp;
        struct vmspace *vm;

        if (p == NULL)
                return;
        if ((fdp = p->p_fd) != NULL) {
                kern_closefrom(0);
                if (fdp->fd_cdir) {
                        cache_drop(&fdp->fd_ncdir);
                        vrele(fdp->fd_cdir);
                        fdp->fd_cdir = NULL;
                }
                if (fdp->fd_rdir) {
                        cache_drop(&fdp->fd_nrdir);
                        vrele(fdp->fd_rdir);
                        fdp->fd_rdir = NULL;
                }
                if (fdp->fd_jdir) {
                        cache_drop(&fdp->fd_njdir);
                        vrele(fdp->fd_jdir);
                        fdp->fd_jdir = NULL;
                }
        }
        if (p->p_vkernel)
                vkernel_exit(p);
        if (p->p_textvp) {
                vrele(p->p_textvp);
                p->p_textvp = NULL;
        }
        vm = p->p_vmspace;
        if (vm != NULL) {
                pmap_remove_pages(vmspace_pmap(vm),
                                  VM_MIN_USER_ADDRESS,
                                  VM_MAX_USER_ADDRESS);
                vm_map_remove(&vm->vm_map,
                              VM_MIN_USER_ADDRESS,
                              VM_MAX_USER_ADDRESS);
        }
}

/*
 * Magic number for savecore.
 *
 * Exported (symorder) and used at least by savecore(8).
 *
 * Mark it as used so that gcc doesn't optimize it away.
 */
__attribute__((__used__))
static u_long const dumpmag = 0x8fca0101UL;

__attribute__((__used__))
static int dumpsize = 0;		/* also for savecore */

static int dodump = 1;

SYSCTL_INT(_machdep, OID_AUTO, do_dump, CTLFLAG_RW, &dodump, 0,
        "Try to perform coredump on kernel panic");

void
mkdumpheader(struct kerneldumpheader *kdh, char *magic, uint32_t archver,
             uint64_t dumplen, uint32_t blksz)
{
        bzero(kdh, sizeof(*kdh));
        strncpy(kdh->magic, magic, sizeof(kdh->magic));
        strncpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
        kdh->version = htod32(KERNELDUMPVERSION);
        kdh->architectureversion = htod32(archver);
        kdh->dumplength = htod64(dumplen);
        kdh->dumptime = htod64(time_second);
        kdh->blocksize = htod32(blksz);
        strncpy(kdh->hostname, hostname, sizeof(kdh->hostname));
        strncpy(kdh->versionstring, version, sizeof(kdh->versionstring));
        if (panicstr != NULL)
                strncpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
        kdh->parity = kerneldump_parity(kdh);
}
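
/*
 * For illustration only: a minimal sketch of how a dump routine might fill
 * in a kernel dump header with mkdumpheader() before writing it out.  The
 * magic string, dump length and architecture-version constant below are
 * placeholders, not values defined by this file; consult
 * <sys/kerneldump.h> for the real ones.
 *
 *	struct kerneldumpheader kdh;
 *
 *	mkdumpheader(&kdh, "example magic", 0, dumplen, DEV_BSIZE);
 *	// write kdh at the start and end of the dump area, then the data
 */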

static int
setdumpdev(cdev_t dev)
{
        int error;
        int doopen;

        if (dev == NULL) {
                disk_dumpconf(NULL, 0/*off*/);
                return (0);
        }

        /*
         * We have to open the device before we can perform ioctls on it,
         * or the slice/label data may not be present.  Device opens are
         * usually tracked by specfs, but the dump device can be set in
         * early boot and may not be open so this is somewhat of a hack.
         */
        doopen = (dev->si_sysref.refcnt == 1);
        if (doopen) {
                error = dev_dopen(dev, FREAD, S_IFCHR, proc0.p_ucred);
                if (error)
                        return (error);
        }
        error = disk_dumpconf(dev, 1/*on*/);

        return error;
}

/* ARGSUSED */
static void dump_conf (void *dummy);
static void
dump_conf(void *dummy)
{
        char *path;
        cdev_t dev;

        path = kmalloc(MNAMELEN, M_TEMP, M_WAITOK);
        if (TUNABLE_STR_FETCH("dumpdev", path, MNAMELEN) != 0) {
                dev = kgetdiskbyname(path);
                if (dev != NULL)
                        dumpdev = dev;
        }
        kfree(path, M_TEMP);
        if (setdumpdev(dumpdev) != 0)
                dumpdev = NULL;
}

SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL)

static int
sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS)
{
        int error;
        udev_t ndumpdev;

        ndumpdev = dev2udev(dumpdev);
        error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req);
        if (error == 0 && req->newptr != NULL)
                error = setdumpdev(udev2dev(ndumpdev, 0));
        return (error);
}

SYSCTL_PROC(_kern, KERN_DUMPDEV, dumpdev, CTLTYPE_OPAQUE|CTLFLAG_RW,
        0, sizeof dumpdev, sysctl_kern_dumpdev, "T,udev_t", "");
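
/*
 * For illustration only: dump_conf() above fetches the dump device from the
 * "dumpdev" loader tunable at boot, and the kern.dumpdev sysctl handler lets
 * it be changed at runtime.  The device path below is a hypothetical
 * example, not a recommendation:
 *
 *	# in /boot/loader.conf
 *	dumpdev="/dev/ad0s1b"
 */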

/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
        int bootopt, newpanic;
        __va_list ap;
        static char buf[256];

#ifdef SMP
        /*
         * If a panic occurs on multiple cpus before the first is able to
         * halt the other cpus, only one cpu is allowed to take the panic.
         * Attempt to be verbose about this situation but if the kprintf()
         * itself panics don't let us overrun the kernel stack.
         *
         * Be very nasty about descheduling our thread at the lowest
         * level possible in an attempt to freeze the thread without
         * inducing further panics.
         *
         * Bumping gd_trap_nesting_level will also bypass assertions in
         * lwkt_switch() and allow us to switch away even if we are a
         * FAST interrupt or IPI.
         */
        if (atomic_poll_acquire_int(&panic_cpu_interlock)) {
                panic_cpu_gd = mycpu;
        } else if (panic_cpu_gd != mycpu) {
                crit_enter();
                ++mycpu->gd_trap_nesting_level;
                if (mycpu->gd_trap_nesting_level < 25) {
                        kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
                                mycpu->gd_cpuid, curthread);
                }
                curthread->td_release = NULL;	/* be a grinch */
                for (;;) {
                        lwkt_deschedule_self(curthread);
                        lwkt_switch();
                }
                /* NOT REACHED */
                /* --mycpu->gd_trap_nesting_level */
                /* crit_exit() */
        }
#endif
        bootopt = RB_AUTOBOOT | RB_DUMP;
        if (sync_on_panic == 0)
                bootopt |= RB_NOSYNC;
        newpanic = 0;
        if (panicstr)
                bootopt |= RB_NOSYNC;
        else {
                panicstr = fmt;
                newpanic = 1;
        }

        __va_start(ap, fmt);
        kvsnprintf(buf, sizeof(buf), fmt, ap);
        if (panicstr == fmt)
                panicstr = buf;
        __va_end(ap);
        kprintf("panic: %s\n", buf);
#ifdef SMP
        /* two separate prints in case of an unmapped page and trap */
        kprintf("mp_lock = %08x; ", mp_lock);
        kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
        led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
        wdog_disable();
#endif

#if defined(DDB)
        if (newpanic && trace_on_panic)
                print_backtrace();
        if (debugger_on_panic)
                Debugger("panic");
#endif
        boot(bootopt);
}

/*
 * Support for poweroff delay.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
        &poweroff_delay, 0, "");

static void
poweroff_wait(void *junk, int howto)
{
        if (!(howto & RB_POWEROFF) || poweroff_delay <= 0)
                return;
        DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of the shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
        &kproc_shutdown_wait, 0, "");

void
shutdown_kproc(void *arg, int howto)
{
        struct thread *td;
        struct proc *p;
        int error;

        if (panicstr)
                return;

        td = (struct thread *)arg;
        if ((p = td->td_proc) != NULL) {
                kprintf("Waiting (max %d seconds) for system process `%s' to stop...",
                        kproc_shutdown_wait, p->p_comm);
        } else {
                kprintf("Waiting (max %d seconds) for system thread %s to stop...",
                        kproc_shutdown_wait, td->td_comm);
        }
        error = suspend_kproc(td, kproc_shutdown_wait * hz);

        if (error == EWOULDBLOCK)
                kprintf("timed out\n");
        else
                kprintf("stopped\n");
}

/* Registration of dumpers */
int
set_dumper(struct dumperinfo *di)
{
        if (di == NULL) {
                bzero(&dumper, sizeof(dumper));
                return 0;
        }

        if (dumper.dumper != NULL)
                return (EBUSY);

        dumper = *di;
        return 0;
}

#if defined (_KERNEL_VIRTUAL)
/* VKERNELs don't support dumps */
void
dumpsys(struct dumperinfo *di __unused)
{
        kprintf("VKERNEL doesn't support dumps\n");
}
#endif
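
/*
 * For illustration only: a sketch of how a disk driver might register
 * itself as the dump target with set_dumper().  The mydisk_dump() routine
 * and its argument list are hypothetical; only the `dumper' field and
 * set_dumper() itself are relied on by this file, so check
 * <sys/kerneldump.h> for the actual dumperinfo layout before copying this.
 *
 *	static int
 *	mydisk_dump(void *priv, void *virtual, vm_offset_t physical,
 *		    off_t offset, size_t length)
 *	{
 *		// write `length' bytes from `virtual' at media offset `offset'
 *		return (0);
 *	}
 *
 *	static struct dumperinfo di;
 *
 *	di.dumper = mydisk_dump;
 *	// fill in the remaining dumperinfo fields per <sys/kerneldump.h>
 *	set_dumper(&di);
 */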