/*-
 * Copyright (c) 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_shutdown.c     8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_shutdown.c,v 1.72.2.12 2002/02/21 19:15:10 dillon Exp $
 * $DragonFly: src/sys/kern/kern_shutdown.c,v 1.55 2007/05/27 23:28:29 dillon Exp $
 */

#include "opt_ddb.h"
#include "opt_ddb_trace.h"
#include "opt_hw_wdog.h"
#include "opt_panic.h"
#include "opt_show_busybufs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/buf.h>
#include <sys/diskslice.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/fcntl.h>          /* FREAD */
#include <sys/stat.h>           /* S_IFCHR */
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/conf.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/cons.h>
#include <sys/shm.h>
#include <sys/kern_syscall.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <sys/buf2.h>

#include <machine/pcb.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/smp.h>        /* smp_active_mask, cpuid */
#include <machine/vmparam.h>

#include <sys/signalvar.h>

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15       /* default to 15 seconds */
#endif

/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compilers.  We use the machine version to stay
 * within the confines of the kernel header files.
 */
#include <machine/stdarg.h>

#ifdef DDB
#ifdef DDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic, CTLFLAG_RW,
        &debugger_on_panic, 0, "Run debugger on kernel panic");

extern void db_print_backtrace(void);

#ifdef DDB_TRACE
int trace_on_panic = 1;
#else
int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic, CTLFLAG_RW,
        &trace_on_panic, 0, "Print stack trace on kernel panic");
#endif

static int sync_on_panic = 1;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW,
        &sync_on_panic, 0, "Do a sync before rebooting from a panic");

SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");

#ifdef HW_WDOG
/*
 * If there is a hardware watchdog, point this at the function needed to
 * hold it off.  It is needed when the kernel has to do some lengthy
 * operation, e.g. in wd.c when dumping core.  It is most annoying to have
 * your precious core dump only half written because the wdog kicked in.
 */
watchdog_tickle_fn wdog_tickler = NULL;
#endif  /* HW_WDOG */

/*
 * Variable panicstr contains the argument to the first call to panic; it is
 * used as a flag to indicate that the kernel has already called panic.
 */
const char *panicstr;

int dumping;                            /* system is dumping */
#ifdef SMP
u_int panic_cpu_interlock;              /* panic interlock */
globaldata_t panic_cpu_gd;              /* which cpu took the panic */
#endif

int bootverbose = 0;            /* note: assignment to force non-bss */
int cold = 1;                   /* note: assignment to force non-bss */
int dumplo;                     /* OBSOLETE - savecore compat */
u_int64_t dumplo64;

static void boot(int) __dead2;
static void dumpsys(void);
static int setdumpdev(cdev_t dev);
static void poweroff_wait(void *, int);
static void print_uptime(void);
static void shutdown_halt(void *junk, int howto);
static void shutdown_panic(void *junk, int howto);
static void shutdown_reset(void *junk, int howto);
static int shutdown_busycount1(struct buf *bp, void *info);
static int shutdown_busycount2(struct buf *bp, void *info);
static void shutdown_cleanup_proc(struct proc *p);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{
        EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL, SHUTDOWN_PRI_FIRST);
        EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL, SHUTDOWN_PRI_LAST + 100);
        EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL, SHUTDOWN_PRI_LAST + 100);
        EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL, SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_BOOT2_MACHDEP, SI_ORDER_ANY, shutdown_conf, NULL)

/* ARGSUSED */

/*
 * The system call that results in a reboot.
 */
int
sys_reboot(struct reboot_args *uap)
{
        struct thread *td = curthread;
        int error;

        if ((error = suser(td)))
                return (error);

        boot(uap->opt);
        return (0);
}

/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
 */
static int shutdown_howto = 0;

void
shutdown_nice(int howto)
{
        shutdown_howto = howto;

        /* Send a signal to init(8) and have it shut down the world */
        if (initproc != NULL) {
                ksignal(initproc, SIGINT);
        } else {
                /* No init(8) running, so simply reboot */
                boot(RB_NOSYNC);
        }
        return;
}

static int waittime = -1;
static struct thread *dumpthread;
static struct pcb dumppcb;

static void
print_uptime(void)
{
        int f;
        struct timespec ts;

        getnanouptime(&ts);
        kprintf("Uptime: ");
        f = 0;
        if (ts.tv_sec >= 86400) {
                kprintf("%ldd", ts.tv_sec / 86400);
                ts.tv_sec %= 86400;
                f = 1;
        }
        if (f || ts.tv_sec >= 3600) {
                kprintf("%ldh", ts.tv_sec / 3600);
                ts.tv_sec %= 3600;
                f = 1;
        }
        if (f || ts.tv_sec >= 60) {
                kprintf("%ldm", ts.tv_sec / 60);
                ts.tv_sec %= 60;
                f = 1;
        }
        kprintf("%lds\n", ts.tv_sec);
}

/*
 * Go through the rigmarole of shutting down...
 * This used to be in machdep.c, but I'll be damned if I could see
 * anything machine dependent in it.
 */
static void
boot(int howto)
{
        /*
         * Get rid of any user scheduler baggage and then give
         * us a high priority.
         */
        if (curthread->td_release)
                curthread->td_release(curthread);
        lwkt_setpri_self(TDPRI_MAX);

        /* collect extra flags that shutdown_nice might have set */
        howto |= shutdown_howto;

#ifdef SMP
        /*
         * We really want to shut down on the BSP.  Subsystems such as ACPI
         * can't power down the box otherwise.
         */
        if (smp_active_mask > 1) {
                kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
        }
        if (panicstr == NULL && mycpu->gd_cpuid != 0) {
                kprintf("Switching to cpu #0 for shutdown\n");
                lwkt_setcpu_self(globaldata_find(0));
        }
#endif
        /*
         * Do any callouts that should be done BEFORE syncing the filesystems.
         */
        EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

        /*
         * Try to get rid of any remaining FS references.  The calling
         * process, proc0, and init may still hold references.  The
         * VFS cache subsystem may still hold a root reference to root.
         *
         * XXX this needs work.  We really need to SIGSTOP all remaining
         * processes in order to avoid blowups due to proc0's filesystem
         * references going away.  For now just make sure that the init
         * process is stopped.
         */
        if (panicstr == NULL) {
                shutdown_cleanup_proc(curproc);
                shutdown_cleanup_proc(&proc0);
                if (initproc) {
                        if (initproc != curproc) {
                                ksignal(initproc, SIGSTOP);
                                tsleep(boot, 0, "shutdn", hz / 20);
                        }
                        shutdown_cleanup_proc(initproc);
                }
                vfs_cache_setroot(NULL, NULL);
        }

        /*
         * Now sync filesystems
         */
        if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
                int iter, nbusy, pbusy;

                waittime = 0;
                kprintf("\nsyncing disks... ");

                sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */

                /*
                 * With soft updates, some buffers that are
                 * written will be remarked as dirty until other
                 * buffers are written.
                 */
                for (iter = pbusy = 0; iter < 20; iter++) {
                        nbusy = scan_all_buffers(shutdown_busycount1, NULL);
                        if (nbusy == 0)
                                break;
                        kprintf("%d ", nbusy);
                        if (nbusy < pbusy)
                                iter = 0;
                        pbusy = nbusy;
                        /*
                         * XXX:
                         * Process soft update work queue if buffers don't sync
                         * after 6 iterations by permitting the syncer to run.
                         */
                        if (iter > 5 && bioops.io_sync)
                                (*bioops.io_sync)(NULL);
                        sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
                        tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
                }
                kprintf("\n");
                /*
                 * Count only busy local buffers to prevent forcing
                 * a fsck if we're just a client of a wedged NFS server
                 */
                nbusy = scan_all_buffers(shutdown_busycount2, NULL);
                if (nbusy) {
                        /*
                         * Failed to sync all blocks.  Indicate this and don't
                         * unmount filesystems (thus forcing an fsck on reboot).
                         */
                        kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
                        Debugger("busy buffer problem");
#endif /* DDB */
                        tsleep(boot, 0, "shutdn", hz * 5 + 1);
                } else {
                        kprintf("done\n");
                        /*
                         * Unmount filesystems
                         */
                        if (panicstr == NULL)
                                vfs_unmountall();
                }
                tsleep(boot, 0, "shutdn", hz / 10 + 1);
        }

        print_uptime();

        /*
         * Ok, now do things that assume all filesystem activity has
         * been completed.
         */
        EVENTHANDLER_INVOKE(shutdown_post_sync, howto);
        crit_enter();
        if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold)
                dumpsys();

        /* Now that we're going to really halt the system... */
        EVENTHANDLER_INVOKE(shutdown_final, howto);

        for (;;) ;      /* safety against shutdown_reset not working */
        /* NOTREACHED */
}

static int
shutdown_busycount1(struct buf *bp, void *info)
{
        if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
                return (1);
        if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
                return (1);
        return (0);
}

static int
shutdown_busycount2(struct buf *bp, void *info)
{
        if (((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp)) ||
            ((bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI)) {
                /*
                 * Only count buffers undergoing write I/O
                 * on the related vnode.
                 */
                if (bp->b_vp == NULL ||
                    bp->b_vp->v_track_write.bk_active == 0) {
                        return (0);
                }
#if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
                kprintf("%p dev:?, flags:%08x, loffset:%lld, doffset:%lld\n",
                        bp, bp->b_flags, bp->b_loffset,
                        bp->b_bio2.bio_offset);
#endif
                return (1);
        }
        return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{
        if (howto & RB_HALT) {
                kprintf("\n");
                kprintf("The operating system has halted.\n");
#ifdef _KERNEL_VIRTUAL
                cpu_halt();
#else
                kprintf("Please press any key to reboot.\n\n");
                switch (cngetc()) {
                case -1:                /* No console, just die */
                        cpu_halt();
                        /* NOTREACHED */
                default:
                        howto &= ~RB_HALT;
                        break;
                }
#endif
        }
}

/*
 * Check to see if the system panicked, pause, and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
        int loop;

        if (howto & RB_DUMP) {
                if (PANIC_REBOOT_WAIT_TIME != 0) {
                        if (PANIC_REBOOT_WAIT_TIME != -1) {
                                kprintf("Automatic reboot in %d seconds - "
                                        "press a key on the console to abort\n",
                                        PANIC_REBOOT_WAIT_TIME);
                                for (loop = PANIC_REBOOT_WAIT_TIME * 10;
                                     loop > 0; --loop) {
                                        DELAY(1000 * 100); /* 1/10th second */
                                        /* Did user type a key? */
                                        if (cncheckc() != -1)
                                                break;
                                }
                                if (!loop)
                                        return;
                        }
                } else { /* zero time specified - reboot NOW */
                        return;
                }
                kprintf("--> Press a key on the console to reboot,\n");
                kprintf("--> or switch off the system now.\n");
                cngetc();
        }
}

/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{
        kprintf("Rebooting...\n");
        DELAY(1000000); /* wait 1 sec for kprintf's to complete and be read */
        /* cpu_boot(howto); */ /* doesn't do anything at the moment */
        cpu_reset();
        /* NOTREACHED */ /* assuming reset worked */
}

/*
 * Try to remove FS references in the specified process.  This function
 * is used during shutdown.
 */
static
void
shutdown_cleanup_proc(struct proc *p)
{
        struct filedesc *fdp;
        struct vmspace *vm;

        if (p == NULL)
                return;
        if ((fdp = p->p_fd) != NULL) {
                kern_closefrom(0);
                if (fdp->fd_cdir) {
                        cache_drop(&fdp->fd_ncdir);
                        vrele(fdp->fd_cdir);
                        fdp->fd_cdir = NULL;
                }
                if (fdp->fd_rdir) {
                        cache_drop(&fdp->fd_nrdir);
                        vrele(fdp->fd_rdir);
                        fdp->fd_rdir = NULL;
                }
                if (fdp->fd_jdir) {
                        cache_drop(&fdp->fd_njdir);
                        vrele(fdp->fd_jdir);
                        fdp->fd_jdir = NULL;
                }
        }
        if (p->p_vkernel)
                vkernel_exit(p);
        if (p->p_textvp) {
                vrele(p->p_textvp);
                p->p_textvp = NULL;
        }
        vm = p->p_vmspace;
        if (vm != NULL) {
                pmap_remove_pages(vmspace_pmap(vm),
                                  VM_MIN_USER_ADDRESS,
                                  VM_MAX_USER_ADDRESS);
                vm_map_remove(&vm->vm_map,
                              VM_MIN_USER_ADDRESS,
                              VM_MAX_USER_ADDRESS);
        }
}

/*
 * Magic number for savecore
 *
 * exported (symorder) and used at least by savecore(8)
 *
 * Mark it as used so that gcc doesn't optimize it away.
 */
__attribute__((__used__))
        static u_long const dumpmag = 0x8fca0101UL;

static int dumpsize = 0;                /* also for savecore */

static int dodump = 1;

SYSCTL_INT(_machdep, OID_AUTO, do_dump, CTLFLAG_RW, &dodump, 0,
    "Try to perform coredump on kernel panic");

static int
setdumpdev(cdev_t dev)
{
        struct partinfo pinfo;
        u_int64_t newdumplo;
        int error;
        int doopen;

        if (dev == NULL) {
                dumpdev = dev;
                return (0);
        }
        bzero(&pinfo, sizeof(pinfo));

        /*
         * We have to open the device before we can perform ioctls on it,
         * or the slice/label data may not be present.  Device opens are
         * usually tracked by specfs, but the dump device can be set in
         * early boot and may not be open, so this is somewhat of a hack.
         */
        doopen = (dev->si_sysref.refcnt == 1);
        if (doopen) {
                error = dev_dopen(dev, FREAD, S_IFCHR, proc0.p_ucred);
                if (error)
                        return (error);
        }
        error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0, proc0.p_ucred);
        if (doopen)
                dev_dclose(dev, FREAD, S_IFCHR);
        if (error || pinfo.media_blocks == 0 || pinfo.media_blksize == 0)
                return (ENXIO);

        newdumplo = pinfo.media_blocks -
                    ((u_int64_t)Maxmem * PAGE_SIZE / DEV_BSIZE);
        if ((int64_t)newdumplo < (int64_t)pinfo.skip_bsdlabel)
                return (ENOSPC);
        dumpdev = dev;
        dumplo64 = newdumplo;
        return (0);
}


/* ARGSUSED */
static void dump_conf(void *dummy);
static void
dump_conf(void *dummy)
{
        char *path;
        cdev_t dev;

        path = kmalloc(MNAMELEN, M_TEMP, M_WAITOK);
        if (TUNABLE_STR_FETCH("dumpdev", path, MNAMELEN) != 0) {
                dev = kgetdiskbyname(path);
                if (dev != NULL)
                        dumpdev = dev;
        }
        kfree(path, M_TEMP);
        if (setdumpdev(dumpdev) != 0)
                dumpdev = NULL;
}

SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL)

static int
sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS)
{
        int error;
        udev_t ndumpdev;

        ndumpdev = dev2udev(dumpdev);
        error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req);
        if (error == 0 && req->newptr != NULL)
                error = setdumpdev(udev2dev(ndumpdev, 0));
        return (error);
}

SYSCTL_PROC(_kern, KERN_DUMPDEV, dumpdev, CTLTYPE_OPAQUE|CTLFLAG_RW,
        0, sizeof dumpdev, sysctl_kern_dumpdev, "T,udev_t", "");

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
static void
dumpsys(void)
{
        int error;

        savectx(&dumppcb);
        dumpthread = curthread;
        if (dumping++) {
                kprintf("Dump already in progress, bailing...\n");
                return;
        }
        if (!dodump)
                return;
        if (dumpdev == NULL)
                return;
        dumpsize = Maxmem;
        kprintf("\ndumping to dev %s, blockno %lld\n",
                devtoname(dumpdev), dumplo64);
        kprintf("dump ");
        error = dev_ddump(dumpdev);
        if (error == 0) {
                kprintf("succeeded\n");
                return;
        }
        kprintf("failed, reason: ");
        switch (error) {
        case ENOSYS:
        case ENODEV:
                kprintf("device doesn't support a dump routine\n");
                break;

        case ENXIO:
                kprintf("device bad\n");
                break;

        case EFAULT:
                kprintf("device not ready\n");
                break;

        case EINVAL:
                kprintf("area improper\n");
                break;

        case EIO:
                kprintf("i/o error\n");
                break;

        case EINTR:
                kprintf("aborted from console\n");
                break;

        default:
                kprintf("unknown, error = %d\n", error);
                break;
        }
}

int
dumpstatus(vm_offset_t addr, off_t count)
{
        int c;

        if (addr % (1024 * 1024) == 0) {
#ifdef HW_WDOG
                if (wdog_tickler)
                        (*wdog_tickler)();
#endif
                kprintf("%ld ", (long)(count / (1024 * 1024)));
        }

        if ((c = cncheckc()) == 0x03)
                return -1;
        else if (c != -1)
                kprintf("[CTRL-C to abort] ");

        return 0;
}

/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
        int bootopt, newpanic;
        __va_list ap;
        static char buf[256];

#ifdef SMP
        /*
         * If a panic occurs on multiple cpus before the first is able to
         * halt the other cpus, only one cpu is allowed to take the panic.
         * Attempt to be verbose about this situation but if the kprintf()
         * itself panics don't let us overrun the kernel stack.
         *
         * Be very nasty about descheduling our thread at the lowest
         * level possible in an attempt to freeze the thread without
         * inducing further panics.
         *
         * Bumping gd_trap_nesting_level will also bypass assertions in
         * lwkt_switch() and allow us to switch away even if we are a
         * FAST interrupt or IPI.
         */
        if (atomic_poll_acquire_int(&panic_cpu_interlock)) {
                panic_cpu_gd = mycpu;
        } else if (panic_cpu_gd != mycpu) {
                crit_enter();
                ++mycpu->gd_trap_nesting_level;
                if (mycpu->gd_trap_nesting_level < 25) {
                        kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
                                mycpu->gd_cpuid, curthread);
                }
                curthread->td_release = NULL;   /* be a grinch */
                for (;;) {
                        lwkt_deschedule_self(curthread);
                        lwkt_switch();
                }
                /* NOT REACHED */
                /* --mycpu->gd_trap_nesting_level */
                /* crit_exit() */
        }
#endif
        bootopt = RB_AUTOBOOT | RB_DUMP;
        if (sync_on_panic == 0)
                bootopt |= RB_NOSYNC;
        newpanic = 0;
        if (panicstr)
                bootopt |= RB_NOSYNC;
        else {
                panicstr = fmt;
                newpanic = 1;
        }

        __va_start(ap, fmt);
        kvsnprintf(buf, sizeof(buf), fmt, ap);
        if (panicstr == fmt)
                panicstr = buf;
        __va_end(ap);
        kprintf("panic: %s\n", buf);
#ifdef SMP
        /* three separate prints in case of an unmapped page and trap */
        kprintf("mp_lock = %08x; ", mp_lock);
        kprintf("cpuid = %d; ", mycpu->gd_cpuid);
        kprintf("lapic.id = %08x\n", lapic.id);
#endif

#if defined(DDB)
        if (newpanic && trace_on_panic)
                db_print_backtrace();
        if (debugger_on_panic)
                Debugger("panic");
#endif
        boot(bootopt);
}

/*
 * Support for poweroff delay.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
        &poweroff_delay, 0, "");

static void
poweroff_wait(void *junk, int howto)
{
        if (!(howto & RB_POWEROFF) || poweroff_delay <= 0)
                return;
        DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of the shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
        &kproc_shutdown_wait, 0, "");

void
shutdown_kproc(void *arg, int howto)
{
        struct thread *td;
        struct proc *p;
        int error;

        if (panicstr)
                return;

        td = (struct thread *)arg;
        if ((p = td->td_proc) != NULL) {
                kprintf("Waiting (max %d seconds) for system process `%s' to stop...",
                        kproc_shutdown_wait, p->p_comm);
        } else {
                kprintf("Waiting (max %d seconds) for system thread %s to stop...",
                        kproc_shutdown_wait, td->td_comm);
        }
        error = suspend_kproc(td, kproc_shutdown_wait * hz);

        if (error == EWOULDBLOCK)
                kprintf("timed out\n");
        else
                kprintf("stopped\n");
}