1 /*-
2  * Copyright (c) 1986, 1988, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
39  * $FreeBSD: src/sys/kern/kern_shutdown.c,v 1.72.2.12 2002/02/21 19:15:10 dillon Exp $
40  * $DragonFly: src/sys/kern/kern_shutdown.c,v 1.62 2008/01/05 13:23:48 corecode Exp $
41  */
42 
43 #include "opt_ddb.h"
44 #include "opt_ddb_trace.h"
45 #include "opt_hw_wdog.h"
46 #include "opt_panic.h"
47 #include "opt_show_busybufs.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/eventhandler.h>
52 #include <sys/buf.h>
53 #include <sys/disk.h>
54 #include <sys/diskslice.h>
55 #include <sys/reboot.h>
56 #include <sys/proc.h>
57 #include <sys/priv.h>
58 #include <sys/fcntl.h>		/* FREAD	*/
59 #include <sys/stat.h>		/* S_IFCHR	*/
60 #include <sys/vnode.h>
61 #include <sys/kernel.h>
62 #include <sys/kerneldump.h>
63 #include <sys/kthread.h>
64 #include <sys/malloc.h>
65 #include <sys/mount.h>
66 #include <sys/queue.h>
67 #include <sys/sysctl.h>
68 #include <sys/vkernel.h>
69 #include <sys/conf.h>
70 #include <sys/sysproto.h>
71 #include <sys/device.h>
72 #include <sys/cons.h>
73 #include <sys/shm.h>
75 #include <sys/kern_syscall.h>
76 #include <vm/vm_map.h>
77 #include <vm/pmap.h>
78 
79 #include <sys/thread2.h>
80 #include <sys/buf2.h>
81 #include <sys/mplock2.h>
82 
83 #include <machine/clock.h>
84 #include <machine/md_var.h>
85 #include <machine/smp.h>		/* smp_active_mask, cpuid */
86 #include <machine/vmparam.h>
87 #include <machine/thread.h>
88 
89 #include <sys/signalvar.h>
90 
91 #include <sys/wdog.h>
92 #include <dev/misc/gpio/gpio.h>
93 
94 #ifndef PANIC_REBOOT_WAIT_TIME
95 #define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
96 #endif
97 
98 /*
99  * Note that stdarg.h and the ANSI style va_start macro are used for both
100  * ANSI and traditional C compilers.  We use the machine version to stay
101  * within the confines of the kernel header files.
102  */
103 #include <machine/stdarg.h>
104 
105 #ifdef DDB
106 #include <ddb/ddb.h>
107 #ifdef DDB_UNATTENDED
108 int debugger_on_panic = 0;
109 #else
110 int debugger_on_panic = 1;
111 #endif
112 SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic, CTLFLAG_RW,
113 	&debugger_on_panic, 0, "Run debugger on kernel panic");
114 
115 #ifdef DDB_TRACE
116 int trace_on_panic = 1;
117 #else
118 int trace_on_panic = 0;
119 #endif
120 SYSCTL_INT(_debug, OID_AUTO, trace_on_panic, CTLFLAG_RW,
121 	&trace_on_panic, 0, "Print stack trace on kernel panic");
122 #endif
123 
124 static int sync_on_panic = 0;
125 SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW,
126 	&sync_on_panic, 0, "Do a sync before rebooting from a panic");
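
/*
 * Illustrative usage note (added, not in the original source): the knobs
 * above are normally adjusted from userland with sysctl(8), e.g.:
 *
 *	sysctl debug.debugger_on_panic=0
 *	sysctl debug.trace_on_panic=1
 *	sysctl kern.sync_on_panic=1
 *
 * Which of them exist depends on the DDB/DDB_TRACE kernel options.
 */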
127 
128 SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");
129 
130 #ifdef	HW_WDOG
131 /*
132  * If there is a hardware watchdog, point this at the function needed to
133  * hold it off.  This is needed when the kernel must perform lengthy
134  * operations, e.g. in wd.c while dumping core; it is most annoying to
135  * have your precious core dump only half written because the watchdog
136  * kicked in.
137  */
138 watchdog_tickle_fn wdog_tickler = NULL;
139 #endif	/* HW_WDOG */
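
/*
 * Illustrative sketch (added, not part of the original file): a watchdog
 * driver could install its tickle routine along these lines.  The driver
 * and function names are hypothetical.
 */
#if 0
static void
examplewd_tickle(void)
{
	/* reset the hardware watchdog timer here so that it does not
	 * fire while the kernel is busy, e.g. halfway through a dump */
}

static void
examplewd_attach(void)
{
#ifdef HW_WDOG
	wdog_tickler = examplewd_tickle;
#endif
}
#endif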
140 
141 /*
142  * Variable panicstr contains the argument to the first call to panic();
143  * it is used as a flag to indicate that the kernel has already panicked.
144  */
145 const char *panicstr;
146 
147 int dumping;				/* system is dumping */
148 static struct dumperinfo dumper;	/* selected dumper */
149 
150 #ifdef SMP
151 u_int panic_cpu_interlock;		/* panic interlock */
152 globaldata_t panic_cpu_gd;		/* which cpu took the panic */
153 #endif
154 
155 int bootverbose = 0;			/* note: assignment to force non-bss */
156 SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW,
157 	   &bootverbose, 0, "Verbose kernel messages");
158 
159 int cold = 1;				/* note: assignment to force non-bss */
160 int dumplo;				/* OBSOLETE - savecore compat */
161 u_int64_t dumplo64;
162 
163 static void boot (int) __dead2;
164 static int setdumpdev (cdev_t dev);
165 static void poweroff_wait (void *, int);
166 static void print_uptime (void);
167 static void shutdown_halt (void *junk, int howto);
168 static void shutdown_panic (void *junk, int howto);
169 static void shutdown_reset (void *junk, int howto);
170 static int shutdown_busycount1(struct buf *bp, void *info);
171 static int shutdown_busycount2(struct buf *bp, void *info);
172 static void shutdown_cleanup_proc(struct proc *p);
173 
174 /* register various local shutdown events */
175 static void
176 shutdown_conf(void *unused)
177 {
178 	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL, SHUTDOWN_PRI_FIRST);
179 	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL, SHUTDOWN_PRI_LAST + 100);
180 	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL, SHUTDOWN_PRI_LAST + 100);
181 	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL, SHUTDOWN_PRI_LAST + 200);
182 }
183 
184 SYSINIT(shutdown_conf, SI_BOOT2_MACHDEP, SI_ORDER_ANY, shutdown_conf, NULL)
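
/*
 * Illustrative sketch (added, not part of the original file): drivers and
 * other subsystems hook the same shutdown events from their attach code.
 * The function, argument and priority names below are assumptions.
 */
#if 0
static void
exampledev_shutdown(void *arg, int howto)
{
	/* quiesce the hardware; "howto" carries the RB_* flags */
}

static void
exampledev_attach(void *softc)
{
	EVENTHANDLER_REGISTER(shutdown_post_sync, exampledev_shutdown,
	    softc, SHUTDOWN_PRI_DEFAULT);
}
#endif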
185 
186 /* ARGSUSED */
187 
188 /*
189  * The system call that results in a reboot
190  *
191  * MPALMOSTSAFE
192  */
193 int
194 sys_reboot(struct reboot_args *uap)
195 {
196 	struct thread *td = curthread;
197 	int error;
198 
199 	if ((error = priv_check(td, PRIV_REBOOT)))
200 		return (error);
201 
202 	get_mplock();
203 	boot(uap->opt);
204 	rel_mplock();
205 	return (0);
206 }
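
/*
 * Illustrative userland counterpart (added, not part of the original
 * file): the reboot(2) library call maps onto the system call above.
 */
#if 0
#include <sys/reboot.h>
#include <unistd.h>

int
main(void)
{
	/* power the machine down without syncing; requires PRIV_REBOOT */
	return (reboot(RB_POWEROFF | RB_NOSYNC));
}
#endif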
207 
208 /*
209  * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
210  */
211 static int shutdown_howto = 0;
212 
213 void
214 shutdown_nice(int howto)
215 {
216 	shutdown_howto = howto;
217 
218 	/* Send a signal to init(8) and have it shut down the world */
219 	if (initproc != NULL) {
220 		ksignal(initproc, SIGINT);
221 	} else {
222 		/* No init(8) running, so simply reboot */
223 		boot(RB_NOSYNC);
224 	}
225 	return;
226 }
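
/*
 * Illustrative call site (added, not part of the original file): an event
 * source such as an ACPI power-button or console keyboard handler would
 * typically do
 *
 *	shutdown_nice(RB_POWEROFF);
 *
 * and let init(8) bring the system down cleanly.
 */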
227 static int	waittime = -1;
228 struct pcb dumppcb;
229 struct thread *dumpthread;
230 
231 static void
232 print_uptime(void)
233 {
234 	int f;
235 	struct timespec ts;
236 
237 	getnanouptime(&ts);
238 	kprintf("Uptime: ");
239 	f = 0;
240 	if (ts.tv_sec >= 86400) {
241 		kprintf("%ldd", ts.tv_sec / 86400);
242 		ts.tv_sec %= 86400;
243 		f = 1;
244 	}
245 	if (f || ts.tv_sec >= 3600) {
246 		kprintf("%ldh", ts.tv_sec / 3600);
247 		ts.tv_sec %= 3600;
248 		f = 1;
249 	}
250 	if (f || ts.tv_sec >= 60) {
251 		kprintf("%ldm", ts.tv_sec / 60);
252 		ts.tv_sec %= 60;
253 		f = 1;
254 	}
255 	kprintf("%lds\n", ts.tv_sec);
256 }
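
/*
 * Worked example (added for clarity): an uptime of 93784 seconds is
 * 1*86400 + 2*3600 + 3*60 + 4, so the code above prints "Uptime: 1d2h3m4s".
 */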
257 
258 /*
259  * Go through the rigmarole of shutting down.  This used to be in
260  * machdep.c but I'll be damned if I could see anything machine
261  * dependent in it.
262  */
263 static void
264 boot(int howto)
265 {
266 	/*
267 	 * Get rid of any user scheduler baggage and then give
268 	 * us a high priority.
269 	 */
270 	if (curthread->td_release)
271 		curthread->td_release(curthread);
272 	lwkt_setpri_self(TDPRI_MAX);
273 
274 	/* collect extra flags that shutdown_nice might have set */
275 	howto |= shutdown_howto;
276 
277 #ifdef SMP
278 	/*
279 	 * We really want to shutdown on the BSP.  Subsystems such as ACPI
280 	 * can't power-down the box otherwise.
281 	 */
282 	if (smp_active_mask > 1) {
283 		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
284 	}
285 	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
286 		kprintf("Switching to cpu #0 for shutdown\n");
287 		lwkt_setcpu_self(globaldata_find(0));
288 	}
289 #endif
290 	/*
291 	 * Do any callouts that should be done BEFORE syncing the filesystems.
292 	 */
293 	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);
294 
295 	/*
296 	 * Try to get rid of any remaining FS references.  The calling
297 	 * process, proc0, and init may still hold references.  The
298 	 * VFS cache subsystem may still hold a root reference to root.
299 	 *
300 	 * XXX this needs work.  We really need to SIGSTOP all remaining
301 	 * processes in order to avoid blowups due to proc0's filesystem
302 	 * references going away.  For now just make sure that the init
303 	 * process is stopped.
304 	 */
305 	if (panicstr == NULL) {
306 		shutdown_cleanup_proc(curproc);
307 		shutdown_cleanup_proc(&proc0);
308 		if (initproc) {
309 			if (initproc != curproc) {
310 				ksignal(initproc, SIGSTOP);
311 				tsleep(boot, 0, "shutdn", hz / 20);
312 			}
313 			shutdown_cleanup_proc(initproc);
314 		}
315 		vfs_cache_setroot(NULL, NULL);
316 	}
317 
318 	/*
319 	 * Now sync filesystems
320 	 */
321 	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
322 		int iter, nbusy, pbusy;
323 
324 		waittime = 0;
325 		kprintf("\nsyncing disks... ");
326 
327 		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */
328 
329 		/*
330 		 * With soft updates, some buffers that are
331 		 * written will be remarked as dirty until other
332 		 * buffers are written.
333 		 */
334 		for (iter = pbusy = 0; iter < 20; iter++) {
335 			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
336 			if (nbusy == 0)
337 				break;
338 			kprintf("%d ", nbusy);
339 			if (nbusy < pbusy)
340 				iter = 0;
341 			pbusy = nbusy;
342 			/*
343 			 * XXX:
344 			 * Process soft update work queue if buffers don't sync
345 			 * after 6 iterations by permitting the syncer to run.
346 			 */
347 			if (iter > 5)
348 				bio_ops_sync(NULL);
349 
350 			sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
351 			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
352 		}
353 		kprintf("\n");
354 		/*
355 		 * Count only busy local buffers to prevent forcing
356 		 * an fsck if we're just a client of a wedged NFS server.
357 		 */
358 		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
359 		if (nbusy) {
360 			/*
361 			 * Failed to sync all blocks. Indicate this and don't
362 			 * unmount filesystems (thus forcing an fsck on reboot).
363 			 */
364 			kprintf("giving up on %d buffers\n", nbusy);
365 #ifdef DDB
366 			Debugger("busy buffer problem");
367 #endif /* DDB */
368 			tsleep(boot, 0, "shutdn", hz * 5 + 1);
369 		} else {
370 			kprintf("done\n");
371 			/*
372 			 * Unmount filesystems
373 			 */
374 			if (panicstr == NULL)
375 				vfs_unmountall();
376 		}
377 		tsleep(boot, 0, "shutdn", hz / 10 + 1);
378 	}
379 
380 	print_uptime();
381 
382 	/*
383 	 * Dump before doing post_sync shutdown ops
384 	 */
385 	crit_enter();
386 	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold) {
387 		dumpsys();
388 	}
389 
390 	/*
391 	 * Ok, now do things that assume all filesystem activity has
392 	 * been completed.  This will also call the device shutdown
393 	 * methods.
394 	 */
395 	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);
396 
397 	/* Now that we're going to really halt the system... */
398 	EVENTHANDLER_INVOKE(shutdown_final, howto);
399 
400 	for(;;) ;	/* safety against shutdown_reset not working */
401 	/* NOTREACHED */
402 }
403 
404 static int
405 shutdown_busycount1(struct buf *bp, void *info)
406 {
407 	if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
408 		return(1);
409 	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
410 		return (1);
411 	return (0);
412 }
413 
414 static int
415 shutdown_busycount2(struct buf *bp, void *info)
416 {
417 	if (((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp)) ||
418 	    ((bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI)) {
419 		/*
420 		 * Only count buffers undergoing write I/O
421 		 * on the related vnode.
422 		 */
423 		if (bp->b_vp == NULL ||
424 		    bio_track_active(&bp->b_vp->v_track_write) == 0) {
425 			return (0);
426 		}
427 #if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
428 		kprintf(
429 	    "%p dev:?, flags:%08x, loffset:%jd, doffset:%jd\n",
430 		    bp,
431 		    bp->b_flags, (intmax_t)bp->b_loffset,
432 		    (intmax_t)bp->b_bio2.bio_offset);
433 #endif
434 		return(1);
435 	}
436 	return(0);
437 }
438 
439 /*
440  * If the shutdown was a clean halt, behave accordingly.
441  */
442 static void
443 shutdown_halt(void *junk, int howto)
444 {
445 	if (howto & RB_HALT) {
446 		kprintf("\n");
447 		kprintf("The operating system has halted.\n");
448 #ifdef _KERNEL_VIRTUAL
449 		cpu_halt();
450 #else
451 		kprintf("Please press any key to reboot.\n\n");
452 		switch (cngetc()) {
453 		case -1:		/* No console, just die */
454 			cpu_halt();
455 			/* NOTREACHED */
456 		default:
457 			howto &= ~RB_HALT;
458 			break;
459 		}
460 #endif
461 	}
462 }
463 
464 /*
465  * Check to see if the system panicked; if so, pause and then reboot
466  * according to the specified delay.
467  */
468 static void
469 shutdown_panic(void *junk, int howto)
470 {
471 	int loop;
472 
473 	if (howto & RB_DUMP) {
474 		if (PANIC_REBOOT_WAIT_TIME != 0) {
475 			if (PANIC_REBOOT_WAIT_TIME != -1) {
476 				kprintf("Automatic reboot in %d seconds - "
477 				       "press a key on the console to abort\n",
478 					PANIC_REBOOT_WAIT_TIME);
479 				for (loop = PANIC_REBOOT_WAIT_TIME * 10;
480 				     loop > 0; --loop) {
481 					DELAY(1000 * 100); /* 1/10th second */
482 					/* Did user type a key? */
483 					if (cncheckc() != -1)
484 						break;
485 				}
486 				if (!loop)
487 					return;
488 			}
489 		} else { /* zero time specified - reboot NOW */
490 			return;
491 		}
492 		kprintf("--> Press a key on the console to reboot,\n");
493 		kprintf("--> or switch off the system now.\n");
494 		cngetc();
495 	}
496 }
497 
498 /*
499  * Everything done, now reset
500  */
501 static void
502 shutdown_reset(void *junk, int howto)
503 {
504 	kprintf("Rebooting...\n");
505 	DELAY(1000000);	/* wait 1 sec for kprintf's to complete and be read */
506 	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
507 	cpu_reset();
508 	/* NOTREACHED */ /* assuming reset worked */
509 }
510 
511 /*
512  * Try to remove FS references in the specified process.  This function
513  * is used during shutdown.
514  */
515 static
516 void
517 shutdown_cleanup_proc(struct proc *p)
518 {
519 	struct filedesc *fdp;
520 	struct vmspace *vm;
521 
522 	if (p == NULL)
523 		return;
524 	if ((fdp = p->p_fd) != NULL) {
525 		kern_closefrom(0);
526 		if (fdp->fd_cdir) {
527 			cache_drop(&fdp->fd_ncdir);
528 			vrele(fdp->fd_cdir);
529 			fdp->fd_cdir = NULL;
530 		}
531 		if (fdp->fd_rdir) {
532 			cache_drop(&fdp->fd_nrdir);
533 			vrele(fdp->fd_rdir);
534 			fdp->fd_rdir = NULL;
535 		}
536 		if (fdp->fd_jdir) {
537 			cache_drop(&fdp->fd_njdir);
538 			vrele(fdp->fd_jdir);
539 			fdp->fd_jdir = NULL;
540 		}
541 	}
542 	if (p->p_vkernel)
543 		vkernel_exit(p);
544 	if (p->p_textvp) {
545 		vrele(p->p_textvp);
546 		p->p_textvp = NULL;
547 	}
548 	vm = p->p_vmspace;
549 	if (vm != NULL) {
550 		pmap_remove_pages(vmspace_pmap(vm),
551 				  VM_MIN_USER_ADDRESS,
552 				  VM_MAX_USER_ADDRESS);
553 		vm_map_remove(&vm->vm_map,
554 			      VM_MIN_USER_ADDRESS,
555 			      VM_MAX_USER_ADDRESS);
556 	}
557 }
558 
559 /*
560  * Magic number for savecore
561  *
562  * exported (symorder) and used at least by savecore(8)
563  *
564  * Mark it as used so that gcc doesn't optimize it away.
565  */
566 __attribute__((__used__))
567 	static u_long const dumpmag = 0x8fca0101UL;
568 
569 __attribute__((__used__))
570 	static int	dumpsize = 0;		/* also for savecore */
571 
572 static int	dodump = 1;
573 
574 SYSCTL_INT(_machdep, OID_AUTO, do_dump, CTLFLAG_RW, &dodump, 0,
575     "Try to perform coredump on kernel panic");
576 
577 void
578 mkdumpheader(struct kerneldumpheader *kdh, char *magic, uint32_t archver,
579     uint64_t dumplen, uint32_t blksz)
580 {
581 	bzero(kdh, sizeof(*kdh));
582 	strncpy(kdh->magic, magic, sizeof(kdh->magic));
583 	strncpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
584 	kdh->version = htod32(KERNELDUMPVERSION);
585 	kdh->architectureversion = htod32(archver);
586 	kdh->dumplength = htod64(dumplen);
587 	kdh->dumptime = htod64(time_second);
588 	kdh->blocksize = htod32(blksz);
589 	strncpy(kdh->hostname, hostname, sizeof(kdh->hostname));
590 	strncpy(kdh->versionstring, version, sizeof(kdh->versionstring));
591 	if (panicstr != NULL)
592 		strncpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
593 	kdh->parity = kerneldump_parity(kdh);
594 }
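
/*
 * Illustrative sketch (added, not part of the original file): the machine
 * dependent dump code brackets the memory image with two copies of this
 * header so savecore(8) can locate and verify the dump.  The constant,
 * field and function names below are assumptions.
 */
#if 0
static void
example_md_dumpsys(struct dumperinfo *di)
{
	struct kerneldumpheader kdh;
	uint64_t dumplen = 0;	/* bytes of physical memory to be written */

	mkdumpheader(&kdh, "EXAMPLE MAGIC" /* real magic from kerneldump.h */,
	    1 /* MD architecture version */, dumplen, di->blocksize);
	/* write kdh, then the memory image, then kdh again */
}
#endif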
595 
596 static int
597 setdumpdev(cdev_t dev)
598 {
599 	int error;
600 	int doopen;
601 
602 	if (dev == NULL) {
603 		disk_dumpconf(NULL, 0/*off*/);
604 		return (0);
605 	}
606 
607 	/*
608 	 * We have to open the device before we can perform ioctls on it,
609 	 * or the slice/label data may not be present.  Device opens are
610 	 * usually tracked by specfs, but the dump device can be set in
611 	 * early boot and may not be open so this is somewhat of a hack.
612 	 */
613 	doopen = (dev->si_sysref.refcnt == 1);
614 	if (doopen) {
615 		error = dev_dopen(dev, FREAD, S_IFCHR, proc0.p_ucred);
616 		if (error)
617 			return (error);
618 	}
619 	error = disk_dumpconf(dev, 1/*on*/);
620 
621 	return error;
622 }
623 
624 /* ARGSUSED */
625 static void dump_conf (void *dummy);
626 static void
627 dump_conf(void *dummy)
628 {
629 	char *path;
630 	cdev_t dev;
631 
632 	path = kmalloc(MNAMELEN, M_TEMP, M_WAITOK);
633 	if (TUNABLE_STR_FETCH("dumpdev", path, MNAMELEN) != 0) {
634 		dev = kgetdiskbyname(path);
635 		if (dev != NULL)
636 			dumpdev = dev;
637 	}
638 	kfree(path, M_TEMP);
639 	if (setdumpdev(dumpdev) != 0)
640 		dumpdev = NULL;
641 }
642 
643 SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL)
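
/*
 * Illustrative usage (added, not part of the original file): the dump
 * device is normally chosen with the "dumpdev" loader tunable consumed
 * above, e.g. in /boot/loader.conf:
 *
 *	dumpdev="/dev/da0s1b"		(device name is only an example)
 *
 * or later from userland, e.g. via dumpon(8) or the kern.dumpdev sysctl
 * defined below.
 */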
644 
645 static int
646 sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS)
647 {
648 	int error;
649 	udev_t ndumpdev;
650 
651 	ndumpdev = dev2udev(dumpdev);
652 	error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req);
653 	if (error == 0 && req->newptr != NULL)
654 		error = setdumpdev(udev2dev(ndumpdev, 0));
655 	return (error);
656 }
657 
658 SYSCTL_PROC(_kern, KERN_DUMPDEV, dumpdev, CTLTYPE_OPAQUE|CTLFLAG_RW,
659 	0, sizeof dumpdev, sysctl_kern_dumpdev, "T,udev_t", "");
660 
661 /*
662  * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
663  * and then reboots.  If we are called twice, then we avoid trying to sync
664  * the disks as this often leads to recursive panics.
665  */
666 void
667 panic(const char *fmt, ...)
668 {
669 	int bootopt, newpanic;
670 	__va_list ap;
671 	static char buf[256];
672 
673 #ifdef SMP
674 	/*
675 	 * If a panic occurs on multiple cpus before the first is able to
676 	 * halt the other cpus, only one cpu is allowed to take the panic.
677 	 * Attempt to be verbose about this situation, but if the kprintf()
678 	 * itself panics, don't let us overrun the kernel stack.
679 	 *
680 	 * Be very nasty about descheduling our thread at the lowest
681 	 * level possible in an attempt to freeze the thread without
682 	 * inducing further panics.
683 	 *
684 	 * Bumping gd_trap_nesting_level will also bypass assertions in
685 	 * lwkt_switch() and allow us to switch away even if we are a
686 	 * FAST interrupt or IPI.
687 	 */
688 	if (atomic_poll_acquire_int(&panic_cpu_interlock)) {
689 		panic_cpu_gd = mycpu;
690 	} else if (panic_cpu_gd != mycpu) {
691 		crit_enter();
692 		++mycpu->gd_trap_nesting_level;
693 		if (mycpu->gd_trap_nesting_level < 25) {
694 			kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
695 				mycpu->gd_cpuid, curthread);
696 		}
697 		curthread->td_release = NULL;	/* be a grinch */
698 		for (;;) {
699 			lwkt_deschedule_self(curthread);
700 			lwkt_switch();
701 		}
702 		/* NOT REACHED */
703 		/* --mycpu->gd_trap_nesting_level */
704 		/* crit_exit() */
705 	}
706 #endif
707 	bootopt = RB_AUTOBOOT | RB_DUMP;
708 	if (sync_on_panic == 0)
709 		bootopt |= RB_NOSYNC;
710 	newpanic = 0;
711 	if (panicstr)
712 		bootopt |= RB_NOSYNC;
713 	else {
714 		panicstr = fmt;
715 		newpanic = 1;
716 	}
717 
718 	__va_start(ap, fmt);
719 	kvsnprintf(buf, sizeof(buf), fmt, ap);
720 	if (panicstr == fmt)
721 		panicstr = buf;
722 	__va_end(ap);
723 	kprintf("panic: %s\n", buf);
724 #ifdef SMP
725 	/* two separate prints in case of an unmapped page and trap */
726 	kprintf("mp_lock = %08x; ", mp_lock);
727 	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
728 #endif
729 
730 #if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
731 	led_switch("error", 1);
732 #endif
733 
734 #if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
735 	wdog_disable();
736 #endif
737 
738 #if defined(DDB)
739 	if (newpanic && trace_on_panic)
740 		print_backtrace();
741 	if (debugger_on_panic)
742 		Debugger("panic");
743 #endif
744 	boot(bootopt);
745 }
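
/*
 * Illustrative call site (added, not part of the original file): subsystems
 * call panic() on unrecoverable inconsistencies; the formatted message
 * becomes panicstr and is copied into the dump header.  The names below
 * are made up.
 */
#if 0
	if (ref_count < 0)
		panic("examplefs: negative reference count %d on %p",
		      ref_count, object);
#endif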
746 
747 /*
748  * Support for poweroff delay.
749  */
750 #ifndef POWEROFF_DELAY
751 # define POWEROFF_DELAY 5000
752 #endif
753 static int poweroff_delay = POWEROFF_DELAY;
754 
755 SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
756 	&poweroff_delay, 0, "");
757 
758 static void
759 poweroff_wait(void *junk, int howto)
760 {
761 	if(!(howto & RB_POWEROFF) || poweroff_delay <= 0)
762 		return;
763 	DELAY(poweroff_delay * 1000);
764 }
765 
766 /*
767  * Some system processes (e.g. syncer) need to be stopped at appropriate
768  * points in their main loops prior to a system shutdown, so that they
769  * won't interfere with the shutdown process (e.g. by holding a disk buf
770  * to cause sync to fail).  For each of these system processes, register
771  * shutdown_kproc() as a handler for one of the shutdown events.
772  */
773 static int kproc_shutdown_wait = 60;
774 SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
775     &kproc_shutdown_wait, 0, "");
776 
777 void
778 shutdown_kproc(void *arg, int howto)
779 {
780 	struct thread *td;
781 	struct proc *p;
782 	int error;
783 
784 	if (panicstr)
785 		return;
786 
787 	td = (struct thread *)arg;
788 	if ((p = td->td_proc) != NULL) {
789 	    kprintf("Waiting (max %d seconds) for system process `%s' to stop...",
790 		kproc_shutdown_wait, p->p_comm);
791 	} else {
792 	    kprintf("Waiting (max %d seconds) for system thread %s to stop...",
793 		kproc_shutdown_wait, td->td_comm);
794 	}
795 	error = suspend_kproc(td, kproc_shutdown_wait * hz);
796 
797 	if (error == EWOULDBLOCK)
798 		kprintf("timed out\n");
799 	else
800 		kprintf("stopped\n");
801 }
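
/*
 * Illustrative sketch (added, not part of the original file): a kernel
 * thread's setup code registers itself roughly as follows; "exampled_td"
 * is a hypothetical thread pointer.
 */
#if 0
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, exampled_td,
	    SHUTDOWN_PRI_LAST);
#endif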
802 
803 /* Registration of dumpers */
804 int
805 set_dumper(struct dumperinfo *di)
806 {
807 	if (di == NULL) {
808 		bzero(&dumper, sizeof(dumper));
809 		return 0;
810 	}
811 
812 	if (dumper.dumper != NULL)
813 		return (EBUSY);
814 
815 	dumper = *di;
816 	return 0;
817 }
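
/*
 * Illustrative sketch (added, not part of the original file): a disk or
 * dump-capable storage driver registers itself along these lines.  The
 * dumper_t typedef is expected from <sys/kerneldump.h>; field and function
 * names are assumptions.
 */
#if 0
static dumper_t	exampledisk_dump;	/* the driver's dump I/O routine */

static void
exampledisk_enable_dump(void *softc)
{
	struct dumperinfo di;

	bzero(&di, sizeof(di));
	di.dumper = exampledisk_dump;
	di.priv = softc;
	if (set_dumper(&di) != 0)
		kprintf("a dump device is already configured\n");
}
#endif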
818 
819 void
820 dumpsys(void)
821 {
822 #if defined (_KERNEL_VIRTUAL)
823 	/* VKERNELs don't support dumps */
824 	kprintf("VKERNEL doesn't support dumps\n");
825 	return;
826 #endif
827 	/*
828 	 * If there is a dumper registered and we aren't dumping already, call
829 	 * the machine dependent dumpsys (md_dumpsys) to do the hard work.
830 	 *
831 	 * XXX: while right now the md_dumpsys() of x86 and x86_64 could be
832 	 *      factored out completely into here, I'd rather keep them machine
833 	 *      dependent in case we ever add a platform which does not share
834 	 *      the same dumpsys() code, such as arm.
835 	 */
836 	if (dumper.dumper != NULL && !dumping) {
837 		dumping++;
838 		md_dumpsys(&dumper);
839 	}
840 }
841