/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This module contains the guts of the checkpoint-resume mechanism.
 * All code in this module is platform independent.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/callb.h>
#include <sys/processor.h>
#include <sys/machsystm.h>
#include <sys/clock.h>
#include <sys/vfs.h>
#include <sys/kmem.h>
#include <nfs/lm.h>
#include <sys/systm.h>
#include <sys/cpr.h>
#include <sys/bootconf.h>
#include <sys/cyclic.h>
#include <sys/filio.h>
#include <sys/fs/ufs_filio.h>
#include <sys/epm.h>
#include <sys/modctl.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/promif.h>
#include <sys/srn.h>
#include <sys/cpr_impl.h>

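/*
 * PPM(dip) resolves a device node to the devinfo node of the platform
 * power management (ppm) driver associated with it: it simply casts the
 * devi_pm_ppm back-pointer stored in the dev_info structure back to a
 * dev_info_t pointer.
 */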
#define PPM(dip) ((dev_info_t *)DEVI(dip)->devi_pm_ppm)

extern struct cpr_terminator cpr_term;

extern int cpr_alloc_statefile(int);
extern void cpr_start_kernel_threads(void);
extern void cpr_abbreviate_devpath(char *, char *);
extern void cpr_convert_promtime(cpr_time_t *);
extern void cpr_send_notice(void);
extern void cpr_set_bitmap_size(void);
extern void cpr_stat_init();
extern void cpr_statef_close(void);
extern void flush_windows(void);
extern void (*srn_signal)(int, int);
extern void init_cpu_syscall(struct cpu *);
extern void i_cpr_pre_resume_cpus();
extern void i_cpr_post_resume_cpus();
extern int cpr_is_ufs(struct vfs *);

extern int pm_powering_down;
extern kmutex_t srn_clone_lock;
extern int srn_inuse;

static int cpr_suspend(int);
static int cpr_resume(int);
static void cpr_suspend_init(int);
#if defined(__x86)
static int cpr_suspend_cpus(void);
static void cpr_resume_cpus(void);
#endif
static int cpr_all_online(void);
static void cpr_restore_offline(void);

cpr_time_t wholecycle_tv;
int cpr_suspend_succeeded;
pfn_t curthreadpfn;
int curthreadremapped;

extern cpuset_t cpu_ready_set;
extern void *(*cpu_pause_func)(void *);

extern processorid_t i_cpr_bootcpuid(void);
extern cpu_t *i_cpr_bootcpu(void);
extern void tsc_adjust_delta(hrtime_t tdelta);
extern void tsc_resume(void);
extern int tsc_resume_in_cyclic;

/*
 * Set this variable to 1 to have device drivers resume in a
 * uniprocessor environment. This is to allow drivers that assume
 * that they resume on a UP machine to continue to work. Should be
 * deprecated once the broken drivers are fixed.
 */
int cpr_resume_uniproc = 0;
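/*
 * A note on usage (a sketch, assuming the usual module:variable tunable
 * mechanism and that this code is built into the "cpr" module): a line
 * such as
 *
 *        set cpr:cpr_resume_uniproc = 1
 *
 * in /etc/system would make cpr_resume() pause all but the current cpu
 * around cpr_resume_devices(); see the x86 code under rb_suspend_devices.
 */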

/*
 * save or restore abort_enable; this prevents a drop
 * to kadb or prom during cpr_resume_devices() when
 * there is no kbd present; see abort_sequence_enter()
 */
static void
cpr_sae(int stash)
{
        static int saved_ae = -1;

        if (stash) {
                saved_ae = abort_enable;
                abort_enable = 0;
        } else if (saved_ae != -1) {
                abort_enable = saved_ae;
                saved_ae = -1;
        }
}
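/*
 * cpr_suspend() calls cpr_sae(1) once the devices have been suspended to
 * stash abort_enable and turn the abort sequence off; cpr_resume() calls
 * cpr_sae(0) right after cpr_resume_devices() to restore the saved value.
 */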


/*
 * The main switching point for cpr, this routine starts the ckpt
 * and state file saving routines; on resume the control is
 * returned back to here and it then calls the resume routine.
 */
int
cpr_main(int sleeptype)
{
        int rc, rc2;
        label_t saveq;
        klwp_t *tlwp = ttolwp(curthread);

        if (sleeptype == CPR_TODISK) {
                if ((rc = cpr_default_setup(1)) != 0)
                        return (rc);
                ASSERT(tlwp);
                saveq = tlwp->lwp_qsav;
        }

        if (sleeptype == CPR_TORAM) {
                rc = cpr_suspend(sleeptype);
                PMD(PMD_SX, ("cpr_suspend rets %x\n", rc))
                if (rc == 0) {
                        int i_cpr_power_down(int sleeptype);

                        /*
                         * From this point on, we should be at a high
                         * spl, interrupts disabled, and all but one
                         * cpu's paused (effectively UP/single threaded).
                         * So this is where we want to put ASSERTS()
                         * to let us know otherwise.
                         */
                        ASSERT(cpus_paused());

                        /*
                         * Now do the work of actually putting this
                         * machine to sleep!
                         */
                        rc = i_cpr_power_down(sleeptype);
                        if (rc == 0) {
                                PMD(PMD_SX, ("back from successful suspend\n"))
                        }
                        /*
                         * We do care about the return value from cpr_resume
                         * at this point, as it will tell us if one of the
                         * resume functions failed (cpr_resume_devices()).
                         * However, for this to return and _not_ panic means
                         * that we must be in one of the test functions. So
                         * check for that and return an appropriate message.
                         */
                        rc2 = cpr_resume(sleeptype);
                        if (rc2 != 0) {
                                ASSERT(cpr_test_point > 0);
                                cmn_err(CE_NOTE,
                                    "cpr_resume returned non-zero: %d\n", rc2);
                                PMD(PMD_SX, ("cpr_resume rets %x\n", rc2))
                        }
                        ASSERT(!cpus_paused());
                } else {
                        PMD(PMD_SX, ("failed suspend, resuming\n"))
                        rc = cpr_resume(sleeptype);
                }
                return (rc);
        }
        /*
         * Remember where we are for resume after reboot
         */
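        /*
         * On this pass setjmp() returns 0 and we head into cpr_suspend().
         * The resume path comes back via a longjmp() to lwp_qsav (the
         * C_ST_REUSABLE case in cpr_suspend() longjmps here explicitly),
         * which lands in the else-branch below with a non-zero return.
         */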
        if (!setjmp(&tlwp->lwp_qsav)) {
                /*
                 * try to checkpoint the system, if failed return back
                 * to userland, otherwise power off.
                 */
                rc = cpr_suspend(sleeptype);
                if (rc || cpr_reusable_mode) {
                        /*
                         * We don't really want to go down, or
                         * something went wrong in suspend, do what we can
                         * to put the system back to an operable state then
                         * return back to userland.
                         */
                        PMD(PMD_SX, ("failed suspend, resuming\n"))
                        (void) cpr_resume(sleeptype);
                        PMD(PMD_SX, ("back from failed suspend resume\n"))
                }
        } else {
                /*
                 * This is the resumed side of longjmp, restore the previous
                 * longjmp pointer if there is one so this will be transparent
                 * to the world.
                 * This path is only for CPR_TODISK, where we reboot
                 */
                ASSERT(sleeptype == CPR_TODISK);
                tlwp->lwp_qsav = saveq;
                CPR->c_flags &= ~C_SUSPENDING;
                CPR->c_flags |= C_RESUMING;

                /*
                 * resume the system back to the original state
                 */
                rc = cpr_resume(sleeptype);
                PMD(PMD_SX, ("back from successful suspend; resume rets %x\n",
                    rc))
        }

        (void) cpr_default_setup(0);

        return (rc);
}


#if defined(__sparc)

/*
 * check/disable or re-enable UFS logging
 */
static void
cpr_log_status(int enable, int *svstat, vnode_t *vp)
{
        int cmd, status, error;
        char *str, *able;
        fiolog_t fl;
        refstr_t *mntpt;

        str = "cpr_log_status";
        bzero(&fl, sizeof (fl));
        fl.error = FIOLOG_ENONE;

        /*
         * when disabling, first get and save logging status (0 or 1)
         */
        if (enable == 0) {
                if (error = VOP_IOCTL(vp, _FIOISLOG,
                    (uintptr_t)&status, FKIOCTL, CRED(), NULL, NULL)) {
                        mntpt = vfs_getmntpoint(vp->v_vfsp);
                        prom_printf("%s: \"%s\", can't get logging "
                            "status, error %d\n", str, refstr_value(mntpt),
                            error);
                        refstr_rele(mntpt);
                        return;
                }
                *svstat = status;
                if (cpr_debug & CPR_DEBUG5) {
                        mntpt = vfs_getmntpoint(vp->v_vfsp);
                        errp("%s: \"%s\", logging status = %d\n",
                            str, refstr_value(mntpt), status);
                        refstr_rele(mntpt);
                }

                able = "disable";
                cmd = _FIOLOGDISABLE;
        } else {
                able = "enable";
                cmd = _FIOLOGENABLE;
        }

        /*
         * disable or re-enable logging when the saved status is 1
         */
        if (*svstat == 1) {
                error = VOP_IOCTL(vp, cmd, (uintptr_t)&fl,
                    FKIOCTL, CRED(), NULL, NULL);
                if (error) {
                        mntpt = vfs_getmntpoint(vp->v_vfsp);
                        prom_printf("%s: \"%s\", can't %s logging, error %d\n",
                            str, refstr_value(mntpt), able, error);
                        refstr_rele(mntpt);
                } else {
                        if (cpr_debug & CPR_DEBUG5) {
                                mntpt = vfs_getmntpoint(vp->v_vfsp);
                                errp("%s: \"%s\", logging is now %sd\n",
                                    str, refstr_value(mntpt), able);
                                refstr_rele(mntpt);
                        }
                }
        }

        /*
         * when enabling logging, reset the saved status
         * to unknown for next time
         */
        if (enable)
                *svstat = -1;
}

/*
 * enable/disable UFS logging on filesystems containing cpr_default_path
 * and cpr statefile. since the statefile can be on any fs, that fs
 * needs to be handled separately. this routine and cprboot expect that
 * CPR_CONFIG and CPR_DEFAULT both reside on the same fs, rootfs. cprboot
 * is loaded from the device with rootfs and uses the same device to open
 * both CPR_CONFIG and CPR_DEFAULT (see common/support.c). moving either
 * file outside of rootfs would cause errors during cprboot, plus cpr and
 * fsck problems with the new fs if logging were enabled.
 */

static int
cpr_ufs_logging(int enable)
{
        static int def_status = -1, sf_status = -1;
        struct vfs *vfsp;
        char *fname;
        vnode_t *vp;
        int error;

        if (cpr_reusable_mode)
                return (0);

        if (error = cpr_open_deffile(FREAD, &vp))
                return (error);
        vfsp = vp->v_vfsp;
        if (!cpr_is_ufs(vfsp)) {
                (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
                VN_RELE(vp);
                return (0);
        }

        cpr_log_status(enable, &def_status, vp);
        (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
        VN_RELE(vp);

        fname = cpr_build_statefile_path();
        if (fname == NULL)
                return (ENOENT);
        if (error = vn_open(fname, UIO_SYSSPACE, FCREAT|FWRITE,
            0600, &vp, CRCREAT, 0)) {
                prom_printf("cpr_ufs_logging: can't open/create \"%s\", "
                    "error %d\n", fname, error);
                return (error);
        }

        /*
         * check logging status for the statefile if it resides
         * on a different fs and the type is a regular file
         */
        if (vp->v_vfsp != vfsp && vp->v_type == VREG)
                cpr_log_status(enable, &sf_status, vp);
        (void) VOP_CLOSE(vp, FWRITE, 1, (offset_t)0, CRED(), NULL);
        VN_RELE(vp);

        return (0);
}
#endif


/*
 * Check if klmmod is loaded and call a lock manager service; if klmmod
 * is not loaded, the services aren't needed and a call would trigger a
 * modload, which would block since another thread would never run.
 */
static void
cpr_lock_mgr(void (*service)(void))
{
        if (mod_find_by_filename(NULL, "misc/klmmod") != NULL)
                (*service)();
}

int
cpr_suspend_cpus(void)
{
        int ret = 0;
        extern void *i_cpr_save_context(void *arg);

        mutex_enter(&cpu_lock);

        /*
         * the machine could not have booted without a bootcpu
         */
        ASSERT(i_cpr_bootcpu() != NULL);

        /*
         * bring all the offline cpus online
         */
        if ((ret = cpr_all_online())) {
                mutex_exit(&cpu_lock);
                return (ret);
        }

        /*
         * Set the affinity to be the boot processor
         * This is cleared in either cpr_resume_cpus() or cpr_unpause_cpus()
         */
        affinity_set(i_cpr_bootcpuid());

        ASSERT(CPU->cpu_id == 0);

        PMD(PMD_SX, ("curthread running on bootcpu\n"))

        /*
         * pause all other running CPUs and save the CPU state at the
         * same time
         */
        cpu_pause_func = i_cpr_save_context;
        pause_cpus(NULL);

        mutex_exit(&cpu_lock);

        return (0);
}
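/*
 * Installing i_cpr_save_context() as cpu_pause_func above means each cpu
 * saves its own context as part of being paused; the pause and the boot
 * cpu affinity are undone later by cpr_resume_cpus() (successful suspend)
 * or cpr_unpause_cpus() (failed suspend).
 */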

/*
 * Take the system down to a checkpointable state and write
 * the state file, the following are sequentially executed:
 *
 *    - Request all user threads to stop themselves
 *    - push out and invalidate user pages
 *    - bring statefile inode incore to prevent a miss later
 *    - request all daemons to stop
 *    - check and make sure all threads are stopped
 *    - sync the file system
 *    - suspend all devices
 *    - block interrupts
 *    - dump system state and memory to state file
 *    - SPARC code will not be called with CPR_TORAM, caller filters
 */
static int
cpr_suspend(int sleeptype)
{
#if defined(__sparc)
        int sf_realloc, nverr;
#endif
        int rc = 0;
        int skt_rc = 0;

        PMD(PMD_SX, ("cpr_suspend %x\n", sleeptype))
        cpr_set_substate(C_ST_SUSPEND_BEGIN);

        cpr_suspend_init(sleeptype);

        cpr_save_time();

        cpr_tod_get(&wholecycle_tv);
        CPR_STAT_EVENT_START("Suspend Total");

        i_cpr_alloc_cpus();

#if defined(__sparc)
        ASSERT(sleeptype == CPR_TODISK);
        if (!cpr_reusable_mode) {
                /*
                 * We need to validate default file before fs
                 * functionality is disabled.
                 */
                if (rc = cpr_validate_definfo(0))
                        return (rc);
        }
        i_cpr_save_machdep_info();
#endif

        PMD(PMD_SX, ("cpr_suspend: stop scans\n"))
        /* Stop PM scans ASAP */
        (void) callb_execute_class(CB_CL_CPR_PM, CB_CODE_CPR_CHKPT);

        pm_dispatch_to_dep_thread(PM_DEP_WK_CPR_SUSPEND,
            NULL, NULL, PM_DEP_WAIT, NULL, 0);

#if defined(__sparc)
        ASSERT(sleeptype == CPR_TODISK);
        cpr_set_substate(C_ST_MP_OFFLINE);
        if (rc = cpr_mp_offline())
                return (rc);
#endif
        /*
         * Ask Xorg to suspend the frame buffer, and wait for it to happen
         */
        mutex_enter(&srn_clone_lock);
        if (srn_signal) {
                PMD(PMD_SX, ("cpr_suspend: (*srn_signal)(..., "
                    "SRN_SUSPEND_REQ)\n"))
                srn_inuse = 1;  /* because *(srn_signal) cv_waits */
                (*srn_signal)(SRN_TYPE_APM, SRN_SUSPEND_REQ);
                srn_inuse = 0;
        } else {
                PMD(PMD_SX, ("cpr_suspend: srn_signal NULL\n"))
        }
        mutex_exit(&srn_clone_lock);

        /*
         * Ask the user threads to stop by themselves, but
         * if they don't or can't after 3 retries, we give up on CPR.
         * The 3-retry limit is not a random number because 2 is possible if
         * a thread has been forked before the parent thread is stopped.
         */
        CPR_DEBUG(CPR_DEBUG1, "\nstopping user threads...");
        CPR_STAT_EVENT_START(" stop users");
        cpr_set_substate(C_ST_STOP_USER_THREADS);
        PMD(PMD_SX, ("cpr_suspend: stop user threads\n"))
        if (rc = cpr_stop_user_threads())
                return (rc);
        CPR_STAT_EVENT_END(" stop users");
        CPR_DEBUG(CPR_DEBUG1, "done\n");

        PMD(PMD_SX, ("cpr_suspend: save direct levels\n"))
        pm_save_direct_levels();

        /*
         * User threads are stopped. We will start communicating with the
         * user via prom_printf (some debug output may have already happened)
         * so let anybody who cares know about this (bug 4096122)
         */
        (void) callb_execute_class(CB_CL_CPR_PROMPRINTF, CB_CODE_CPR_CHKPT);

        PMD(PMD_SX, ("cpr_suspend: send notice\n"))
#ifndef DEBUG
        cpr_send_notice();
        if (cpr_debug)
                prom_printf("\n");
#endif

        PMD(PMD_SX, ("cpr_suspend: POST USER callback\n"))
        (void) callb_execute_class(CB_CL_CPR_POST_USER, CB_CODE_CPR_CHKPT);

        /*
         * Reattach any drivers which originally exported the
         * no-involuntary-power-cycles property. We need to do this before
         * stopping kernel threads because modload is implemented using
         * a kernel thread.
         */
        cpr_set_substate(C_ST_PM_REATTACH_NOINVOL);
        PMD(PMD_SX, ("cpr_suspend: reattach noinvol\n"))
        if (!pm_reattach_noinvol())
                return (ENXIO);

#if defined(__sparc)
        ASSERT(sleeptype == CPR_TODISK);
        /*
         * if ufs logging is enabled, we need to disable it before
         * stopping kernel threads so that ufs delete and roll
         * threads can do the work.
         */
        cpr_set_substate(C_ST_DISABLE_UFS_LOGGING);
        if (rc = cpr_ufs_logging(0))
                return (rc);

        /*
         * Use sync_all to swap out all user pages and find out how much
         * extra space is needed for user pages that don't have back store
         * space left.
         */
        CPR_STAT_EVENT_START(" swapout upages");
        vfs_sync(SYNC_ALL);
        CPR_STAT_EVENT_END(" swapout upages");

        cpr_set_bitmap_size();

alloc_statefile:
        /*
         * If our last state was C_ST_DUMP_NOSPC, we're trying to
         * realloc the statefile, otherwise this is the first attempt.
         */
        sf_realloc = (CPR->c_substate == C_ST_DUMP_NOSPC) ? 1 : 0;

        CPR_STAT_EVENT_START(" alloc statefile");
        cpr_set_substate(C_ST_STATEF_ALLOC);
        if (rc = cpr_alloc_statefile(sf_realloc)) {
                if (sf_realloc)
                        errp("realloc failed\n");
                return (rc);
        }
        CPR_STAT_EVENT_END(" alloc statefile");

        /*
         * Sync the filesystem to preserve its integrity.
         *
         * This sync is also used to flush out all B_DELWRI buffers
         * (fs cache) which are mapped and neither dirty nor referenced
         * before cpr_invalidate_pages destroys them.
         * fsflush does a similar thing.
         */
        sync();

        /*
         * destroy all clean file mapped kernel pages
         */
        CPR_STAT_EVENT_START(" clean pages");
        CPR_DEBUG(CPR_DEBUG1, ("cleaning up mapped pages..."));
        (void) callb_execute_class(CB_CL_CPR_VM, CB_CODE_CPR_CHKPT);
        CPR_DEBUG(CPR_DEBUG1, ("done\n"));
        CPR_STAT_EVENT_END(" clean pages");
#endif


        /*
         * Hooks needed by lock manager prior to suspending.
         * Refer to code for more comments.
         */
        PMD(PMD_SX, ("cpr_suspend: lock mgr\n"))
        cpr_lock_mgr(lm_cprsuspend);

        /*
         * Now suspend all the devices
         */
        CPR_STAT_EVENT_START(" stop drivers");
        CPR_DEBUG(CPR_DEBUG1, "suspending drivers...");
        cpr_set_substate(C_ST_SUSPEND_DEVICES);
        pm_powering_down = 1;
        PMD(PMD_SX, ("cpr_suspend: suspending devices\n"))
        rc = cpr_suspend_devices(ddi_root_node());
        pm_powering_down = 0;
        if (rc)
                return (rc);
        CPR_DEBUG(CPR_DEBUG1, "done\n");
        CPR_STAT_EVENT_END(" stop drivers");

        /*
         * Stop all daemon activities
         */
        cpr_set_substate(C_ST_STOP_KERNEL_THREADS);
        PMD(PMD_SX, ("cpr_suspend: stopping kernel threads\n"))
        if (skt_rc = cpr_stop_kernel_threads())
                return (skt_rc);

        PMD(PMD_SX, ("cpr_suspend: POST KERNEL callback\n"))
        (void) callb_execute_class(CB_CL_CPR_POST_KERNEL, CB_CODE_CPR_CHKPT);

        PMD(PMD_SX, ("cpr_suspend: reattach noinvol fini\n"))
        pm_reattach_noinvol_fini();

        cpr_sae(1);

        PMD(PMD_SX, ("cpr_suspend: CPR CALLOUT callback\n"))
        (void) callb_execute_class(CB_CL_CPR_CALLOUT, CB_CODE_CPR_CHKPT);

        if (sleeptype == CPR_TODISK) {
                /*
                 * It's safer to do tod_get before we disable all intr.
                 */
                CPR_STAT_EVENT_START(" write statefile");
        }

        /*
         * it's time to ignore the outside world, stop the real time
         * clock and disable any further interrupt activity.
         */
        PMD(PMD_SX, ("cpr_suspend: handle xc\n"))
        i_cpr_handle_xc(1);     /* turn it on to disable xc assertion */

        mutex_enter(&cpu_lock);
        PMD(PMD_SX, ("cpr_suspend: cyclic suspend\n"))
        cyclic_suspend();
        mutex_exit(&cpu_lock);

        /*
         * Due to the different methods of resuming the system between
         * CPR_TODISK (boot cprboot on SPARC, which reloads kernel image)
         * and CPR_TORAM (restart via reset into existing kernel image)
         * cpus are not suspended and restored in the SPARC case, since it
         * is necessary to restart the cpus and pause them before restoring
         * the OBP image
         */

#if defined(__x86)

        /* pause aux cpus */
        PMD(PMD_SX, ("pause aux cpus\n"))

        cpr_set_substate(C_ST_MP_PAUSED);

        if ((rc = cpr_suspend_cpus()) != 0)
                return (rc);
#endif

        PMD(PMD_SX, ("cpr_suspend: stop intr\n"))
        i_cpr_stop_intr();
        CPR_DEBUG(CPR_DEBUG1, "interrupt is stopped\n");

        /*
         * Since we will now disable the mechanism that causes prom_printfs
         * to power up (if needed) the console fb/monitor, we assert that
         * it must be up now.
         */
        ASSERT(pm_cfb_is_up());
        PMD(PMD_SX, ("cpr_suspend: prom suspend prepost\n"))
        prom_suspend_prepost();

#if defined(__sparc)
        /*
         * getting ready to write ourself out, flush the register
         * windows to make sure that our stack is good when we
         * come back on the resume side.
         */
        flush_windows();
#endif

        /*
         * For S3, we're done
         */
        if (sleeptype == CPR_TORAM) {
                PMD(PMD_SX, ("cpr_suspend rets %x\n", rc))
                cpr_set_substate(C_ST_NODUMP);
                return (rc);
        }
#if defined(__sparc)
        /*
         * FATAL: NO MORE MEMORY ALLOCATION ALLOWED AFTER THIS POINT!!!
         *
         * The system is quiesced at this point, we are ready to either dump
         * to the state file for an extended sleep or a simple shutdown for
         * systems with non-volatile memory.
         */

        /*
         * special handling for reusable:
         */
        if (cpr_reusable_mode) {
                cpr_set_substate(C_ST_SETPROPS_1);
                if (nverr = cpr_set_properties(1))
                        return (nverr);
        }

        cpr_set_substate(C_ST_DUMP);
        rc = cpr_dump(C_VP);

        /*
         * if any error occurred during dump, more
         * special handling for reusable:
         */
        if (rc && cpr_reusable_mode) {
                cpr_set_substate(C_ST_SETPROPS_0);
                if (nverr = cpr_set_properties(0))
                        return (nverr);
        }

        if (rc == ENOSPC) {
                cpr_set_substate(C_ST_DUMP_NOSPC);
                (void) cpr_resume(sleeptype);
                goto alloc_statefile;
        } else if (rc == 0) {
                if (cpr_reusable_mode) {
                        cpr_set_substate(C_ST_REUSABLE);
                        longjmp(&ttolwp(curthread)->lwp_qsav);
                } else
                        rc = cpr_set_properties(1);
        }
#endif
        PMD(PMD_SX, ("cpr_suspend: return %d\n", rc))
        return (rc);
}

void
cpr_resume_cpus(void)
{
        /*
         * this is a cut down version of start_other_cpus()
         * just do the initialization to wake the other cpus
         */

#if defined(__x86)
        /*
         * Initialize our syscall handlers
         */
        init_cpu_syscall(CPU);

#endif

        i_cpr_pre_resume_cpus();

        /*
         * Restart the paused cpus
         */
        mutex_enter(&cpu_lock);
        start_cpus();
        mutex_exit(&cpu_lock);

        i_cpr_post_resume_cpus();

        mutex_enter(&cpu_lock);
        /*
         * Restore this cpu to use the regular cpu_pause(), so that
         * online and offline will work correctly
         */
        cpu_pause_func = NULL;

        /*
         * clear the affinity set in cpr_suspend_cpus()
         */
        affinity_clear();

        /*
         * offline all the cpus that were brought online during suspend
         */
        cpr_restore_offline();

        mutex_exit(&cpu_lock);
}

void
cpr_unpause_cpus(void)
{
        /*
         * Now restore the system back to what it was before we suspended
         */

        PMD(PMD_SX, ("cpr_unpause_cpus: restoring system\n"))

        mutex_enter(&cpu_lock);

        /*
         * Restore this cpu to use the regular cpu_pause(), so that
         * online and offline will work correctly
         */
        cpu_pause_func = NULL;

        /*
         * Restart the paused cpus
         */
        start_cpus();

        /*
         * clear the affinity set in cpr_suspend_cpus()
         */
        affinity_clear();

        /*
         * offline all the cpus that were brought online during suspend
         */
        cpr_restore_offline();

        mutex_exit(&cpu_lock);
}
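/*
 * cpr_resume_cpus() and cpr_unpause_cpus() are the two x86 unwind paths
 * for cpr_suspend_cpus(): the former is used when the suspend actually
 * completed and the aux cpus must be re-initialized (via
 * i_cpr_pre_resume_cpus()/i_cpr_post_resume_cpus()), the latter when
 * suspend failed before power-down and the paused cpus only need to be
 * restarted.
 */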

/*
 * Bring the system back up from a checkpoint, at this point
 * the VM has been minimally restored by boot, the following
 * are executed sequentially:
 *
 *    - machdep setup and enable interrupts (mp startup if it's mp)
 *    - resume all devices
 *    - restart daemons
 *    - put all threads back on run queue
 */
static int
cpr_resume(int sleeptype)
{
        cpr_time_t pwron_tv, *ctp;
        char *str;
        int rc = 0;

        /*
         * The following switch is used to resume the system
         * that was suspended to a different level.
         */
        CPR_DEBUG(CPR_DEBUG1, "\nEntering cpr_resume...\n");
        PMD(PMD_SX, ("cpr_resume %x\n", sleeptype))

        /*
         * Note:
         *
         * The rollback labels rb_xyz do not represent the cpr resume
         * state when event 'xyz' has happened. Instead they represent
         * the state during cpr suspend when event 'xyz' was being
         * entered (and where cpr suspend failed). The actual call that
         * failed may also need to be partially rolled back, since they
         * aren't atomic in most cases. In other words, rb_xyz means
         * "roll back all cpr suspend events that happened before 'xyz',
         * and the one that caused the failure, if necessary."
         */
        switch (CPR->c_substate) {
#if defined(__sparc)
        case C_ST_DUMP:
                /*
                 * This is most likely a full-fledged cpr_resume after
                 * a complete and successful cpr suspend. Just roll back
                 * everything.
                 */
                ASSERT(sleeptype == CPR_TODISK);
                break;

        case C_ST_REUSABLE:
        case C_ST_DUMP_NOSPC:
        case C_ST_SETPROPS_0:
        case C_ST_SETPROPS_1:
                /*
                 * C_ST_REUSABLE and C_ST_DUMP_NOSPC are the only two
                 * special switch cases here. The other two do not have
                 * any state change during cpr_suspend() that needs to
                 * be rolled back. But these are exit points from
                 * cpr_suspend, so theoretically (or in the future), it
                 * is possible that a need for roll back of a state
                 * change arises between these exit points.
                 */
                ASSERT(sleeptype == CPR_TODISK);
                goto rb_dump;
#endif

        case C_ST_NODUMP:
                PMD(PMD_SX, ("cpr_resume: NODUMP\n"))
                goto rb_nodump;

        case C_ST_STOP_KERNEL_THREADS:
                PMD(PMD_SX, ("cpr_resume: STOP_KERNEL_THREADS\n"))
                goto rb_stop_kernel_threads;

        case C_ST_SUSPEND_DEVICES:
                PMD(PMD_SX, ("cpr_resume: SUSPEND_DEVICES\n"))
                goto rb_suspend_devices;

#if defined(__sparc)
        case C_ST_STATEF_ALLOC:
                ASSERT(sleeptype == CPR_TODISK);
                goto rb_statef_alloc;

        case C_ST_DISABLE_UFS_LOGGING:
                ASSERT(sleeptype == CPR_TODISK);
                goto rb_disable_ufs_logging;
#endif

        case C_ST_PM_REATTACH_NOINVOL:
                PMD(PMD_SX, ("cpr_resume: REATTACH_NOINVOL\n"))
                goto rb_pm_reattach_noinvol;

        case C_ST_STOP_USER_THREADS:
                PMD(PMD_SX, ("cpr_resume: STOP_USER_THREADS\n"))
                goto rb_stop_user_threads;

#if defined(__sparc)
        case C_ST_MP_OFFLINE:
                PMD(PMD_SX, ("cpr_resume: MP_OFFLINE\n"))
                goto rb_mp_offline;
#endif

#if defined(__x86)
        case C_ST_MP_PAUSED:
                PMD(PMD_SX, ("cpr_resume: MP_PAUSED\n"))
                goto rb_mp_paused;
#endif


        default:
                PMD(PMD_SX, ("cpr_resume: others\n"))
                goto rb_others;
        }

rb_all:
        /*
         * perform platform-dependent initialization
         */
        if (cpr_suspend_succeeded)
                i_cpr_machdep_setup();

        /*
         * system did not really go down if we jump here
         */
rb_dump:
        /*
         * IMPORTANT: SENSITIVE RESUME SEQUENCE
         *
         * DO NOT ADD ANY INITIALIZATION STEP BEFORE THIS POINT!!
         */
rb_nodump:
        /*
         * If we did suspend to RAM, we didn't generate a dump
         */
        PMD(PMD_SX, ("cpr_resume: CPR DMA callback\n"))
        (void) callb_execute_class(CB_CL_CPR_DMA, CB_CODE_CPR_RESUME);
        if (cpr_suspend_succeeded) {
                PMD(PMD_SX, ("cpr_resume: CPR RPC callback\n"))
                (void) callb_execute_class(CB_CL_CPR_RPC, CB_CODE_CPR_RESUME);
        }

        prom_resume_prepost();
#if !defined(__sparc)
        /*
         * Need to sync the software clock with the hardware clock.
         * On Sparc, this occurs in the sparc-specific cbe. However
         * on x86 this needs to be handled _before_ we bring other cpu's
         * back online. So we call a resume function in timestamp.c
         */
        if (tsc_resume_in_cyclic == 0)
                tsc_resume();

#endif

#if defined(__sparc)
        if (cpr_suspend_succeeded && (boothowto & RB_DEBUG))
                kdi_dvec_cpr_restart();
#endif


#if defined(__x86)
rb_mp_paused:
        PT(PT_RMPO);
        PMD(PMD_SX, ("resume aux cpus\n"))

        if (cpr_suspend_succeeded) {
                cpr_resume_cpus();
        } else {
                cpr_unpause_cpus();
        }
#endif

        /*
         * let the tmp callout catch up.
         */
        PMD(PMD_SX, ("cpr_resume: CPR CALLOUT callback\n"))
        (void) callb_execute_class(CB_CL_CPR_CALLOUT, CB_CODE_CPR_RESUME);

        i_cpr_enable_intr();

        mutex_enter(&cpu_lock);
        PMD(PMD_SX, ("cpr_resume: cyclic resume\n"))
        cyclic_resume();
        mutex_exit(&cpu_lock);

        PMD(PMD_SX, ("cpr_resume: handle xc\n"))
        i_cpr_handle_xc(0);     /* turn it off to allow xc assertion */

        PMD(PMD_SX, ("cpr_resume: CPR POST KERNEL callback\n"))
        (void) callb_execute_class(CB_CL_CPR_POST_KERNEL, CB_CODE_CPR_RESUME);

        /*
         * statistics gathering
         */
        if (cpr_suspend_succeeded) {
                /*
                 * Prevent false alarm in tod_validate() due to tod
                 * value change between suspend and resume
                 */
                cpr_tod_status_set(TOD_CPR_RESUME_DONE);

                cpr_convert_promtime(&pwron_tv);

                ctp = &cpr_term.tm_shutdown;
                if (sleeptype == CPR_TODISK)
                        CPR_STAT_EVENT_END_TMZ(" write statefile", ctp);
                CPR_STAT_EVENT_END_TMZ("Suspend Total", ctp);

                CPR_STAT_EVENT_START_TMZ("Resume Total", &pwron_tv);

                str = " prom time";
                CPR_STAT_EVENT_START_TMZ(str, &pwron_tv);
                ctp = &cpr_term.tm_cprboot_start;
                CPR_STAT_EVENT_END_TMZ(str, ctp);

                str = " read statefile";
                CPR_STAT_EVENT_START_TMZ(str, ctp);
                ctp = &cpr_term.tm_cprboot_end;
                CPR_STAT_EVENT_END_TMZ(str, ctp);
        }

rb_stop_kernel_threads:
        /*
         * Put all threads back to where they belong; get the kernel
         * daemons straightened up too. Note that the callback table
         * locked during cpr_stop_kernel_threads() is released only
         * in cpr_start_kernel_threads(). Ensure modunloading is
         * disabled before starting kernel threads, we don't want
         * modunload thread to start changing device tree underneath.
         */
        PMD(PMD_SX, ("cpr_resume: modunload disable\n"))
        modunload_disable();
        PMD(PMD_SX, ("cpr_resume: start kernel threads\n"))
        cpr_start_kernel_threads();

rb_suspend_devices:
        CPR_DEBUG(CPR_DEBUG1, "resuming devices...");
        CPR_STAT_EVENT_START(" start drivers");

        PMD(PMD_SX,
            ("cpr_resume: rb_suspend_devices: cpr_resume_uniproc = %d\n",
            cpr_resume_uniproc))

#if defined(__x86)
        /*
         * If cpr_resume_uniproc is set, then pause all the other cpus
         * apart from the current cpu, so that broken drivers that think
         * that they are on a uniprocessor machine will resume
         */
        if (cpr_resume_uniproc) {
                mutex_enter(&cpu_lock);
                pause_cpus(NULL);
                mutex_exit(&cpu_lock);
        }
#endif

        /*
         * The policy here is to continue resuming everything we can if we
         * did not successfully finish suspend; and panic if we are coming
         * back from a fully suspended system.
         */
        PMD(PMD_SX, ("cpr_resume: resume devices\n"))
        rc = cpr_resume_devices(ddi_root_node(), 0);

        cpr_sae(0);

        str = "Failed to resume one or more devices.";

        if (rc) {
                if (CPR->c_substate == C_ST_DUMP ||
                    (sleeptype == CPR_TORAM &&
                    CPR->c_substate == C_ST_NODUMP)) {
                        if (cpr_test_point == FORCE_SUSPEND_TO_RAM) {
                                PMD(PMD_SX, ("cpr_resume: resume device "
                                    "warn\n"))
                                cpr_err(CE_WARN, str);
                        } else {
                                PMD(PMD_SX, ("cpr_resume: resume device "
                                    "panic\n"))
                                cpr_err(CE_PANIC, str);
                        }
                } else {
                        PMD(PMD_SX, ("cpr_resume: resume device warn\n"))
                        cpr_err(CE_WARN, str);
                }
        }

        CPR_STAT_EVENT_END(" start drivers");
        CPR_DEBUG(CPR_DEBUG1, "done\n");

#if defined(__x86)
        /*
         * If cpr_resume_uniproc is set, then unpause all the processors
         * that were paused before resuming the drivers
         */
        if (cpr_resume_uniproc) {
                mutex_enter(&cpu_lock);
                start_cpus();
                mutex_exit(&cpu_lock);
        }
#endif

        /*
         * If we had disabled modunloading in this cpr resume cycle (i.e. we
         * resumed from a state earlier than C_ST_SUSPEND_DEVICES), re-enable
         * modunloading now.
         */
        if (CPR->c_substate != C_ST_SUSPEND_DEVICES) {
                PMD(PMD_SX, ("cpr_resume: modload enable\n"))
                modunload_enable();
        }

        /*
         * Hooks needed by lock manager prior to resuming.
         * Refer to code for more comments.
         */
        PMD(PMD_SX, ("cpr_resume: lock mgr\n"))
        cpr_lock_mgr(lm_cprresume);

#if defined(__sparc)
        /*
         * This is a partial (half) resume during cpr suspend, we
         * haven't yet given up on the suspend. On return from here,
         * cpr_suspend() will try to reallocate and retry the suspend.
         */
        if (CPR->c_substate == C_ST_DUMP_NOSPC) {
                return (0);
        }

        if (sleeptype == CPR_TODISK) {
rb_statef_alloc:
                cpr_statef_close();

rb_disable_ufs_logging:
                /*
                 * if ufs logging was disabled, re-enable it
                 */
                (void) cpr_ufs_logging(1);
        }
#endif

rb_pm_reattach_noinvol:
        /*
         * When pm_reattach_noinvol() succeeds, modunload_thread will
         * remain disabled until after cpr suspend passes the
         * C_ST_STOP_KERNEL_THREADS state. If any failure happens before
         * cpr suspend reaches this state, we'll need to enable modunload
         * thread during rollback.
         */
        if (CPR->c_substate == C_ST_DISABLE_UFS_LOGGING ||
            CPR->c_substate == C_ST_STATEF_ALLOC ||
            CPR->c_substate == C_ST_SUSPEND_DEVICES ||
            CPR->c_substate == C_ST_STOP_KERNEL_THREADS) {
                PMD(PMD_SX, ("cpr_resume: reattach noinvol fini\n"))
                pm_reattach_noinvol_fini();
        }

        PMD(PMD_SX, ("cpr_resume: CPR POST USER callback\n"))
        (void) callb_execute_class(CB_CL_CPR_POST_USER, CB_CODE_CPR_RESUME);
        PMD(PMD_SX, ("cpr_resume: CPR PROMPRINTF callback\n"))
        (void) callb_execute_class(CB_CL_CPR_PROMPRINTF, CB_CODE_CPR_RESUME);

        PMD(PMD_SX, ("cpr_resume: restore direct levels\n"))
        pm_restore_direct_levels();

rb_stop_user_threads:
        CPR_DEBUG(CPR_DEBUG1, "starting user threads...");
        PMD(PMD_SX, ("cpr_resume: starting user threads\n"))
        cpr_start_user_threads();
        CPR_DEBUG(CPR_DEBUG1, "done\n");
        /*
         * Ask Xorg to resume the frame buffer, and wait for it to happen
         */
        mutex_enter(&srn_clone_lock);
        if (srn_signal) {
                PMD(PMD_SX, ("cpr_suspend: (*srn_signal)(..., "
                    "SRN_NORMAL_RESUME)\n"))
                srn_inuse = 1;  /* because (*srn_signal) cv_waits */
                (*srn_signal)(SRN_TYPE_APM, SRN_NORMAL_RESUME);
                srn_inuse = 0;
        } else {
                PMD(PMD_SX, ("cpr_suspend: srn_signal NULL\n"))
        }
        mutex_exit(&srn_clone_lock);

#if defined(__sparc)
rb_mp_offline:
        if (cpr_mp_online())
                cpr_err(CE_WARN, "Failed to online all the processors.");
#endif

rb_others:
        PMD(PMD_SX, ("cpr_resume: dep thread\n"))
        pm_dispatch_to_dep_thread(PM_DEP_WK_CPR_RESUME, NULL, NULL,
            PM_DEP_WAIT, NULL, 0);

        PMD(PMD_SX, ("cpr_resume: CPR PM callback\n"))
        (void) callb_execute_class(CB_CL_CPR_PM, CB_CODE_CPR_RESUME);

        if (cpr_suspend_succeeded) {
                cpr_stat_record_events();
        }

#if defined(__sparc)
        if (sleeptype == CPR_TODISK && !cpr_reusable_mode)
                cpr_clear_definfo();
#endif

        i_cpr_free_cpus();
        CPR_DEBUG(CPR_DEBUG1, "Sending SIGTHAW...");
        PMD(PMD_SX, ("cpr_resume: SIGTHAW\n"))
        cpr_signal_user(SIGTHAW);
        CPR_DEBUG(CPR_DEBUG1, "done\n");

        CPR_STAT_EVENT_END("Resume Total");

        CPR_STAT_EVENT_START_TMZ("WHOLE CYCLE", &wholecycle_tv);
        CPR_STAT_EVENT_END("WHOLE CYCLE");

        if (cpr_debug & CPR_DEBUG1)
                cmn_err(CE_CONT, "\nThe system is back where you left!\n");

        CPR_STAT_EVENT_START("POST CPR DELAY");

#ifdef CPR_STAT
        ctp = &cpr_term.tm_shutdown;
        CPR_STAT_EVENT_START_TMZ("PWROFF TIME", ctp);
        CPR_STAT_EVENT_END_TMZ("PWROFF TIME", &pwron_tv);

        CPR_STAT_EVENT_PRINT();
#endif /* CPR_STAT */

        PMD(PMD_SX, ("cpr_resume returns %x\n", rc))
        return (rc);
}

static void
cpr_suspend_init(int sleeptype)
{
        cpr_time_t *ctp;

        cpr_stat_init();

        /*
         * If cpr_suspend() failed before cpr_dump() gets a chance
         * to reinitialize the terminator of the statefile,
         * the values of the old terminator will still linger around.
         * Since the terminator contains information that we need to
         * decide whether suspend succeeded or not, we need to
         * reinitialize it as early as possible.
         */
        cpr_term.real_statef_size = 0;
        ctp = &cpr_term.tm_shutdown;
        bzero(ctp, sizeof (*ctp));
        ctp = &cpr_term.tm_cprboot_start;
        bzero(ctp, sizeof (*ctp));
        ctp = &cpr_term.tm_cprboot_end;
        bzero(ctp, sizeof (*ctp));

        if (sleeptype == CPR_TODISK) {
                /*
                 * Lookup the physical address of our thread structure.
                 * This should never be invalid and the entire thread structure
                 * is expected to reside within the same pfn.
                 */
                curthreadpfn = hat_getpfnum(kas.a_hat, (caddr_t)curthread);
                ASSERT(curthreadpfn != PFN_INVALID);
                ASSERT(curthreadpfn == hat_getpfnum(kas.a_hat,
                    (caddr_t)curthread + sizeof (kthread_t) - 1));
        }

        cpr_suspend_succeeded = 0;
}

/*
 * bring all the offline cpus online
 */
static int
cpr_all_online(void)
{
        int rc = 0;

#ifdef __sparc
        /*
         * do nothing
         */
#else

        cpu_t *cp;

        ASSERT(MUTEX_HELD(&cpu_lock));

        cp = cpu_list;
        do {
                cp->cpu_cpr_flags &= ~CPU_CPR_ONLINE;
                if (!CPU_ACTIVE(cp)) {
                        if ((rc = cpu_online(cp)) != 0)
                                break;
                        CPU_SET_CPR_FLAGS(cp, CPU_CPR_ONLINE);
                }
        } while ((cp = cp->cpu_next) != cpu_list);

        if (rc) {
                /*
                 * an online operation failed so offline the cpus
                 * that were onlined above to restore the system
                 * to its original state
                 */
                cpr_restore_offline();
        }
#endif
        return (rc);
}

/*
 * offline all the cpus that were brought online by cpr_all_online()
 */
static void
cpr_restore_offline(void)
{

#ifdef __sparc
        /*
         * do nothing
         */
#else

        cpu_t *cp;
        int rc = 0;

        ASSERT(MUTEX_HELD(&cpu_lock));

        cp = cpu_list;
        do {
                if (CPU_CPR_IS_ONLINE(cp)) {
                        rc = cpu_offline(cp, 0);
                        /*
                         * this offline should work, since the cpu was
                         * offline originally and was successfully onlined
                         * by cpr_all_online()
                         */
                        ASSERT(rc == 0);
                        cp->cpu_cpr_flags &= ~CPU_CPR_ONLINE;
                }
        } while ((cp = cp->cpu_next) != cpu_list);

#endif

}