15295Srandyf /* 25295Srandyf * CDDL HEADER START 35295Srandyf * 45295Srandyf * The contents of this file are subject to the terms of the 55295Srandyf * Common Development and Distribution License (the "License"). 65295Srandyf * You may not use this file except in compliance with the License. 75295Srandyf * 85295Srandyf * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 95295Srandyf * or http://www.opensolaris.org/os/licensing. 105295Srandyf * See the License for the specific language governing permissions 115295Srandyf * and limitations under the License. 125295Srandyf * 135295Srandyf * When distributing Covered Code, include this CDDL HEADER in each 145295Srandyf * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 155295Srandyf * If applicable, add the following below this CDDL HEADER, with the 165295Srandyf * fields enclosed by brackets "[]" replaced with your own identifying 175295Srandyf * information: Portions Copyright [yyyy] [name of copyright owner] 185295Srandyf * 195295Srandyf * CDDL HEADER END 205295Srandyf */ 215295Srandyf /* 225817Sjan * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 235295Srandyf * Use is subject to license terms. 
245295Srandyf */ 255295Srandyf 265295Srandyf #pragma ident "%Z%%M% %I% %E% SMI" 275295Srandyf 285295Srandyf /* 295295Srandyf * Platform specific implementation code 305295Srandyf * Currently only suspend to RAM is supported (ACPI S3) 315295Srandyf */ 325295Srandyf 335295Srandyf #define SUNDDI_IMPL 345295Srandyf 355295Srandyf #include <sys/types.h> 365295Srandyf #include <sys/promif.h> 375295Srandyf #include <sys/prom_isa.h> 385295Srandyf #include <sys/prom_plat.h> 395295Srandyf #include <sys/cpuvar.h> 405295Srandyf #include <sys/pte.h> 415295Srandyf #include <vm/hat.h> 425295Srandyf #include <vm/page.h> 435295Srandyf #include <vm/as.h> 445295Srandyf #include <sys/cpr.h> 455295Srandyf #include <sys/kmem.h> 465295Srandyf #include <sys/clock.h> 475295Srandyf #include <sys/kmem.h> 485295Srandyf #include <sys/panic.h> 495295Srandyf #include <vm/seg_kmem.h> 505295Srandyf #include <sys/cpu_module.h> 515295Srandyf #include <sys/callb.h> 525295Srandyf #include <sys/machsystm.h> 535295Srandyf #include <sys/vmsystm.h> 545295Srandyf #include <sys/systm.h> 555295Srandyf #include <sys/archsystm.h> 565295Srandyf #include <sys/stack.h> 575295Srandyf #include <sys/fs/ufs_fs.h> 585295Srandyf #include <sys/memlist.h> 595295Srandyf #include <sys/bootconf.h> 605295Srandyf #include <sys/thread.h> 615295Srandyf #include <sys/x_call.h> 625295Srandyf #include <sys/smp_impldefs.h> 635295Srandyf #include <vm/vm_dep.h> 645295Srandyf #include <sys/psm.h> 655295Srandyf #include <sys/epm.h> 665295Srandyf #include <sys/cpr_wakecode.h> 675295Srandyf #include <sys/x86_archext.h> 685295Srandyf #include <sys/reboot.h> 695295Srandyf #include <sys/acpi/acpi.h> 705295Srandyf #include <sys/acpica.h> 715295Srandyf 725295Srandyf #define AFMT "%lx" 735295Srandyf 745295Srandyf extern int flushes_require_xcalls; 755295Srandyf extern cpuset_t cpu_ready_set; 765295Srandyf 775295Srandyf #if defined(__amd64) 785295Srandyf extern void *wc_long_mode_64(void); 795295Srandyf #endif /* __amd64 */ 805295Srandyf extern 
int tsc_gethrtime_enable; 815295Srandyf extern void i_cpr_start_cpu(void); 825295Srandyf 835295Srandyf ushort_t cpr_mach_type = CPR_MACHTYPE_X86; 845295Srandyf void (*cpr_start_cpu_func)(void) = i_cpr_start_cpu; 855295Srandyf 865295Srandyf static wc_cpu_t *wc_other_cpus = NULL; 876336Sbholler static cpuset_t procset; 885295Srandyf 895295Srandyf static void 905295Srandyf init_real_mode_platter(int cpun, uint32_t offset, uint_t cr4, wc_desctbr_t gdt); 915295Srandyf 925295Srandyf static int i_cpr_platform_alloc(psm_state_request_t *req); 935295Srandyf static void i_cpr_platform_free(psm_state_request_t *req); 945295Srandyf static int i_cpr_save_apic(psm_state_request_t *req); 955295Srandyf static int i_cpr_restore_apic(psm_state_request_t *req); 965817Sjan static int wait_for_set(cpuset_t *set, int who); 975295Srandyf 985295Srandyf #if defined(__amd64) 995295Srandyf static void restore_stack(wc_cpu_t *cpup); 1005295Srandyf static void save_stack(wc_cpu_t *cpup); 1015295Srandyf void (*save_stack_func)(wc_cpu_t *) = save_stack; 1025295Srandyf #endif /* __amd64 */ 1035295Srandyf 1045295Srandyf /* 1055295Srandyf * restart paused slave cpus 1065295Srandyf */ 1075295Srandyf void 1085295Srandyf i_cpr_machdep_setup(void) 1095295Srandyf { 1105295Srandyf if (ncpus > 1) { 1115295Srandyf CPR_DEBUG(CPR_DEBUG1, ("MP restarted...\n")); 1125295Srandyf mutex_enter(&cpu_lock); 1135295Srandyf start_cpus(); 1145295Srandyf mutex_exit(&cpu_lock); 1155295Srandyf } 1165295Srandyf } 1175295Srandyf 1185295Srandyf 1195295Srandyf /* 1205295Srandyf * Stop all interrupt activities in the system 1215295Srandyf */ 1225295Srandyf void 1235295Srandyf i_cpr_stop_intr(void) 1245295Srandyf { 1255295Srandyf (void) spl7(); 1265295Srandyf } 1275295Srandyf 1285295Srandyf /* 1295295Srandyf * Set machine up to take interrupts 1305295Srandyf */ 1315295Srandyf void 1325295Srandyf i_cpr_enable_intr(void) 1335295Srandyf { 1345295Srandyf (void) spl0(); 1355295Srandyf } 1365295Srandyf 1375295Srandyf /* 
1385295Srandyf * Save miscellaneous information which needs to be written to the 1395295Srandyf * state file. This information is required to re-initialize 1405295Srandyf * kernel/prom handshaking. 1415295Srandyf */ 1425295Srandyf void 1435295Srandyf i_cpr_save_machdep_info(void) 1445295Srandyf { 1455295Srandyf int notcalled = 0; 1465295Srandyf ASSERT(notcalled); 1475295Srandyf } 1485295Srandyf 1495295Srandyf 1505295Srandyf void 1515295Srandyf i_cpr_set_tbr(void) 1525295Srandyf { 1535295Srandyf } 1545295Srandyf 1555295Srandyf 1565295Srandyf processorid_t 1575295Srandyf i_cpr_bootcpuid(void) 1585295Srandyf { 1595295Srandyf return (0); 1605295Srandyf } 1615295Srandyf 1625295Srandyf /* 1635295Srandyf * cpu0 should contain bootcpu info 1645295Srandyf */ 1655295Srandyf cpu_t * 1665295Srandyf i_cpr_bootcpu(void) 1675295Srandyf { 1685295Srandyf ASSERT(MUTEX_HELD(&cpu_lock)); 1695295Srandyf 1705295Srandyf return (cpu_get(i_cpr_bootcpuid())); 1715295Srandyf } 1725295Srandyf 1735295Srandyf /* 1745295Srandyf * Save context for the specified CPU 1755295Srandyf */ 1765295Srandyf void * 1775295Srandyf i_cpr_save_context(void *arg) 1785295Srandyf { 1795295Srandyf long index = (long)arg; 1805295Srandyf psm_state_request_t *papic_state; 1815295Srandyf int resuming; 1825295Srandyf int ret; 1835295Srandyf 1845295Srandyf PMD(PMD_SX, ("i_cpr_save_context() index = %ld\n", index)) 1855295Srandyf 1865295Srandyf ASSERT(index < NCPU); 1875295Srandyf 1885295Srandyf papic_state = &(wc_other_cpus + index)->wc_apic_state; 1895295Srandyf 1905295Srandyf ret = i_cpr_platform_alloc(papic_state); 1915295Srandyf ASSERT(ret == 0); 1925295Srandyf 1935295Srandyf ret = i_cpr_save_apic(papic_state); 1945295Srandyf ASSERT(ret == 0); 1955295Srandyf 1965295Srandyf /* 1975295Srandyf * wc_save_context returns twice, once when susending and 1985295Srandyf * once when resuming, wc_save_context() returns 0 when 1995295Srandyf * suspending and non-zero upon resume 2005295Srandyf */ 2015295Srandyf resuming = 
(wc_save_context(wc_other_cpus + index) == 0); 2025295Srandyf 2035295Srandyf PMD(PMD_SX, ("i_cpr_save_context: wc_save_context returns %d\n", 2045295Srandyf resuming)) 2055295Srandyf 2065295Srandyf /* 2075295Srandyf * do NOT call any functions after this point, because doing so 2085295Srandyf * will modify the stack that we are running on 2095295Srandyf */ 2105295Srandyf 2115295Srandyf if (resuming) { 2125295Srandyf 2135295Srandyf ret = i_cpr_restore_apic(papic_state); 2145295Srandyf ASSERT(ret == 0); 2155295Srandyf 2165295Srandyf i_cpr_platform_free(papic_state); 2175295Srandyf 2185295Srandyf /* 219*7113Sbholler * Enable interrupts on this cpu. 220*7113Sbholler * Do not bind interrupts to this CPU's local APIC until 221*7113Sbholler * the CPU is ready to recieve interrupts. 222*7113Sbholler */ 223*7113Sbholler ASSERT(CPU->cpu_id != i_cpr_bootcpuid()); 224*7113Sbholler mutex_enter(&cpu_lock); 225*7113Sbholler cpu_enable_intr(CPU); 226*7113Sbholler mutex_exit(&cpu_lock); 227*7113Sbholler 228*7113Sbholler /* 2295295Srandyf * Setting the bit in cpu_ready_set must be the last operation 2305295Srandyf * in processor initialization; the boot CPU will continue to 2315295Srandyf * boot once it sees this bit set for all active CPUs. 
2325295Srandyf */ 2335295Srandyf CPUSET_ATOMIC_ADD(cpu_ready_set, CPU->cpu_id); 2345295Srandyf 2355295Srandyf PMD(PMD_SX, 2366336Sbholler ("i_cpr_save_context() resuming cpu %d in cpu_ready_set\n", 2376336Sbholler CPU->cpu_id)) 2385295Srandyf } 2395295Srandyf return (NULL); 2405295Srandyf } 2415295Srandyf 2425295Srandyf static ushort_t *warm_reset_vector = NULL; 2435295Srandyf 2445295Srandyf static ushort_t * 2455295Srandyf map_warm_reset_vector() 2465295Srandyf { 2475295Srandyf /*LINTED*/ 2485295Srandyf if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR, 2495295Srandyf sizeof (ushort_t *), PROT_READ|PROT_WRITE))) 2505295Srandyf return (NULL); 2515295Srandyf 2525295Srandyf /* 2535295Srandyf * setup secondary cpu bios boot up vector 2545295Srandyf */ 2555295Srandyf *warm_reset_vector = (ushort_t)((caddr_t) 2565295Srandyf /*LINTED*/ 2575295Srandyf ((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va 2585295Srandyf + ((ulong_t)rm_platter_va & 0xf)); 2595295Srandyf warm_reset_vector++; 2605295Srandyf *warm_reset_vector = (ushort_t)(rm_platter_pa >> 4); 2615295Srandyf 2625295Srandyf --warm_reset_vector; 2635295Srandyf return (warm_reset_vector); 2645295Srandyf } 2655295Srandyf 2665295Srandyf void 2675295Srandyf i_cpr_pre_resume_cpus() 2685295Srandyf { 2695295Srandyf /* 2705295Srandyf * this is a cut down version of start_other_cpus() 2715295Srandyf * just do the initialization to wake the other cpus 2725295Srandyf */ 2735295Srandyf unsigned who; 2745817Sjan int boot_cpuid = i_cpr_bootcpuid(); 2755295Srandyf uint32_t code_length = 0; 2765295Srandyf caddr_t wakevirt = rm_platter_va; 2775295Srandyf /*LINTED*/ 2785295Srandyf wakecode_t *wp = (wakecode_t *)wakevirt; 2795295Srandyf char *str = "i_cpr_pre_resume_cpus"; 2805295Srandyf extern int get_tsc_ready(); 2815295Srandyf int err; 2825295Srandyf 2835295Srandyf /*LINTED*/ 2845295Srandyf rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va; 2855295Srandyf 2865295Srandyf /* 2875295Srandyf 
* Copy the real mode code at "real_mode_start" to the 2885295Srandyf * page at rm_platter_va. 2895295Srandyf */ 2905295Srandyf warm_reset_vector = map_warm_reset_vector(); 2915295Srandyf if (warm_reset_vector == NULL) { 2925295Srandyf PMD(PMD_SX, ("i_cpr_pre_resume_cpus() returning #2\n")) 2935295Srandyf return; 2945295Srandyf } 2955295Srandyf 2965295Srandyf flushes_require_xcalls = 1; 2975295Srandyf 2985295Srandyf /* 2995295Srandyf * We lock our affinity to the master CPU to ensure that all slave CPUs 3005295Srandyf * do their TSC syncs with the same CPU. 3015295Srandyf */ 3025295Srandyf 3035295Srandyf affinity_set(CPU_CURRENT); 3045295Srandyf 3055817Sjan /* 3066336Sbholler * Mark the boot cpu as being ready and in the procset, since we are 3076336Sbholler * running on that cpu. 3085817Sjan */ 3095817Sjan CPUSET_ONLY(cpu_ready_set, boot_cpuid); 3106336Sbholler CPUSET_ONLY(procset, boot_cpuid); 3115295Srandyf 3125295Srandyf for (who = 0; who < ncpus; who++) { 3135295Srandyf 3145295Srandyf wc_cpu_t *cpup = wc_other_cpus + who; 3155295Srandyf wc_desctbr_t gdt; 3165295Srandyf 3175817Sjan if (who == boot_cpuid) 3185295Srandyf continue; 3195295Srandyf 3205295Srandyf if (!CPU_IN_SET(mp_cpus, who)) 3215295Srandyf continue; 3225295Srandyf 3235295Srandyf PMD(PMD_SX, ("%s() waking up %d cpu\n", str, who)) 3245295Srandyf 3255295Srandyf bcopy(cpup, &(wp->wc_cpu), sizeof (wc_cpu_t)); 3265295Srandyf 3275295Srandyf gdt.base = cpup->wc_gdt_base; 3285295Srandyf gdt.limit = cpup->wc_gdt_limit; 3295295Srandyf 3305295Srandyf #if defined(__amd64) 3315295Srandyf code_length = (uint32_t)wc_long_mode_64 - (uint32_t)wc_rm_start; 3325295Srandyf #else 3335295Srandyf code_length = 0; 3345295Srandyf #endif 3355295Srandyf 3365295Srandyf init_real_mode_platter(who, code_length, cpup->wc_cr4, gdt); 3375295Srandyf 3385295Srandyf if ((err = mach_cpuid_start(who, rm_platter_va)) != 0) { 3395295Srandyf cmn_err(CE_WARN, "cpu%d: failed to start during " 3405295Srandyf "suspend/resume error %d", who, 
err); 3415295Srandyf continue; 3425295Srandyf } 3435295Srandyf 3446336Sbholler PMD(PMD_SX, ("%s() #1 waiting for %d in procset\n", str, who)) 3455295Srandyf 3465817Sjan if (!wait_for_set(&procset, who)) 3475817Sjan continue; 3485295Srandyf 3495295Srandyf PMD(PMD_SX, ("%s() %d cpu started\n", str, who)) 3505295Srandyf 3515817Sjan PMD(PMD_SX, ("%s() tsc_ready = %d\n", str, get_tsc_ready())) 3525295Srandyf 3535295Srandyf if (tsc_gethrtime_enable) { 3545295Srandyf PMD(PMD_SX, ("%s() calling tsc_sync_master\n", str)) 3555295Srandyf tsc_sync_master(who); 3565295Srandyf } 3575295Srandyf 3586336Sbholler PMD(PMD_SX, ("%s() waiting for %d in cpu_ready_set\n", str, 3596336Sbholler who)) 3605295Srandyf /* 3615295Srandyf * Wait for cpu to declare that it is ready, we want the 3625295Srandyf * cpus to start serially instead of in parallel, so that 3635295Srandyf * they do not contend with each other in wc_rm_start() 3645295Srandyf */ 3655817Sjan if (!wait_for_set(&cpu_ready_set, who)) 3665817Sjan continue; 3675295Srandyf 3685295Srandyf /* 3695295Srandyf * do not need to re-initialize dtrace using dtrace_cpu_init 3705295Srandyf * function 3715295Srandyf */ 3725295Srandyf PMD(PMD_SX, ("%s() cpu %d now ready\n", str, who)) 3735295Srandyf } 3745295Srandyf 3755295Srandyf affinity_clear(); 3765295Srandyf 3775295Srandyf PMD(PMD_SX, ("%s() all cpus now ready\n", str)) 3785817Sjan 3795295Srandyf } 3805295Srandyf 3815295Srandyf static void 3825295Srandyf unmap_warm_reset_vector(ushort_t *warm_reset_vector) 3835295Srandyf { 3845295Srandyf psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *)); 3855295Srandyf } 3865295Srandyf 3875295Srandyf /* 3885295Srandyf * We need to setup a 1:1 (virtual to physical) mapping for the 3895295Srandyf * page containing the wakeup code. 
3905295Srandyf */ 3915295Srandyf static struct as *save_as; /* when switching to kas */ 3925295Srandyf 3935295Srandyf static void 3945295Srandyf unmap_wakeaddr_1to1(uint64_t wakephys) 3955295Srandyf { 3965295Srandyf uintptr_t wp = (uintptr_t)wakephys; 3975295Srandyf hat_setup(save_as->a_hat, 0); /* switch back from kernel hat */ 3985295Srandyf hat_unload(kas.a_hat, (caddr_t)wp, PAGESIZE, HAT_UNLOAD); 3995295Srandyf } 4005295Srandyf 4015295Srandyf void 4025295Srandyf i_cpr_post_resume_cpus() 4035295Srandyf { 4045295Srandyf uint64_t wakephys = rm_platter_pa; 4055295Srandyf 4065295Srandyf if (warm_reset_vector != NULL) 4075295Srandyf unmap_warm_reset_vector(warm_reset_vector); 4085295Srandyf 4095295Srandyf hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE, 4105295Srandyf HAT_UNLOAD); 4115295Srandyf 4125295Srandyf /* 4135295Srandyf * cmi_post_mpstartup() is only required upon boot not upon 4145295Srandyf * resume from RAM 4155295Srandyf */ 4165295Srandyf 4175295Srandyf PT(PT_UNDO1to1); 4185295Srandyf /* Tear down 1:1 mapping for wakeup code */ 4195295Srandyf unmap_wakeaddr_1to1(wakephys); 4205295Srandyf } 4215295Srandyf 4225295Srandyf /* ARGSUSED */ 4235295Srandyf void 4245295Srandyf i_cpr_handle_xc(int flag) 4255295Srandyf { 4265295Srandyf } 4275295Srandyf 4285295Srandyf int 4295295Srandyf i_cpr_reusable_supported(void) 4305295Srandyf { 4315295Srandyf return (0); 4325295Srandyf } 4335295Srandyf static void 4345295Srandyf map_wakeaddr_1to1(uint64_t wakephys) 4355295Srandyf { 4365295Srandyf uintptr_t wp = (uintptr_t)wakephys; 4375295Srandyf hat_devload(kas.a_hat, (caddr_t)wp, PAGESIZE, btop(wakephys), 4385295Srandyf (PROT_READ|PROT_WRITE|PROT_EXEC|HAT_STORECACHING_OK|HAT_NOSYNC), 4395295Srandyf HAT_LOAD); 4405295Srandyf save_as = curthread->t_procp->p_as; 4415295Srandyf hat_setup(kas.a_hat, 0); /* switch to kernel-only hat */ 4425295Srandyf } 4435295Srandyf 4445295Srandyf 4455295Srandyf void 4465295Srandyf prt_other_cpus() 4475295Srandyf { 
4485295Srandyf int who; 4495295Srandyf 4505295Srandyf if (ncpus == 1) { 4515295Srandyf PMD(PMD_SX, ("prt_other_cpus() other cpu table empty for " 4525295Srandyf "uniprocessor machine\n")) 4535295Srandyf return; 4545295Srandyf } 4555295Srandyf 4565295Srandyf for (who = 0; who < ncpus; who++) { 4575295Srandyf 4585295Srandyf wc_cpu_t *cpup = wc_other_cpus + who; 4595295Srandyf 4605295Srandyf PMD(PMD_SX, ("prt_other_cpus() who = %d, gdt=%p:%x, " 4615295Srandyf "idt=%p:%x, ldt=%lx, tr=%lx, kgsbase=" 4625295Srandyf AFMT ", sp=%lx\n", who, 4635295Srandyf (void *)cpup->wc_gdt_base, cpup->wc_gdt_limit, 4645295Srandyf (void *)cpup->wc_idt_base, cpup->wc_idt_limit, 4655295Srandyf (long)cpup->wc_ldt, (long)cpup->wc_tr, 4665295Srandyf (long)cpup->wc_kgsbase, (long)cpup->wc_rsp)) 4675295Srandyf } 4685295Srandyf } 4695295Srandyf 4705295Srandyf /* 4715295Srandyf * Power down the system. 4725295Srandyf */ 4735295Srandyf int 4745295Srandyf i_cpr_power_down(int sleeptype) 4755295Srandyf { 4765295Srandyf caddr_t wakevirt = rm_platter_va; 4775295Srandyf uint64_t wakephys = rm_platter_pa; 4786336Sbholler ulong_t saved_intr; 4795295Srandyf uint32_t code_length = 0; 4805295Srandyf wc_desctbr_t gdt; 4815295Srandyf /*LINTED*/ 4825295Srandyf wakecode_t *wp = (wakecode_t *)wakevirt; 4835295Srandyf /*LINTED*/ 4845295Srandyf rm_platter_t *wcpp = (rm_platter_t *)wakevirt; 4855295Srandyf wc_cpu_t *cpup = &(wp->wc_cpu); 4865295Srandyf dev_info_t *ppm; 4875295Srandyf int ret = 0; 4885295Srandyf power_req_t power_req; 4895295Srandyf char *str = "i_cpr_power_down"; 4905295Srandyf #if defined(__amd64) 4915295Srandyf /*LINTED*/ 4925295Srandyf rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va; 4935295Srandyf #endif 4945295Srandyf extern int cpr_suspend_succeeded; 4955295Srandyf extern void kernel_wc_code(); 4965295Srandyf 4975295Srandyf ASSERT(sleeptype == CPR_TORAM); 4985295Srandyf ASSERT(CPU->cpu_id == 0); 4995295Srandyf 5005295Srandyf if ((ppm = PPM(ddi_root_node())) == NULL) { 
5015295Srandyf PMD(PMD_SX, ("%s: root node not claimed\n", str)) 5025295Srandyf return (ENOTTY); 5035295Srandyf } 5045295Srandyf 5055295Srandyf PMD(PMD_SX, ("Entering %s()\n", str)) 5065295Srandyf 5075295Srandyf PT(PT_IC); 5085295Srandyf saved_intr = intr_clear(); 5095295Srandyf 5105295Srandyf PT(PT_1to1); 5115295Srandyf /* Setup 1:1 mapping for wakeup code */ 5125295Srandyf map_wakeaddr_1to1(wakephys); 5135295Srandyf 5145295Srandyf PMD(PMD_SX, ("ncpus=%d\n", ncpus)) 5155295Srandyf 5165295Srandyf PMD(PMD_SX, ("wc_rm_end - wc_rm_start=%lx WC_CODESIZE=%x\n", 5175295Srandyf ((size_t)((uint_t)wc_rm_end - (uint_t)wc_rm_start)), WC_CODESIZE)) 5185295Srandyf 5195295Srandyf PMD(PMD_SX, ("wakevirt=%p, wakephys=%x\n", 5205295Srandyf (void *)wakevirt, (uint_t)wakephys)) 5215295Srandyf 5225295Srandyf ASSERT(((size_t)((uint_t)wc_rm_end - (uint_t)wc_rm_start)) < 5235295Srandyf WC_CODESIZE); 5245295Srandyf 5255295Srandyf bzero(wakevirt, PAGESIZE); 5265295Srandyf 5275295Srandyf /* Copy code to rm_platter */ 5285295Srandyf bcopy((caddr_t)wc_rm_start, wakevirt, 5295295Srandyf (size_t)((uint_t)wc_rm_end - (uint_t)wc_rm_start)); 5305295Srandyf 5315295Srandyf prt_other_cpus(); 5325295Srandyf 5335295Srandyf #if defined(__amd64) 5345295Srandyf 5355295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_cr4=%lx, getcr4()=%lx\n", 5365295Srandyf (ulong_t)real_mode_platter->rm_cr4, (ulong_t)getcr4())) 5375295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_pdbr=%lx, getcr3()=%lx\n", 5385295Srandyf (ulong_t)real_mode_platter->rm_pdbr, getcr3())) 5395295Srandyf 5405295Srandyf real_mode_platter->rm_cr4 = getcr4(); 5415295Srandyf real_mode_platter->rm_pdbr = getcr3(); 5425295Srandyf 5435295Srandyf rmp_gdt_init(real_mode_platter); 5445295Srandyf 5455295Srandyf /* 5465295Srandyf * Since the CPU needs to jump to protected mode using an identity 5475295Srandyf * mapped address, we need to calculate it here. 
5485295Srandyf */ 5495295Srandyf real_mode_platter->rm_longmode64_addr = rm_platter_pa + 5505295Srandyf ((uint32_t)wc_long_mode_64 - (uint32_t)wc_rm_start); 5515295Srandyf 5525295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_cr4=%lx, getcr4()=%lx\n", 5535295Srandyf (ulong_t)real_mode_platter->rm_cr4, getcr4())) 5545295Srandyf 5555295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_pdbr=%lx, getcr3()=%lx\n", 5565295Srandyf (ulong_t)real_mode_platter->rm_pdbr, getcr3())) 5575295Srandyf 5585295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_longmode64_addr=%lx\n", 5595295Srandyf (ulong_t)real_mode_platter->rm_longmode64_addr)) 5605295Srandyf 5615295Srandyf #endif 5625295Srandyf 5635295Srandyf PT(PT_SC); 5645295Srandyf if (wc_save_context(cpup)) { 5655295Srandyf 5665295Srandyf ret = i_cpr_platform_alloc(&(wc_other_cpus->wc_apic_state)); 5675295Srandyf if (ret != 0) 5685295Srandyf return (ret); 5695295Srandyf 5705295Srandyf ret = i_cpr_save_apic(&(wc_other_cpus->wc_apic_state)); 5715295Srandyf PMD(PMD_SX, ("%s: i_cpr_save_apic() returned %d\n", str, ret)) 5725295Srandyf if (ret != 0) 5735295Srandyf return (ret); 5745295Srandyf 5755295Srandyf PMD(PMD_SX, ("wakephys=%x, kernel_wc_code=%p\n", 5765295Srandyf (uint_t)wakephys, (void *)&kernel_wc_code)) 5775295Srandyf PMD(PMD_SX, ("virtaddr=%lx, retaddr=%lx\n", 5785295Srandyf (long)cpup->wc_virtaddr, (long)cpup->wc_retaddr)) 5795295Srandyf PMD(PMD_SX, ("ebx=%x, edi=%x, esi=%x, ebp=%x, esp=%x\n", 5805295Srandyf cpup->wc_ebx, cpup->wc_edi, cpup->wc_esi, cpup->wc_ebp, 5815295Srandyf cpup->wc_esp)) 5825295Srandyf PMD(PMD_SX, ("cr0=%lx, cr3=%lx, cr4=%lx\n", 5835295Srandyf (long)cpup->wc_cr0, (long)cpup->wc_cr3, 5845295Srandyf (long)cpup->wc_cr4)) 5855295Srandyf PMD(PMD_SX, ("cs=%x, ds=%x, es=%x, ss=%x, fs=%lx, gs=%lx, " 5865295Srandyf "flgs=%lx\n", cpup->wc_cs, cpup->wc_ds, cpup->wc_es, 5875295Srandyf cpup->wc_ss, (long)cpup->wc_fs, (long)cpup->wc_gs, 5885295Srandyf (long)cpup->wc_eflags)) 5895295Srandyf 5905295Srandyf PMD(PMD_SX, 
("gdt=%p:%x, idt=%p:%x, ldt=%lx, tr=%lx, " 5915295Srandyf "kgbase=%lx\n", (void *)cpup->wc_gdt_base, 5925295Srandyf cpup->wc_gdt_limit, (void *)cpup->wc_idt_base, 5935295Srandyf cpup->wc_idt_limit, (long)cpup->wc_ldt, 5945295Srandyf (long)cpup->wc_tr, (long)cpup->wc_kgsbase)) 5955295Srandyf 5965295Srandyf gdt.base = cpup->wc_gdt_base; 5975295Srandyf gdt.limit = cpup->wc_gdt_limit; 5985295Srandyf 5995295Srandyf #if defined(__amd64) 6005295Srandyf code_length = (uint32_t)wc_long_mode_64 - 6015295Srandyf (uint32_t)wc_rm_start; 6025295Srandyf #else 6035295Srandyf code_length = 0; 6045295Srandyf #endif 6055295Srandyf 6065295Srandyf init_real_mode_platter(0, code_length, cpup->wc_cr4, gdt); 6075295Srandyf 6085295Srandyf #if defined(__amd64) 6095295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_cr4=%lx, getcr4()=%lx\n", 6105295Srandyf (ulong_t)wcpp->rm_cr4, getcr4())) 6115295Srandyf 6125295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_pdbr=%lx, getcr3()=%lx\n", 6135295Srandyf (ulong_t)wcpp->rm_pdbr, getcr3())) 6145295Srandyf 6155295Srandyf PMD(PMD_SX, ("real_mode_platter->rm_longmode64_addr=%lx\n", 6165295Srandyf (ulong_t)wcpp->rm_longmode64_addr)) 6175295Srandyf 6185295Srandyf PMD(PMD_SX, 6195295Srandyf ("real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64]=%lx\n", 6205295Srandyf (ulong_t)wcpp->rm_temp_gdt[TEMPGDT_KCODE64])) 6215295Srandyf #endif 6225295Srandyf 6235295Srandyf PMD(PMD_SX, ("gdt=%p:%x, idt=%p:%x, ldt=%lx, tr=%lx, " 6245295Srandyf "kgsbase=%lx\n", (void *)wcpp->rm_gdt_base, 6255295Srandyf wcpp->rm_gdt_lim, (void *)wcpp->rm_idt_base, 6265295Srandyf wcpp->rm_idt_lim, (long)cpup->wc_ldt, (long)cpup->wc_tr, 6275295Srandyf (long)cpup->wc_kgsbase)) 6285295Srandyf 6295295Srandyf power_req.request_type = PMR_PPM_ENTER_SX; 6305295Srandyf power_req.req.ppm_power_enter_sx_req.sx_state = S3; 6315295Srandyf power_req.req.ppm_power_enter_sx_req.test_point = 6325295Srandyf cpr_test_point; 6335295Srandyf power_req.req.ppm_power_enter_sx_req.wakephys = wakephys; 6345295Srandyf 
6355295Srandyf PMD(PMD_SX, ("%s: pm_ctlops PMR_PPM_ENTER_SX\n", str)) 6365295Srandyf PT(PT_PPMCTLOP); 6375295Srandyf (void) pm_ctlops(ppm, ddi_root_node(), DDI_CTLOPS_POWER, 6385295Srandyf &power_req, &ret); 6395295Srandyf PMD(PMD_SX, ("%s: returns %d\n", str, ret)) 6405295Srandyf 6415295Srandyf /* 6425295Srandyf * If it works, we get control back to the else branch below 6435295Srandyf * If we get control back here, it didn't work. 6445295Srandyf * XXX return EINVAL here? 6455295Srandyf */ 6465295Srandyf 6475295Srandyf unmap_wakeaddr_1to1(wakephys); 6485295Srandyf intr_restore(saved_intr); 6495295Srandyf 6505295Srandyf return (ret); 6515295Srandyf } else { 6525295Srandyf cpr_suspend_succeeded = 1; 6535295Srandyf 6545295Srandyf power_req.request_type = PMR_PPM_EXIT_SX; 6555295Srandyf power_req.req.ppm_power_enter_sx_req.sx_state = S3; 6565295Srandyf 6575295Srandyf PMD(PMD_SX, ("%s: pm_ctlops PMR_PPM_EXIT_SX\n", str)) 6585295Srandyf PT(PT_PPMCTLOP); 6595295Srandyf (void) pm_ctlops(ppm, ddi_root_node(), DDI_CTLOPS_POWER, 6605295Srandyf &power_req, &ret); 6615295Srandyf PMD(PMD_SX, ("%s: returns %d\n", str, ret)) 6625295Srandyf 6635295Srandyf ret = i_cpr_restore_apic(&(wc_other_cpus->wc_apic_state)); 6645295Srandyf /* 6655295Srandyf * the restore should never fail, if the saved suceeded 6665295Srandyf */ 6675295Srandyf ASSERT(ret == 0); 6685295Srandyf 6695295Srandyf i_cpr_platform_free(&(wc_other_cpus->wc_apic_state)); 6705295Srandyf 671*7113Sbholler /* 672*7113Sbholler * Enable interrupts on boot cpu. 
673*7113Sbholler */ 674*7113Sbholler ASSERT(CPU->cpu_id == i_cpr_bootcpuid()); 675*7113Sbholler mutex_enter(&cpu_lock); 676*7113Sbholler cpu_enable_intr(CPU); 677*7113Sbholler mutex_exit(&cpu_lock); 678*7113Sbholler 6795295Srandyf PT(PT_INTRRESTORE); 6805295Srandyf intr_restore(saved_intr); 6815295Srandyf PT(PT_CPU); 6825295Srandyf 6835295Srandyf return (ret); 6845295Srandyf } 6855295Srandyf } 6865295Srandyf 6875295Srandyf /* 6885295Srandyf * Stop all other cpu's before halting or rebooting. We pause the cpu's 6895295Srandyf * instead of sending a cross call. 6905295Srandyf * Stolen from sun4/os/mp_states.c 6915295Srandyf */ 6925295Srandyf 6935295Srandyf static int cpu_are_paused; /* sic */ 6945295Srandyf 6955295Srandyf void 6965295Srandyf i_cpr_stop_other_cpus(void) 6975295Srandyf { 6985295Srandyf mutex_enter(&cpu_lock); 6995295Srandyf if (cpu_are_paused) { 7005295Srandyf mutex_exit(&cpu_lock); 7015295Srandyf return; 7025295Srandyf } 7035295Srandyf pause_cpus(NULL); 7045295Srandyf cpu_are_paused = 1; 7055295Srandyf 7065295Srandyf mutex_exit(&cpu_lock); 7075295Srandyf } 7085295Srandyf 7095295Srandyf int 7105295Srandyf i_cpr_is_supported(int sleeptype) 7115295Srandyf { 7125295Srandyf extern int cpr_supported_override; 7135295Srandyf extern int cpr_platform_enable; 7145295Srandyf extern int pm_S3_enabled; 7155295Srandyf 7165295Srandyf if (sleeptype != CPR_TORAM) 7175295Srandyf return (0); 7185295Srandyf 7195295Srandyf /* 7205295Srandyf * The next statement tests if a specific platform has turned off 7215295Srandyf * cpr support. 7225295Srandyf */ 7235295Srandyf if (cpr_supported_override) 7245295Srandyf return (0); 7255295Srandyf 7265295Srandyf /* 7275295Srandyf * If a platform has specifically turned on cpr support ... 
7285295Srandyf */ 7295295Srandyf if (cpr_platform_enable) 7305295Srandyf return (1); 7315295Srandyf 7325295Srandyf return (pm_S3_enabled); 7335295Srandyf } 7345295Srandyf 7355295Srandyf void 7365295Srandyf i_cpr_bitmap_cleanup(void) 7375295Srandyf { 7385295Srandyf } 7395295Srandyf 7405295Srandyf void 7415295Srandyf i_cpr_free_memory_resources(void) 7425295Srandyf { 7435295Srandyf } 7445295Srandyf 7455295Srandyf /* 7465295Srandyf * Needed only for S3 so far 7475295Srandyf */ 7485295Srandyf static int 7495295Srandyf i_cpr_platform_alloc(psm_state_request_t *req) 7505295Srandyf { 7515295Srandyf char *str = "i_cpr_platform_alloc"; 7525295Srandyf 7535295Srandyf PMD(PMD_SX, ("cpu = %d, %s(%p) \n", CPU->cpu_id, str, (void *)req)) 7545295Srandyf 7555295Srandyf if (ncpus == 1) { 7565295Srandyf PMD(PMD_SX, ("%s() : ncpus == 1\n", str)) 7575295Srandyf return (0); 7585295Srandyf } 7595295Srandyf 7605295Srandyf req->psr_cmd = PSM_STATE_ALLOC; 7615295Srandyf return ((*psm_state)(req)); 7625295Srandyf } 7635295Srandyf 7645295Srandyf /* 7655295Srandyf * Needed only for S3 so far 7665295Srandyf */ 7675295Srandyf static void 7685295Srandyf i_cpr_platform_free(psm_state_request_t *req) 7695295Srandyf { 7705295Srandyf char *str = "i_cpr_platform_free"; 7715295Srandyf 7725295Srandyf PMD(PMD_SX, ("cpu = %d, %s(%p) \n", CPU->cpu_id, str, (void *)req)) 7735295Srandyf 7745295Srandyf if (ncpus == 1) { 7755295Srandyf PMD(PMD_SX, ("%s() : ncpus == 1\n", str)) 7765295Srandyf } 7775295Srandyf 7785295Srandyf req->psr_cmd = PSM_STATE_FREE; 7795295Srandyf (void) (*psm_state)(req); 7805295Srandyf } 7815295Srandyf 7825295Srandyf static int 7835295Srandyf i_cpr_save_apic(psm_state_request_t *req) 7845295Srandyf { 7855295Srandyf char *str = "i_cpr_save_apic"; 7865295Srandyf 7875295Srandyf if (ncpus == 1) { 7885295Srandyf PMD(PMD_SX, ("%s() : ncpus == 1\n", str)) 7895295Srandyf return (0); 7905295Srandyf } 7915295Srandyf 7925295Srandyf req->psr_cmd = PSM_STATE_SAVE; 7935295Srandyf return 
((*psm_state)(req)); 7945295Srandyf } 7955295Srandyf 7965295Srandyf static int 7975295Srandyf i_cpr_restore_apic(psm_state_request_t *req) 7985295Srandyf { 7995295Srandyf char *str = "i_cpr_restore_apic"; 8005295Srandyf 8015295Srandyf if (ncpus == 1) { 8025295Srandyf PMD(PMD_SX, ("%s() : ncpus == 1\n", str)) 8035295Srandyf return (0); 8045295Srandyf } 8055295Srandyf 8065295Srandyf req->psr_cmd = PSM_STATE_RESTORE; 8075295Srandyf return ((*psm_state)(req)); 8085295Srandyf } 8095295Srandyf 8105295Srandyf 8115295Srandyf /* stop lint complaining about offset not being used in 32bit mode */ 8125295Srandyf #if !defined(__amd64) 8135295Srandyf /*ARGSUSED*/ 8145295Srandyf #endif 8155295Srandyf static void 8165295Srandyf init_real_mode_platter(int cpun, uint32_t offset, uint_t cr4, wc_desctbr_t gdt) 8175295Srandyf { 8185295Srandyf /*LINTED*/ 8195295Srandyf rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va; 8205295Srandyf 8215295Srandyf /* 8225295Srandyf * Fill up the real mode platter to make it easy for real mode code to 8235295Srandyf * kick it off. This area should really be one passed by boot to kernel 8245295Srandyf * and guaranteed to be below 1MB and aligned to 16 bytes. Should also 8255295Srandyf * have identical physical and virtual address in paged mode. 
8265295Srandyf */ 8275295Srandyf 8285295Srandyf real_mode_platter->rm_pdbr = getcr3(); 8295295Srandyf real_mode_platter->rm_cpu = cpun; 8305295Srandyf real_mode_platter->rm_cr4 = cr4; 8315295Srandyf 8325295Srandyf real_mode_platter->rm_gdt_base = gdt.base; 8335295Srandyf real_mode_platter->rm_gdt_lim = gdt.limit; 8345295Srandyf 8355295Srandyf #if defined(__amd64) 8365295Srandyf real_mode_platter->rm_x86feature = x86_feature; 8375295Srandyf 8385295Srandyf if (getcr3() > 0xffffffffUL) 8395295Srandyf panic("Cannot initialize CPUs; kernel's 64-bit page tables\n" 8405295Srandyf "located above 4G in physical memory (@ 0x%llx).", 8415295Srandyf (unsigned long long)getcr3()); 8425295Srandyf 8435295Srandyf /* 8445295Srandyf * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY 8455295Srandyf * by code in real_mode_start(): 8465295Srandyf * 8475295Srandyf * GDT[0]: NULL selector 8485295Srandyf * GDT[1]: 64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1 8495295Srandyf * 8505295Srandyf * Clear the IDT as interrupts will be off and a limit of 0 will cause 8515295Srandyf * the CPU to triple fault and reset on an NMI, seemingly as reasonable 8525295Srandyf * a course of action as any other, though it may cause the entire 8535295Srandyf * platform to reset in some cases...
8545295Srandyf */ 8555295Srandyf real_mode_platter->rm_temp_gdt[0] = 0ULL; 8565295Srandyf real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL; 8575295Srandyf 8585295Srandyf real_mode_platter->rm_temp_gdt_lim = (ushort_t) 8595295Srandyf (sizeof (real_mode_platter->rm_temp_gdt) - 1); 8605295Srandyf real_mode_platter->rm_temp_gdt_base = rm_platter_pa + 8615295Srandyf (uint32_t)(&((rm_platter_t *)0)->rm_temp_gdt); 8625295Srandyf 8635295Srandyf real_mode_platter->rm_temp_idt_lim = 0; 8645295Srandyf real_mode_platter->rm_temp_idt_base = 0; 8655295Srandyf 8665295Srandyf /* 8675295Srandyf * Since the CPU needs to jump to protected mode using an identity 8685295Srandyf * mapped address, we need to calculate it here. 8695295Srandyf */ 8705295Srandyf real_mode_platter->rm_longmode64_addr = rm_platter_pa + offset; 8715295Srandyf #endif /* __amd64 */ 8725295Srandyf 8735295Srandyf /* return; */ 8745295Srandyf } 8755295Srandyf
/*
 * i_cpr_start_cpu()
 *
 * Runs on each CPU as it re-enters the kernel while resuming from
 * ACPI S3.  On the boot CPU (detected via i_cpr_bootcpu() under
 * cpu_lock) there is nothing to do and it returns immediately.
 * Otherwise it: syncs this CPU's PAT with cpu0's (done here while
 * interrupts are still disabled), re-installs the syscall handlers
 * via init_cpu_syscall(), restores this CPU's saved kernel stack on
 * amd64 (restore_stack()), adds the CPU to 'procset' under cpu_lock,
 * performs slave-side TSC sync when tsc_gethrtime_enable is set, and
 * finally calls spl0() to re-enable interrupts.  cpuid passes and
 * init_cpu_info() are deliberately skipped: boot-time state survives
 * suspend.
 *
 * NOTE(review): assumes wc_other_cpus[cpu_id] was filled in by
 * save_stack() on the suspend path -- confirm against the caller.
 */
8765295Srandyf void 8775295Srandyf i_cpr_start_cpu(void) 8785295Srandyf { 8795295Srandyf 8805295Srandyf struct cpu *cp = CPU; 8815295Srandyf 8825295Srandyf char *str = "i_cpr_start_cpu"; 8835295Srandyf extern void init_cpu_syscall(struct cpu *cp); 8845295Srandyf 8855295Srandyf #if defined(__amd64) 8865295Srandyf wc_cpu_t *cpup = wc_other_cpus + cp->cpu_id; 8875295Srandyf #endif /* __amd64 */ 8885295Srandyf 8895295Srandyf PMD(PMD_SX, ("%s() called\n", str)) 8905295Srandyf 8915295Srandyf PMD(PMD_SX, ("%s() #0 cp->cpu_base_spl %d\n", str, 8925295Srandyf cp->cpu_base_spl)) 8935295Srandyf 8945295Srandyf mutex_enter(&cpu_lock); 8955295Srandyf if (cp == i_cpr_bootcpu()) { 8965295Srandyf mutex_exit(&cpu_lock); 8975295Srandyf PMD(PMD_SX, 8985295Srandyf ("%s() called on bootcpu nothing to do!\n", str)) 8995295Srandyf return; 9005295Srandyf } 9015295Srandyf mutex_exit(&cpu_lock); 9025295Srandyf 9035295Srandyf /* 9045295Srandyf * We need to Sync PAT with cpu0's PAT. We have to do 9055295Srandyf * this with interrupts disabled.
9065295Srandyf */ 9075295Srandyf if (x86_feature & X86_PAT) 9085295Srandyf pat_sync(); 9095295Srandyf 9105295Srandyf /* 9115295Srandyf * Initialize this CPU's syscall handlers 9125295Srandyf */ 9135295Srandyf init_cpu_syscall(cp); 9145295Srandyf 9155295Srandyf PMD(PMD_SX, ("%s() #1 cp->cpu_base_spl %d\n", str, cp->cpu_base_spl)) 9165295Srandyf 9175295Srandyf /* 9185295Srandyf * Do not need to call cpuid_pass2(), cpuid_pass3(), cpuid_pass4() or 9195295Srandyf * init_cpu_info(), since the work that they do is only needed to 9205295Srandyf * be done once at boot time 9215295Srandyf */ 9225295Srandyf 9235295Srandyf 9245295Srandyf mutex_enter(&cpu_lock); 9255295Srandyf 9265295Srandyf #if defined(__amd64) 9275295Srandyf restore_stack(cpup); 9285295Srandyf #endif /* __amd64 */ 9295295Srandyf 9305295Srandyf CPUSET_ADD(procset, cp->cpu_id); 9315295Srandyf mutex_exit(&cpu_lock); 9325295Srandyf 9335295Srandyf PMD(PMD_SX, ("%s() #2 cp->cpu_base_spl %d\n", str, 9345295Srandyf cp->cpu_base_spl)) 9355295Srandyf 9365295Srandyf if (tsc_gethrtime_enable) { 9375295Srandyf PMD(PMD_SX, ("%s() calling tsc_sync_slave\n", str)) 9385295Srandyf tsc_sync_slave(); 9395295Srandyf } 9405295Srandyf 9415295Srandyf PMD(PMD_SX, ("%s() cp->cpu_id %d, cp->cpu_intr_actv %d\n", str, 9425295Srandyf cp->cpu_id, cp->cpu_intr_actv)) 9435295Srandyf PMD(PMD_SX, ("%s() #3 cp->cpu_base_spl %d\n", str, 9445295Srandyf cp->cpu_base_spl)) 9455295Srandyf 9465295Srandyf (void) spl0(); /* enable interrupts */ 9475295Srandyf 9485295Srandyf PMD(PMD_SX, ("%s() #4 cp->cpu_base_spl %d\n", str, 9495295Srandyf cp->cpu_base_spl)) 9505295Srandyf 9515295Srandyf /* 9525295Srandyf * Set up the CPU module for this CPU. This can't be done before 9535295Srandyf * this CPU is made CPU_READY, because we may (in heterogeneous systems) 9545295Srandyf * need to go load another CPU module. The act of attempting to load 9555295Srandyf * a module may trigger a cross-call, which will ASSERT unless this 9565295Srandyf * cpu is CPU_READY.
9575295Srandyf */ 9585295Srandyf 9595295Srandyf /* 9605295Srandyf * cmi already been init'd (during boot), so do not need to do it again 9615295Srandyf */ 9625295Srandyf #ifdef PM_REINITMCAONRESUME 9635295Srandyf if (x86_feature & X86_MCA) 9645295Srandyf cmi_mca_init(); 9655295Srandyf #endif 9665295Srandyf 9675295Srandyf PMD(PMD_SX, ("%s() returning\n", str)) 9685295Srandyf 9695295Srandyf /* return; */ 9705295Srandyf } 9715295Srandyf
9725295Srandyf #if defined(__amd64) 9735295Srandyf /* 9745295Srandyf * we only need to do this for amd64! 9755295Srandyf */ 9765295Srandyf 9775295Srandyf /* 9785295Srandyf * save the stack 9795295Srandyf */
/*
 * save_stack(): copy this CPU's live kernel stack -- from the saved
 * stack pointer cpup->wc_rsp up to curthread->t_stk -- into the
 * per-CPU wakecode save area cpup->wc_stack.  The stack grows down,
 * hence ASSERT(base > sp).  amd64 only.
 * NOTE(review): base - sp is assumed to fit within wc_stack; that
 * bound is not checked here -- confirm wc_stack's size in
 * cpr_wakecode.h.
 */
9805295Srandyf void 9815295Srandyf save_stack(wc_cpu_t *cpup) 9825295Srandyf { 9835295Srandyf char *str = "save_stack"; 9845295Srandyf caddr_t base = curthread->t_stk; 9855295Srandyf caddr_t sp = (caddr_t)cpup->wc_rsp; 9865295Srandyf 9875295Srandyf 9885295Srandyf PMD(PMD_SX, ("%s() CPU->cpu_id %d\n", str, CPU->cpu_id)) 9895295Srandyf PMD(PMD_SX, ("save_stack() curthread->t_stk = %p, sp = %p\n", 9905295Srandyf (void *)base, (void *)sp)) 9915295Srandyf 9925295Srandyf ASSERT(base > sp); 9935295Srandyf /*LINTED*/ 9945295Srandyf bcopy(sp, cpup->wc_stack, base - sp); 9955295Srandyf 9965295Srandyf } 9975295Srandyf
9985295Srandyf /* 9995295Srandyf * restore the stack 10005295Srandyf */
/*
 * restore_stack(): inverse of save_stack() -- copy the stack image
 * preserved in cpup->wc_stack back onto the live stack between
 * cpup->wc_rsp and curthread->t_stk.  Called from i_cpr_start_cpu()
 * with cpu_lock held during resume.  amd64 only.
 */
10015295Srandyf static void 10025295Srandyf restore_stack(wc_cpu_t *cpup) 10035295Srandyf { 10045295Srandyf /* 10055295Srandyf * we only need to do this for amd64!
10065295Srandyf */ 10075295Srandyf 10085295Srandyf char *str = "restore_stack"; 10095295Srandyf caddr_t base = curthread->t_stk; 10105295Srandyf caddr_t sp = (caddr_t)cpup->wc_rsp; 10115295Srandyf 10125295Srandyf PMD(PMD_SX, ("%s() CPU->cpu_id %d\n", str, CPU->cpu_id)) 10135295Srandyf PMD(PMD_SX, ("%s() curthread->t_stk = %p, sp = %p\n", str, 10145295Srandyf (void *)base, (void *)sp)) 10155295Srandyf 10165295Srandyf ASSERT(base > sp); 10175295Srandyf /*LINTED*/ 10185295Srandyf bcopy(cpup->wc_stack, sp, base - sp); 10195295Srandyf 10205295Srandyf } 10215295Srandyf 10225295Srandyf #endif /* __amd64 */ 10235295Srandyf 10245295Srandyf
/*
 * i_cpr_alloc_cpus(): lazily allocate the per-CPU wakecode save-area
 * array (ncpus * wc_cpu_t), zeroed.  KM_SLEEP, so this may block and
 * must not be called from interrupt context.  Idempotent: a second
 * call with wc_other_cpus already set is a no-op.
 */
10255295Srandyf void 10265295Srandyf i_cpr_alloc_cpus(void) 10275295Srandyf { 10285295Srandyf char *str = "i_cpr_alloc_cpus"; 10295295Srandyf 10305295Srandyf PMD(PMD_SX, ("%s() CPU->cpu_id %d\n", str, CPU->cpu_id)) 10315295Srandyf /* 10325295Srandyf * we allocate this only when we actually need it to save on 10335295Srandyf * kernel memory 10345295Srandyf */ 10355295Srandyf 10365295Srandyf if (wc_other_cpus == NULL) { 10375295Srandyf wc_other_cpus = kmem_zalloc(ncpus * sizeof (wc_cpu_t), 10385295Srandyf KM_SLEEP); 10395295Srandyf } 10405295Srandyf 10415295Srandyf } 10425295Srandyf
/*
 * i_cpr_free_cpus(): release the array allocated by
 * i_cpr_alloc_cpus() and clear the pointer so a later suspend can
 * reallocate it.  Safe to call when nothing was allocated.
 */
10435295Srandyf void 10445295Srandyf i_cpr_free_cpus(void) 10455295Srandyf { 10465295Srandyf if (wc_other_cpus != NULL) { 10475295Srandyf kmem_free((void *) wc_other_cpus, ncpus * sizeof (wc_cpu_t)); 10485295Srandyf wc_other_cpus = NULL; 10495295Srandyf } 10505295Srandyf } 10515295Srandyf 10525295Srandyf /* 10535295Srandyf * wrapper for acpica_ddi_save_resources() 10545295Srandyf */ 10555295Srandyf void 10565295Srandyf i_cpr_save_configuration(dev_info_t *dip) 10575295Srandyf { 10585295Srandyf acpica_ddi_save_resources(dip); 10595295Srandyf } 10605295Srandyf 10615295Srandyf /* 10625295Srandyf * wrapper for acpica_ddi_restore_resources() 10635295Srandyf */ 10645295Srandyf void 10655295Srandyf i_cpr_restore_configuration(dev_info_t *dip) 10665295Srandyf {
10675295Srandyf acpica_ddi_restore_resources(dip); 10685295Srandyf } 10695817Sjan
/*
 * wait_for_set(): poll in 10ms steps (drv_usecwait(10000)) until CPU
 * 'who' appears in *set.  Logs a CE_NOTE after 500 iterations (~5s)
 * to explain an apparent hang, and gives up with CE_WARN after more
 * than 2000 iterations (~20s).  Returns 1 when the CPU showed up,
 * 0 on timeout.
 */
10705817Sjan static int 10715817Sjan wait_for_set(cpuset_t *set, int who) 10725817Sjan { 10735817Sjan int delays; 10745817Sjan char *str = "wait_for_set"; 10755817Sjan 10765817Sjan for (delays = 0; !CPU_IN_SET(*set, who); delays++) { 10775817Sjan if (delays == 500) { 10785817Sjan /* 10795817Sjan * After five seconds, things are probably 10805817Sjan * looking a bit bleak - explain the hang. 10815817Sjan */ 10825817Sjan cmn_err(CE_NOTE, "cpu%d: started, " 10835817Sjan "but not running in the kernel yet", who); 10845817Sjan PMD(PMD_SX, ("%s() %d cpu started " 10855817Sjan "but not running in the kernel yet\n", 10865817Sjan str, who)) 10875817Sjan } else if (delays > 2000) { 10885817Sjan /* 10895817Sjan * We waited at least 20 seconds, bail .. 10905817Sjan */ 10915817Sjan cmn_err(CE_WARN, "cpu%d: timed out", who); 10925817Sjan PMD(PMD_SX, ("%s() %d cpu timed out\n", 10935817Sjan str, who)) 10945817Sjan return (0); 10955817Sjan } 10965817Sjan 10975817Sjan /* 10985817Sjan * wait at least 10ms, then check again.. 10995817Sjan */ 11005817Sjan drv_usecwait(10000); 11015817Sjan } 11025817Sjan 11035817Sjan return (1); 11045817Sjan } 1105