/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/mmu.h>
#include <sys/class.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/asm_linkage.h>
#include <sys/x_call.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vtrace.h>
#include <vm/hat.h>
/* NOTE(review): <sys/mmu.h> is included twice (also above); harmless but redundant. */
#include <sys/mmu.h>
#include <vm/as.h>
#include <vm/seg_kmem.h>
#include <sys/segments.h>
#include <sys/kmem.h>
#include <sys/stack.h>
#include <sys/smp_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/cpc_impl.h>
#include <sys/chip.h>
#include <sys/dtrace.h>
#include <sys/archsystm.h>
#include <sys/fp.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <vm/hat_i86.h>
#include <sys/memnode.h>

/*
 * Boot CPU state.  cpu[] holds a pointer for every possible CPU id;
 * only slot 0 is populated statically, the rest are filled in as
 * secondary CPUs are brought up (see mp_startup_init() below).
 */
struct cpu	cpus[1];			/* CPU data */
struct cpu	*cpu[NCPU] = {&cpus[0]};	/* pointers to all CPUs */
cpu_core_t	cpu_core[NCPU];			/* cpu_core structures */

/*
 * Useful for disabling MP bring-up for an MP capable kernel
 * (a kernel that was built with MP defined)
 */
int use_mp = 1;

/* Bitmask of CPUs to start; to be set by platform specific module. */
int mp_cpus = 0x1;

/*
 * This variable is used by the hat layer to decide whether or not
 * critical sections are needed to prevent race conditions.  For sun4m,
 * this variable is set once enough MP initialization has been done in
 * order to allow cross calls.
 */
int flushes_require_xcalls = 0;
/* Bitmask of CPUs that have completed startup; boot CPU is bit 0. */
ulong_t	cpu_ready_set = 1;

/* Real-mode trampoline boundaries, defined in locore/mpcore assembly. */
extern	void	real_mode_start(void);
extern	void	real_mode_end(void);
/* Entry point executed by each freshly started secondary CPU. */
static	void	mp_startup(void);

/* Toggle sysenter/sysexit and syscall/sysret support on a CPU. */
static void cpu_sep_enable(void);
static void cpu_sep_disable(void);
static void cpu_asysc_enable(void);
static void cpu_asysc_disable(void);

extern int tsc_gethrtime_enable;

/*
 * Init CPU info - get CPU type info for processor_info system call.
 */
101*0Sstevel@tonic-gate */ 102*0Sstevel@tonic-gate void 103*0Sstevel@tonic-gate init_cpu_info(struct cpu *cp) 104*0Sstevel@tonic-gate { 105*0Sstevel@tonic-gate processor_info_t *pi = &cp->cpu_type_info; 106*0Sstevel@tonic-gate char buf[CPU_IDSTRLEN]; 107*0Sstevel@tonic-gate 108*0Sstevel@tonic-gate /* 109*0Sstevel@tonic-gate * Get clock-frequency property for the CPU. 110*0Sstevel@tonic-gate */ 111*0Sstevel@tonic-gate pi->pi_clock = cpu_freq; 112*0Sstevel@tonic-gate 113*0Sstevel@tonic-gate (void) strcpy(pi->pi_processor_type, "i386"); 114*0Sstevel@tonic-gate if (fpu_exists) 115*0Sstevel@tonic-gate (void) strcpy(pi->pi_fputypes, "i387 compatible"); 116*0Sstevel@tonic-gate 117*0Sstevel@tonic-gate (void) cpuid_getidstr(cp, buf, sizeof (buf)); 118*0Sstevel@tonic-gate 119*0Sstevel@tonic-gate cp->cpu_idstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP); 120*0Sstevel@tonic-gate (void) strcpy(cp->cpu_idstr, buf); 121*0Sstevel@tonic-gate 122*0Sstevel@tonic-gate cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr); 123*0Sstevel@tonic-gate 124*0Sstevel@tonic-gate (void) cpuid_getbrandstr(cp, buf, sizeof (buf)); 125*0Sstevel@tonic-gate cp->cpu_brandstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP); 126*0Sstevel@tonic-gate (void) strcpy(cp->cpu_brandstr, buf); 127*0Sstevel@tonic-gate 128*0Sstevel@tonic-gate cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr); 129*0Sstevel@tonic-gate } 130*0Sstevel@tonic-gate 131*0Sstevel@tonic-gate /* 132*0Sstevel@tonic-gate * Configure syscall support on this CPU. 
/*ARGSUSED*/
static void
init_cpu_syscall(struct cpu *cp)
{
	uint64_t value;

	/* Stay on this CPU while we program its per-CPU MSRs. */
	kpreempt_disable();

#if defined(__amd64)
	if (x86_feature & X86_ASYSC) {

#if !defined(__lint)
		/*
		 * The syscall instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);
		ASSERT(UDS_SEL == U32CS_SEL + 8);
		ASSERT(UCS_SEL == U32CS_SEL + 16);
#endif
		/*
		 * Turn syscall/sysret extensions on.
		 */
		cpu_asysc_enable();

		/*
		 * Program the magic registers ..
		 * STAR holds the kernel/user selector bases; LSTAR and
		 * CSTAR hold the 64-bit and 32-bit syscall entry points.
		 */
		value = ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32;
		wrmsr(MSR_AMD_STAR, &value);
		value = (uintptr_t)sys_syscall;
		wrmsr(MSR_AMD_LSTAR, &value);
		value = (uintptr_t)sys_syscall32;
		wrmsr(MSR_AMD_CSTAR, &value);

		/*
		 * This list of flags is masked off the incoming
		 * %rfl when we enter the kernel.
		 */
		value = PS_IE | PS_T;
		wrmsr(MSR_AMD_SFMASK, &value);
	}
#endif

	/*
	 * On 32-bit kernels, we use sysenter/sysexit because it's too
	 * hard to use syscall/sysret, and it is more portable anyway.
	 *
	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
	 * variant isn't available to 32-bit applications, but sysenter is.
	 */
	if (x86_feature & X86_SEP) {

#if !defined(__lint)
		/*
		 * The sysenter instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here. See "sysenter" in Intel document 245471-012, "IA-32
		 * Intel Architecture Software Developer's Manual Volume 2:
		 * Instruction Set Reference"
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);

		ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT32(UDS_SEL == UCS_SEL + 8);

		ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT64(UDS_SEL == U32CS_SEL + 8);
#endif

		cpu_sep_enable();

		/*
		 * resume() sets this value to the base of the threads stack
		 * via a context handler.
		 */
		value = 0;
		wrmsr(MSR_INTC_SEP_ESP, &value);

		value = (uintptr_t)sys_sysenter;
		wrmsr(MSR_INTC_SEP_EIP, &value);
	}

	kpreempt_enable();
}

/*
 * Multiprocessor initialization.
 *
 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
 * startup and idle threads for the specified CPU.
 */
static void
mp_startup_init(int cpun)
{
#if defined(__amd64)
extern void *long_mode_64(void);
#endif	/* __amd64 */

	struct cpu *cp;
	struct tss *ntss;
	kthread_id_t tp;
	caddr_t	sp;
	int size;
	proc_t *procp;
	extern void idle();
	extern void init_intr_threads(struct cpu *);

	struct cpu_tables *tablesp;
	extern chip_t cpu0_chip;
	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;

#ifdef TRAPTRACE
	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
#endif

	ASSERT(cpun < NCPU && cpu[cpun] == NULL);

	if ((cp = kmem_zalloc(sizeof (*cp), KM_NOSLEEP)) == NULL) {
		panic("mp_startup_init: cpu%d: "
		    "no memory for cpu structure", cpun);
		/*NOTREACHED*/
	}
	procp = curthread->t_procp;

	mutex_enter(&cpu_lock);
	/*
	 * Initialize the dispatcher first.
	 */
	disp_cpu_init(cp);
	mutex_exit(&cpu_lock);

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 * Interrupt and process switch stacks get allocated later
	 * when the CPU starts running.
	 */
	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Setup thread to start in mp_startup.
	 */
	sp = tp->t_stk;
	tp->t_pc = (uintptr_t)mp_startup;
	tp->t_sp = (uintptr_t)(sp - MINFRAME);

	cp->cpu_id = cpun;
	cp->cpu_self = cp;
	cp->cpu_mask = 1 << cpun;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);

	/*
	 * Bootstrap cpu_chip in case mp_startup blocks
	 */
	cp->cpu_chip = &cpu0_chip;

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Perform CPC initialization on the new CPU.
	 */
	kcpc_hw_init(cp);

	/*
	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
	 * for each CPU.
	 */

	setup_vaddr_for_ppcopy(cp);

	/*
	 * Allocate space for page directory, stack, tss, gdt and idt.
	 * This assumes that kmem_alloc will return memory which is aligned
	 * to the next higher power of 2 or a page(if size > MAXABIG)
	 * If this assumption goes wrong at any time due to change in
	 * kmem alloc, things may not work as the page directory has to be
	 * page aligned
	 */
	if ((tablesp = kmem_zalloc(sizeof (*tablesp), KM_NOSLEEP)) == NULL)
		panic("mp_startup_init: cpu%d cannot allocate tables", cpun);

	if ((uintptr_t)tablesp & ~MMU_STD_PAGEMASK) {
		/*
		 * Not page aligned: over-allocate by a page and round the
		 * pointer up.  NOTE(review): the original (unaligned)
		 * allocation's base pointer is discarded here, so it can
		 * never be kmem_free'd exactly — presumably acceptable for
		 * this boot-time, never-freed allocation; confirm.
		 */
		kmem_free(tablesp, sizeof (struct cpu_tables));
		size = sizeof (struct cpu_tables) + MMU_STD_PAGESIZE;
		tablesp = kmem_zalloc(size, KM_NOSLEEP);
		tablesp = (struct cpu_tables *)
		    (((uintptr_t)tablesp + MMU_STD_PAGESIZE) &
		    MMU_STD_PAGEMASK);
	}

	ntss = cp->cpu_tss = &tablesp->ct_tss;
	cp->cpu_gdt = tablesp->ct_gdt;
	bcopy(CPU->cpu_gdt, cp->cpu_gdt, NGDT * (sizeof (user_desc_t)));

#if defined(__amd64)

	/*
	 * #DF (double fault): dedicated IST stack grows down from the
	 * end of the per-CPU ct_stack area.
	 */
	ntss->tss_ist1 =
	    (uint64_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)];

#elif defined(__i386)

	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
	    (uint32_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)];

	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;

	ntss->tss_eip = (uint32_t)mp_startup;

	ntss->tss_cs = KCS_SEL;
	ntss->tss_fs = KFS_SEL;
	ntss->tss_gs = KGS_SEL;

	/*
	 * setup kernel %gs.
	 */
	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
	    SEL_KPL, 0, 1);

#endif	/* __i386 */

	/*
	 * Set I/O bit map offset equal to size of TSS segment limit
	 * for no I/O permission map. This will cause all user I/O
	 * instructions to generate #gp fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);

	/*
	 * setup kernel tss.
	 */
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) -1, SDT_SYSTSS, SEL_KPL);

	/*
	 * If we have more than one node, each cpu gets a copy of IDT
	 * local to its node. If this is a Pentium box, we use cpu 0's
	 * IDT. cpu 0's IDT has been made read-only to workaround the
	 * cmpxchgl register bug
	 */
	cp->cpu_idt = CPU->cpu_idt;
	if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
		cp->cpu_idt = kmem_alloc(sizeof (idt0), KM_SLEEP);
		bcopy(idt0, cp->cpu_idt, sizeof (idt0));
	}

	/*
	 * Get interrupt priority data from cpu 0
	 */
	cp->cpu_pri_data = CPU->cpu_pri_data;

	hat_cpu_online(cp);

	/* Should remove all entries for the current process/thread here */

	/*
	 * Fill up the real mode platter to make it easy for real mode code to
	 * kick it off. This area should really be one passed by boot to kernel
	 * and guaranteed to be below 1MB and aligned to 16 bytes. Should also
	 * have identical physical and virtual address in paged mode.
	 */
	real_mode_platter->rm_idt_base = cp->cpu_idt;
	real_mode_platter->rm_idt_lim = sizeof (idt0) - 1;
	real_mode_platter->rm_gdt_base = cp->cpu_gdt;
	real_mode_platter->rm_gdt_lim = sizeof (gdt0) -1;
	real_mode_platter->rm_pdbr = getcr3();
	real_mode_platter->rm_cpu = cpun;
	real_mode_platter->rm_x86feature = x86_feature;
	real_mode_platter->rm_cr4 = cr4_value;

#if defined(__amd64)
	/*
	 * The real-mode trampoline only loads a 32-bit %cr3, so the
	 * kernel's top-level page table must live below 4G.
	 */
	if (getcr3() > 0xffffffffUL)
		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
		    "located above 4G in physical memory (@ 0x%llx).",
		    (unsigned long long)getcr3());

	/*
	 * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
	 * by code in real_mode_start():
	 *
	 * GDT[0]:  NULL selector
	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
	 *
	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
	 * a course of action as any other, though it may cause the entire
	 * platform to reset in some cases...
	 */
	real_mode_platter->rm_temp_gdt[0] = 0ULL;
	real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;

	real_mode_platter->rm_temp_gdt_lim = (ushort_t)
	    (sizeof (real_mode_platter->rm_temp_gdt) - 1);
	real_mode_platter->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)(&((rm_platter_t *)0)->rm_temp_gdt);

	real_mode_platter->rm_temp_idt_lim = 0;
	real_mode_platter->rm_temp_idt_base = 0;

	/*
	 * Since the CPU needs to jump to protected mode using an identity
	 * mapped address, we need to calculate it here.
	 */
	real_mode_platter->rm_longmode64_addr = rm_platter_pa +
	    ((uint32_t)long_mode_64 - (uint32_t)real_mode_start);
#endif	/* __amd64 */

#ifdef TRAPTRACE
	/*
	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers for this
	 * CPU.
	 */
	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
	ttc->ttc_next = ttc->ttc_first;
	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
#endif

	/*
	 * Record that we have another CPU.
	 */
	mutex_enter(&cpu_lock);
	/*
	 * Initialize the interrupt threads for this CPU
	 */
	init_intr_threads(cp);
	/*
	 * Add CPU to list of available CPUs. It'll be on the active list
	 * after mp_startup().
	 */
	cpu_add_unit(cp);
	mutex_exit(&cpu_lock);
}

/*
 * Apply workarounds for known errata, and warn about those that are absent.
 *
 * System vendors occasionally create configurations which contain different
 * revisions of the CPUs that are almost but not exactly the same. At the
 * time of writing, this meant that their clock rates were the same, their
 * feature sets were the same, but the required workaround were -not-
 * necessarily the same.  So, this routine is invoked on -every- CPU soon
 * after starting to make sure that the resulting system contains the most
 * pessimal set of workarounds needed to cope with *any* of the CPUs in the
 * system.
 *
 * These workarounds are based on Rev 3.50 of the Revision Guide for
 * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, May 2005.
 */
513*0Sstevel@tonic-gate */ 514*0Sstevel@tonic-gate 515*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91) 516*0Sstevel@tonic-gate int opteron_erratum_91; /* if non-zero -> at least one cpu has it */ 517*0Sstevel@tonic-gate #endif 518*0Sstevel@tonic-gate 519*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93) 520*0Sstevel@tonic-gate int opteron_erratum_93; /* if non-zero -> at least one cpu has it */ 521*0Sstevel@tonic-gate #endif 522*0Sstevel@tonic-gate 523*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100) 524*0Sstevel@tonic-gate int opteron_erratum_100; /* if non-zero -> at least one cpu has it */ 525*0Sstevel@tonic-gate #endif 526*0Sstevel@tonic-gate 527*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109) 528*0Sstevel@tonic-gate int opteron_erratum_109; /* if non-zero -> at least one cpu has it */ 529*0Sstevel@tonic-gate #endif 530*0Sstevel@tonic-gate 531*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121) 532*0Sstevel@tonic-gate int opteron_erratum_121; /* if non-zero -> at least one cpu has it */ 533*0Sstevel@tonic-gate #endif 534*0Sstevel@tonic-gate 535*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122) 536*0Sstevel@tonic-gate int opteron_erratum_122; /* if non-zero -> at least one cpu has it */ 537*0Sstevel@tonic-gate #endif 538*0Sstevel@tonic-gate 539*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123) 540*0Sstevel@tonic-gate int opteron_erratum_123; /* if non-zero -> at least one cpu has it */ 541*0Sstevel@tonic-gate #endif 542*0Sstevel@tonic-gate 543*0Sstevel@tonic-gate 544*0Sstevel@tonic-gate #define WARNING(cpu, n) \ 545*0Sstevel@tonic-gate cmn_err(CE_WARN, "cpu%d: no workaround for erratum %d", \ 546*0Sstevel@tonic-gate (cpu)->cpu_id, (n)) 547*0Sstevel@tonic-gate 548*0Sstevel@tonic-gate uint_t 549*0Sstevel@tonic-gate workaround_errata(struct cpu *cpu) 550*0Sstevel@tonic-gate { 551*0Sstevel@tonic-gate uint_t missing = 0; 552*0Sstevel@tonic-gate 553*0Sstevel@tonic-gate ASSERT(cpu == CPU); 554*0Sstevel@tonic-gate 555*0Sstevel@tonic-gate /*LINTED*/ 
556*0Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 88) > 0) { 557*0Sstevel@tonic-gate /* 558*0Sstevel@tonic-gate * SWAPGS May Fail To Read Correct GS Base 559*0Sstevel@tonic-gate */ 560*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_88) 561*0Sstevel@tonic-gate /* 562*0Sstevel@tonic-gate * The workaround is an mfence in the relevant assembler code 563*0Sstevel@tonic-gate */ 564*0Sstevel@tonic-gate #else 565*0Sstevel@tonic-gate WARNING(cpu, 88); 566*0Sstevel@tonic-gate missing++; 567*0Sstevel@tonic-gate #endif 568*0Sstevel@tonic-gate } 569*0Sstevel@tonic-gate 570*0Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 91) > 0) { 571*0Sstevel@tonic-gate /* 572*0Sstevel@tonic-gate * Software Prefetches May Report A Page Fault 573*0Sstevel@tonic-gate */ 574*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91) 575*0Sstevel@tonic-gate /* 576*0Sstevel@tonic-gate * fix is in trap.c 577*0Sstevel@tonic-gate */ 578*0Sstevel@tonic-gate opteron_erratum_91++; 579*0Sstevel@tonic-gate #else 580*0Sstevel@tonic-gate WARNING(cpu, 91); 581*0Sstevel@tonic-gate missing++; 582*0Sstevel@tonic-gate #endif 583*0Sstevel@tonic-gate } 584*0Sstevel@tonic-gate 585*0Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 93) > 0) { 586*0Sstevel@tonic-gate /* 587*0Sstevel@tonic-gate * RSM Auto-Halt Restart Returns to Incorrect RIP 588*0Sstevel@tonic-gate */ 589*0Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93) 590*0Sstevel@tonic-gate /* 591*0Sstevel@tonic-gate * fix is in trap.c 592*0Sstevel@tonic-gate */ 593*0Sstevel@tonic-gate opteron_erratum_93++; 594*0Sstevel@tonic-gate #else 595*0Sstevel@tonic-gate WARNING(cpu, 93); 596*0Sstevel@tonic-gate missing++; 597*0Sstevel@tonic-gate #endif 598*0Sstevel@tonic-gate } 599*0Sstevel@tonic-gate 600*0Sstevel@tonic-gate /*LINTED*/ 601*0Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 95) > 0) { 602*0Sstevel@tonic-gate /* 603*0Sstevel@tonic-gate * RET Instruction May Return to Incorrect EIP 604*0Sstevel@tonic-gate */ 605*0Sstevel@tonic-gate #if 
defined(OPTERON_ERRATUM_95)
#if defined(_LP64)
		/*
		 * Workaround this by ensuring that 32-bit user code and
		 * 64-bit kernel code never occupy the same address
		 * range mod 4G.
		 */
		if (_userlimit32 > 0xc0000000ul)
			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;

		/*LINTED*/
		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
#endif	/* _LP64 */
#else
		WARNING(cpu, 95);
		missing++;
#endif	/* OPTERON_ERRATUM_95 */
	}

	if (cpuid_opteron_erratum(cpu, 100) > 0) {
		/*
		 * Compatibility Mode Branches Transfer to Illegal Address
		 */
#if defined(OPTERON_ERRATUM_100)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_100++;
#else
		WARNING(cpu, 100);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 108) > 0) {
		/*
		 * CPUID Instruction May Return Incorrect Model Number In
		 * Some Processors
		 */
#if defined(OPTERON_ERRATUM_108)
		/*
		 * (Our cpuid-handling code corrects the model number on
		 * those processors)
		 */
#else
		WARNING(cpu, 108);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 109) > 0) {
		/*
		 * Certain Reverse REP MOVS May Produce Unpredictable Behaviour
		 */
#if defined(OPTERON_ERRATUM_109)
		uint64_t patchlevel;

		/*
		 * NOTE: this rdmsr() takes a pointer out-parameter, per the
		 * Solaris MSR-access API used throughout this file.
		 */
		(void) rdmsr(MSR_AMD_PATCHLEVEL, &patchlevel);
		/* workaround is to print a warning to upgrade BIOS */
		if (patchlevel == 0)
			opteron_erratum_109++;
#else
		WARNING(cpu, 109);
		missing++;
#endif
	}
	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 121) > 0) {
		/*
		 * Sequential Execution Across Non_Canonical Boundary Caused
		 * Processor Hang
		 */
#if defined(OPTERON_ERRATUM_121)
		static int lma;

		if (opteron_erratum_121)
			opteron_erratum_121++;

		/*
		 * Erratum 121 is only present in long (64 bit) mode.
		 * Workaround is to include the page immediately before the
		 * va hole to eliminate the possibility of system hangs due to
		 * sequential execution across the va hole boundary.
		 */
		if (lma == 0) {
			uint64_t efer;

			/*
			 * check LMA once: assume all cpus are in long mode
			 * or not.
			 */
			lma = 1;

			(void) rdmsr(MSR_AMD_EFER, &efer);
			if (efer & AMD_EFER_LMA) {
				if (hole_start) {
					hole_start -= PAGESIZE;
				} else {
					/*
					 * hole_start not yet initialized by
					 * mmu_init. Initialize hole_start
					 * with value to be subtracted.
					 */
					hole_start = PAGESIZE;
				}
				opteron_erratum_121++;
			}
		}
#else
		WARNING(cpu, 121);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 122) > 0) {
		/*
		 * TLB Flush Filter May Cause Coherency Problem in
		 * Multiprocessor Systems
		 */
#if defined(OPTERON_ERRATUM_122)
		/*
		 * Erratum 122 is only present in MP configurations (multi-core
		 * or multi-processor).
		 */

		if (opteron_erratum_122 || lgrp_plat_node_cnt > 1 ||
		    cpuid_get_ncpu_per_chip(cpu) > 1) {
			uint64_t hwcrval;

			/* disable TLB Flush Filter */
			(void) rdmsr(MSR_AMD_HWCR, &hwcrval);
			hwcrval |= AMD_HWCR_FFDIS;
			wrmsr(MSR_AMD_HWCR, &hwcrval);
			opteron_erratum_122++;
		}

#else
		WARNING(cpu, 122);
		missing++;
#endif
	}
	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 123) > 0) {
		/*
		 * Bypassed Reads May Cause Data Corruption or System Hang in
		 * Dual Core Processors
		 */
#if defined(OPTERON_ERRATUM_123)
		/*
		 * Erratum 123 applies only to multi-core cpus.
		 */

		if (cpuid_get_ncpu_per_chip(cpu) > 1) {
			uint64_t patchlevel;

			(void) rdmsr(MSR_AMD_PATCHLEVEL, &patchlevel);
			/* workaround is to print a warning to upgrade BIOS */
			if (patchlevel == 0)
				opteron_erratum_123++;
		}
#else
		WARNING(cpu, 123);
		missing++;
#endif
	}
	/* number of errata for which no workaround is compiled in */
	return (missing);
}

/*
 * Called once all CPUs have been started; report (via the system log) any
 * errata whose workaround is a BIOS microcode update that was found to be
 * absent during workaround_errata().
 */
void
workaround_errata_end()
{
#if defined(OPTERON_ERRATUM_109)
	if (opteron_erratum_109) {
		cmn_err(CE_WARN, "!BIOS microcode patch for AMD Processor"
		    " Erratum 109 was not detected. Updating BIOS with the"
		    " microcode patch is highly recommended.");
	}
#endif
#if defined(OPTERON_ERRATUM_123)
	if (opteron_erratum_123) {
		cmn_err(CE_WARN, "!BIOS microcode patch for AMD Processor"
		    " Erratum 123 was not detected. Updating BIOS with the"
		    " microcode patch is highly recommended.");
	}
#endif
}

static ushort_t *mp_map_warm_reset_vector();
static void mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector);

/*
 * Bring all non-boot CPUs listed in mp_cpus online.  Copies the real-mode
 * startup trampoline into the rm_platter page, points the BIOS warm-reset
 * vector at it, kicks each CPU via (*cpu_startf)(), and waits (up to a
 * shared 20*hz delay budget) for each one to check in via 'procset' and
 * then 'cpu_ready_set'.  Falls through to 'done' for the UP / !use_mp case.
 */
/*ARGSUSED*/
void
start_other_cpus(int cprboot)
{
	unsigned who;
	int cpuid = getbootcpuid();
	int delays = 0;		/* cumulative across all CPUs, not per-CPU */
	int started_cpu;
	ushort_t *warm_reset_vector = NULL;
	extern int procset;

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize our syscall handlers
	 */
	init_cpu_syscall(CPU);

	/*
	 * if only 1 cpu or not using MP, skip the rest of this
	 */
	if (!(mp_cpus & ~(1 << cpuid)) || use_mp == 0) {
		if (use_mp == 0)
			cmn_err(CE_CONT, "?***** Not in MP mode\n");
		goto done;
	}

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();

	xc_init();		/* initialize processor crosscalls */

	/*
	 * Copy the real mode code at "real_mode_start" to the
	 * page at rm_platter_va.
	 */
	warm_reset_vector = mp_map_warm_reset_vector();
	if (warm_reset_vector == NULL)
		goto done;

	bcopy((caddr_t)real_mode_start,
	    (caddr_t)((rm_platter_t *)rm_platter_va)->rm_code,
	    (size_t)real_mode_end - (size_t)real_mode_start);

	/* from here on, TLB shootdowns need cross-calls */
	flushes_require_xcalls = 1;

	affinity_set(CPU_CURRENT);

	for (who = 0; who < NCPU; who++) {
		if (who == cpuid)
			continue;

		if ((mp_cpus & (1 << who)) == 0)
			continue;

		mp_startup_init(who);
		started_cpu = 1;
		(*cpu_startf)(who, rm_platter_pa);

		/* wait for the CPU to set its bit in procset (see mp_startup) */
		while ((procset & (1 << who)) == 0) {

			delay(1);
			if (++delays > (20 * hz)) {

				cmn_err(CE_WARN,
				    "cpu%d failed to start", who);

				/* give up on this CPU and tear it down */
				mutex_enter(&cpu_lock);
				cpu[who]->cpu_flags = 0;
				cpu_del_unit(who);
				mutex_exit(&cpu_lock);

				started_cpu = 0;
				break;
			}
		}
		if (!started_cpu)
			continue;
		if (tsc_gethrtime_enable)
			tsc_sync_master(who);


		if (dtrace_cpu_init != NULL) {
			/*
			 * DTrace CPU initialization expects cpu_lock
			 * to be held.
			 */
			mutex_enter(&cpu_lock);
			(*dtrace_cpu_init)(who);
			mutex_exit(&cpu_lock);
		}
	}

	affinity_clear();

	/* wait until every started CPU is fully ready (see mp_startup) */
	for (who = 0; who < NCPU; who++) {
		if (who == cpuid)
			continue;

		if (!(procset & (1 << who)))
			continue;

		while (!(cpu_ready_set & (1 << who)))
			delay(1);
	}

done:
	workaround_errata_end();

	if (warm_reset_vector != NULL)
		mp_unmap_warm_reset_vector(warm_reset_vector);
	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    HAT_UNLOAD);
}

/*
 * Dummy functions - no i86pc platforms support dynamic cpu allocation.
 */
/*ARGSUSED*/
int
mp_cpu_configure(int cpuid)
{
	return (ENOTSUP);		/* not supported */
}

/*ARGSUSED*/
int
mp_cpu_unconfigure(int cpuid)
{
	return (ENOTSUP);		/* not supported */
}

/*
 * Startup function for 'other' CPUs (besides boot cpu).
 * Resumed from cpu_startup.
 *
 * Runs on the new CPU itself: probes cpuid, syncs MTRRs, installs syscall
 * handlers, joins the cpu lists, and finally signals the boot CPU via
 * procset and cpu_ready_set (see start_other_cpus()).  Never returns.
 */
void
mp_startup(void)
{
	struct cpu *cp = CPU;
	extern int procset;
	uint_t new_x86_feature;

	new_x86_feature = cpuid_pass1(cp);

	/*
	 * We need to Sync MTRR with cpu0's MTRR. We have to do
	 * this with interrupts disabled.
	 */
	if (x86_feature & X86_MTRR)
		mtrr_sync();
	/*
	 * Enable machine check architecture
	 */
	if (x86_feature & X86_MCA)
		setup_mca();

	/*
	 * Initialize this CPU's syscall handlers
	 */
	init_cpu_syscall(cp);

	/*
	 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
	 * highest level at which a routine is permitted to block on
	 * an adaptive mutex (allows for cpu poke interrupt in case
	 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
	 * device interrupts that may end up in the hat layer issuing cross
	 * calls before CPU_READY is set.
	 */
	(void) splx(ipltospl(LOCK_LEVEL));

	/*
	 * Do a sanity check to make sure this new CPU is a sane thing
	 * to add to the collection of processors running this system.
	 *
	 * XXX	Clearly this needs to get more sophisticated, if x86
	 * systems start to get built out of heterogenous CPUs; as is
	 * likely to happen once the number of processors in a configuration
	 * gets large enough.
	 */
	if ((x86_feature & new_x86_feature) != x86_feature) {
		cmn_err(CE_CONT, "?cpu%d: %b\n",
		    cp->cpu_id, new_x86_feature, FMT_X86_FEATURE);
		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
	}

	/*
	 * We could be more sophisticated here, and just mark the CPU
	 * as "faulted" but at this point we'll opt for the easier
	 * answer of dying horribly. Provided the boot cpu is ok,
	 * the system can be recovered by booting with use_mp set to zero.
	 */
	if (workaround_errata(cp) != 0)
		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);

	cpuid_pass2(cp);
	cpuid_pass3(cp);
	(void) cpuid_pass4(cp);

	init_cpu_info(cp);

	add_cpunode2devtree(cp->cpu_id, cp->cpu_m.mcpu_cpi);

	/* tell the boot CPU (polling in start_other_cpus) we got this far */
	mutex_enter(&cpu_lock);
	procset |= 1 << cp->cpu_id;
	mutex_exit(&cpu_lock);

	if (tsc_gethrtime_enable)
		tsc_sync_slave();

	mutex_enter(&cpu_lock);
	/*
	 * It's unfortunate that chip_cpu_init() has to be called here.
	 * It really belongs in cpu_add_unit(), but unfortunately it is
	 * dependent on the cpuid probing, which must be done in the
	 * context of the current CPU. Care must be taken on x86 to ensure
	 * that mp_startup can safely block even though chip_cpu_init() and
	 * cpu_add_active() have not yet been called.
	 */
	chip_cpu_init(cp);
	chip_cpu_startup(cp);

	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);
	mutex_exit(&cpu_lock);

	(void) spl0();				/* enable interrupts */

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * Setting the bit in cpu_ready_set must be the last operation in
	 * processor initialization; the boot CPU will continue to boot once
	 * it sees this bit set for all active CPUs.
	 */
	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);

	/*
	 * Because mp_startup() gets fired off after init() starts, we
	 * can't use the '?' trick to do 'boot -v' printing - so we
	 * always direct the 'cpu .. online' messages to the log.
	 */
	cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
	    cp->cpu_id);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	panic("mp_startup: cannot return");
	/*NOTREACHED*/
}


/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (cp->cpu_id == getbootcpuid())
		return (EBUSY);		/* Cannot start boot CPU */
	return (0);
}

/*
 * Stop CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_stop(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (cp->cpu_id == getbootcpuid())
		return (EBUSY);		/* Cannot stop boot CPU */

	return (0);
}

/*
 * Power on CPU.
 */
/* ARGSUSED */
int
mp_cpu_poweron(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (ENOTSUP);		/* not supported */
}

/*
 * Power off CPU.
 */
/* ARGSUSED */
int
mp_cpu_poweroff(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (ENOTSUP);		/* not supported */
}


/*
 * Take the specified CPU out of participation in interrupts.
 * Returns EBUSY for the boot CPU or if the PSM refuses; 0 on success.
 */
int
cpu_disable_intr(struct cpu *cp)
{
	/*
	 * cannot disable interrupts on boot cpu
	 */
	if (cp == cpu[getbootcpuid()])
		return (EBUSY);

	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
		return (EBUSY);

	cp->cpu_flags &= ~CPU_ENABLE;
	return (0);
}

/*
 * Allow the specified CPU to participate in interrupts.
 * No-op for the boot CPU, which is always interrupt-enabled.
 */
void
cpu_enable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	if (cp == cpu[getbootcpuid()])
		return;

	cp->cpu_flags |= CPU_ENABLE;
	psm_enable_intr(cp->cpu_id);
}


/*
 * return the cpu id of the initial startup cpu
 */
processorid_t
getbootcpuid(void)
{
	return (0);
}

/*
 * Map the BIOS warm-reset vector and point it at the real-mode startup
 * code in the rm_platter page, encoded as a real-mode segment:offset pair
 * (offset word first, then segment = physical address >> 4).  Returns the
 * mapped (still pointing at the offset word) vector, or NULL if the
 * physical mapping failed.
 */
static ushort_t *
mp_map_warm_reset_vector()
{
	ushort_t *warm_reset_vector;

	if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
	    sizeof (ushort_t *), PROT_READ|PROT_WRITE)))
		return (NULL);

	/*
	 * setup secondary cpu bios boot up vector
	 */
	*warm_reset_vector = (ushort_t)((caddr_t)
	    ((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va
	    + ((ulong_t)rm_platter_va & 0xf));
	warm_reset_vector++;
	*warm_reset_vector = (ushort_t)(rm_platter_pa >> 4);

	/* hand back the base of the mapping, not the segment word */
	--warm_reset_vector;
	return (warm_reset_vector);
}

/* Undo the mapping established by mp_map_warm_reset_vector(). */
static void
mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector)
{
	psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *));
}

/*ARGSUSED*/
void
mp_cpu_faulted_enter(struct cpu *cp)
{}

/*ARGSUSED*/
void
mp_cpu_faulted_exit(struct cpu *cp)
{}

/*
 * The following two routines are used as context operators on threads belonging
 * to processes with a private LDT (see sysi86). Due to the rarity of such
 * processes, these routines are currently written for best code readability and
 * organization rather than speed.
 * We could avoid checking x86_feature at every
 * context switch by installing different context ops, depending on the
 * x86_feature flags, at LDT creation time -- one for each combination of fast
 * syscall feature flags.
 */

/*
 * Turn off whichever fast-syscall mechanisms (sysenter and/or syscall)
 * this CPU advertises; subsequent use of those instructions will fault
 * (see cpu_sep_disable()/cpu_asysc_disable() below).
 */
/*ARGSUSED*/
void
cpu_fast_syscall_disable(void *arg)
{
	if (x86_feature & X86_SEP)
		cpu_sep_disable();
	if (x86_feature & X86_ASYSC)
		cpu_asysc_disable();
}

/*
 * Re-enable the fast-syscall mechanisms this CPU advertises.
 */
/*ARGSUSED*/
void
cpu_fast_syscall_enable(void *arg)
{
	if (x86_feature & X86_SEP)
		cpu_sep_enable();
	if (x86_feature & X86_ASYSC)
		cpu_asysc_enable();
}

/* Point SYSENTER at the kernel code selector, enabling sysenter/sysexit. */
static void
cpu_sep_enable(void)
{
	uint64_t value;

	ASSERT(x86_feature & X86_SEP);
	/* must not migrate CPUs while poking a per-CPU MSR */
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	value = KCS_SEL;
	wrmsr(MSR_INTC_SEP_CS, &value);
}

static void
cpu_sep_disable(void)
{
	uint64_t value;

	ASSERT(x86_feature & X86_SEP);
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
	 * the sysenter or sysexit instruction to trigger a #gp fault.
	 */
	value = 0;
	wrmsr(MSR_INTC_SEP_CS, &value);
}

/* Set EFER.SCE, enabling the AMD syscall/sysret instructions. */
static void
cpu_asysc_enable(void)
{
	uint64_t value;

	ASSERT(x86_feature & X86_ASYSC);
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	(void) rdmsr(MSR_AMD_EFER, &value);
	value |= AMD_EFER_SCE;
	wrmsr(MSR_AMD_EFER, &value);
}

static void
cpu_asysc_disable(void)
{
	uint64_t value;

	ASSERT(x86_feature & X86_ASYSC);
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Turn off the SCE (syscall enable) bit in the EFER register. Software
	 * executing syscall or sysret with this bit off will incur a #ud trap.
	 */
	(void) rdmsr(MSR_AMD_EFER, &value);
	value &= ~AMD_EFER_SCE;
	wrmsr(MSR_AMD_EFER, &value);
}