/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/sysmacros.h>
#include <sys/prom_plat.h>
#include <sys/prom_debug.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <sys/machsystm.h>
#include <sys/callb.h>
#include <sys/cpu_module.h>
#include <sys/chip.h>
#include <sys/dtrace.h>
#include <sys/reboot.h>
#include <sys/kdi.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#include <sys/bootconf.h>
#endif /* TRAPTRACE */

#include <sys/cpu_sgnblk_defs.h>

extern void cpu_intrq_setup(struct cpu *);
extern void cpu_intrq_register(struct cpu *);

struct cpu	*cpus;		/* pointer to other cpus; dynamically allocated */
struct cpu	*cpu[NCPU];	/* pointers to all CPUs */
uint64_t	cpu_pa[NCPU];	/* pointers to all CPUs in PA */
cpu_core_t	cpu_core[NCPU];	/* cpu_core structures */

#ifdef TRAPTRACE
caddr_t	ttrace_buf;	/* bop alloc'd traptrace for all cpus except 0 */
#endif /* TRAPTRACE */

/* bit mask of cpus ready for x-calls, protected by cpu_lock */
cpuset_t cpu_ready_set;

/* bit mask used to communicate with cpus during bringup */
static cpuset_t proxy_ready_set;

static void	slave_startup(void);

/*
 * Defined in $KARCH/os/mach_mp_startup.c
 */
#pragma weak init_cpu_info

/*
 * Amount of time (in milliseconds) we should wait before giving up on CPU
 * initialization and assuming that the CPU we're trying to wake up is
 * dead or out of control.
 */
#define	CPU_WAKEUP_GRACE_MSEC	1000

/*
 * MP configurations may reserve additional interrupt request entries.
 * intr_add_{div,max} can be modified to tune memory usage.
 */

uint_t	intr_add_div = 1;	/* 1=worst case memory usage */
size_t	intr_add_max = 0;

/* intr_add_{pools,head,tail} calculated based on intr_add_{div,max} */

size_t	intr_add_pools = 0;	/* additional pools per cpu */
struct intr_req	*intr_add_head = (struct intr_req *)NULL;
#ifdef DEBUG
struct intr_req	*intr_add_tail = (struct intr_req *)NULL;
#endif /* DEBUG */
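
/*
 * For example (a hypothetical tuning, assuming the usual /etc/system
 * mechanism), a configuration wanting roughly half of the worst-case
 * reservation, capped at 64KB, could set the following before boot:
 *
 *	set intr_add_div = 2
 *	set intr_add_max = 0x10000
 *
 * See ndata_alloc_cpus() below for how these two values are applied.
 */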

#ifdef TRAPTRACE
/*
 * This function bop allocs traptrace buffers for all cpus
 * other than the boot cpu.
 */
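/*
 * Note: the buffer comes from BOP_ALLOC() (boot-services memory),
 * presumably because this runs during early startup, before the normal
 * kernel memory allocators are available.
 */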
caddr_t
trap_trace_alloc(caddr_t base)
{
	caddr_t	vaddr;
	extern int max_ncpus;

	if (max_ncpus == 1) {
		return (base);
	}

	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, base, (TRAP_TBUF_SIZE *
	    (max_ncpus - 1)), TRAP_TBUF_SIZE)) == NULL) {
		panic("traptrace_alloc: can't bop alloc");
	}
	ttrace_buf = vaddr;
	PRM_DEBUG(ttrace_buf);
	return (vaddr + (TRAP_TBUF_SIZE * (max_ncpus - 1)));
}
#endif /* TRAPTRACE */

/*
 * common slave cpu initialization code
 */
void
common_startup_init(cpu_t *cp, int cpuid)
{
	kthread_id_t tp;
	sfmmu_t *sfmmup;
	caddr_t sp;

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 */
	tp = thread_create(NULL, 0, slave_startup, NULL, 0, &p0,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are set up by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	sfmmup = astosfmmu(&kas);
	CPUSET_ADD(sfmmup->sfmmu_cpusran, cpuid);

	/*
	 * Set up the thread to start in slave_startup.  t_pc holds a
	 * SPARC-style return address (resume transfers control to
	 * t_pc + 8), hence the -8 bias on the entry point.
	 */
	sp = tp->t_stk;
	tp->t_pc = (uintptr_t)slave_startup - 8;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);
}

/*
 * Parametric flag setting functions.  These routines set the cpu
 * state just prior to releasing the slave cpu.
 */
void
cold_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);
	/*
	 * Add CPU_READY after the cpu_add_active() call
	 * to avoid pausing cp.
	 */
	cp->cpu_flags |= CPU_READY;		/* ready */
	cpu_set_state(cp);
}

static void
warm_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * warm start activates cpus into the OFFLINE state
	 */
	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS
	    | CPU_OFFLINE | CPU_QUIESCED;
	cpu_set_state(cp);
}

/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER	SLAVE
 * -------	----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *		the slave runs slave_startup and then sets the proxy
 *		the slave waits for the master to add slave to the ready set
 *
 * the master finishes the initialization and
 * adds the slave to the ready set
 *
 *		the slave exits the startup thread and is running
 */
void
start_cpu(int cpuid, void (*flag_func)(int))
{
	extern caddr_t cpu_startup;
	int timeout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		dnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (dnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in. */
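	/*
	 * Each DELAY(1000) below pauses for 1000 microseconds (1ms), so
	 * this loop gives the slave roughly CPU_WAKEUP_GRACE_MSEC
	 * milliseconds to check in before we declare it dead.
	 */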
	for (timeout = CPU_WAKEUP_GRACE_MSEC; timeout; timeout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timeout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * Deal with the cpu flags in a phase-specific manner.  For various
	 * reasons, this needs to run after the slave is checked in but
	 * before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}

#ifdef TRAPTRACE
int trap_tr0_inuse = 1;	/* it is always used on the boot cpu */
int trap_trace_inuse[NCPU];
#endif /* TRAPTRACE */

#define	cpu_next_free	cpu_prev
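
/*
 * A freed cpu structure's cpu_prev link is not otherwise used, so it is
 * aliased (above) as cpu_next_free to chain freed cpu structures into a
 * free list; see cleanup_cpu_common() and setup_cpu_common().
 */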

/*
 * Routine to set up a CPU to prepare for starting it up.
 */
void
setup_cpu_common(int cpuid)
{
	struct cpu *cp = NULL;
	kthread_id_t tp;
#ifdef TRAPTRACE
	int tt_index;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */

	extern void idle();
	extern void init_intr_threads(struct cpu *);

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] == NULL);

	ASSERT(ncpus <= max_ncpus);

#ifdef TRAPTRACE
	/*
	 * allocate a traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	if (!trap_tr0_inuse) {
		trap_tr0_inuse = 1;
		newbuf = trap_tr0;
		tt_index = -1;
	} else {
		for (tt_index = 0; tt_index < (max_ncpus - 1); tt_index++)
			if (!trap_trace_inuse[tt_index])
				break;
		ASSERT(tt_index < max_ncpus - 1);
		trap_trace_inuse[tt_index] = 1;
		newbuf = (caddr_t)(ttrace_buf + (tt_index * TRAP_TBUF_SIZE));
	}
	ctlp->d.vaddr_base = newbuf;
	ctlp->d.offset = ctlp->d.last_offset = 0;
	ctlp->d.limit = trap_trace_bufsize;
	ctlp->d.paddr_base = va_to_pa(newbuf);
	ASSERT(ctlp->d.paddr_base != (uint64_t)-1);
	/*
	 * initialize HV trap trace buffer for other cpus
	 */
	htrap_trace_setup((newbuf + TRAP_TSIZE), cpuid);
#endif /* TRAPTRACE */

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	if (cpu0.cpu_flags == 0) {
		cp = &cpu0;
	} else {
		/*
		 * When dynamically allocating cpu structs,
		 * cpus is used as a pointer to a list of freed
		 * cpu structs.
		 */
		if (cpus) {
			/* grab the first cpu struct on the free list */
			cp = cpus;
			if (cp->cpu_next_free)
				cpus = cp->cpu_next_free;
			else
				cpus = NULL;
		}
	}

	if (cp == NULL)
		cp = vmem_xalloc(static_alloc_arena, CPU_ALLOC_SIZE,
		    CPU_ALLOC_SIZE, 0, 0, NULL, NULL, VM_SLEEP);

	bzero(cp, sizeof (*cp));

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;

	/*
	 * Initialize ptl1_panic stack
	 */
	ptl1_init_cpu(cp);

	/*
	 * Initialize the dispatcher for this CPU.
	 */
	disp_cpu_init(cp);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while registering itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	init_cpu_info(cp);

	/*
	 * Initialize the interrupt threads for this CPU
	 */
	init_intr_pool(cp);
	init_intr_threads(cp);

	/*
	 * Add CPU to list of available CPUs, and associate it
	 * with a chip.  It'll be on the active list after it is
	 * started.
	 */
	cpu_add_unit(cp);
	chip_cpu_init(cp);

	/*
	 * Allocate and init cpu module private data structures,
	 * including scrubber.
	 */
	cpu_init_private(cp);

	cpu_intrq_setup(cp);
}

/*
 * Routine to clean up a CPU after shutting it down.
 */
int
cleanup_cpu_common(int cpuid)
{
	struct cpu *cp;
#ifdef TRAPTRACE
	int i;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] != NULL);

	cp = cpu[cpuid];

	/* Free cpu module private data structures, including scrubber. */
	cpu_uninit_private(cp);

	/*
	 * Remove CPU from list of available CPUs.
	 */
	cpu_del_unit(cpuid);

	/*
	 * Clean up the interrupt pool.
	 */
	cleanup_intr_pool(cp);

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause thread,
	 * and its interrupt threads.  Clean these up.
	 */
	cpu_destroy_bound_threads(cp);

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp, cp->cpu_intr_stack);

#ifdef TRAPTRACE
	/*
	 * Free the traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	newbuf = ctlp->d.vaddr_base;
	i = (newbuf - ttrace_buf) / (TRAP_TBUF_SIZE);
	if (((newbuf - ttrace_buf) % (TRAP_TBUF_SIZE) == 0) &&
	    ((i >= 0) && (i < (max_ncpus - 1)))) {
		/*
		 * This CPU got its trap trace buffer from the
		 * boot-alloc'd bunch of them.
		 */
		trap_trace_inuse[i] = 0;
		bzero(newbuf, (TRAP_TBUF_SIZE));
	} else if (newbuf == trap_tr0) {
		trap_tr0_inuse = 0;
		bzero(trap_tr0, (TRAP_TBUF_SIZE));
	} else {
		cmn_err(CE_WARN, "failed to free trap trace buffer from cpu%d",
		    cpuid);
	}
	bzero(ctlp, sizeof (*ctlp));
#endif /* TRAPTRACE */

	/*
	 * There is a race condition with mutex_vector_enter() which
	 * caches a cpu pointer.  The race is detected by checking cpu_next.
	 */
	disp_cpu_fini(cp);
	cpu_pa[cpuid] = 0;
	bzero(cp, sizeof (*cp));

	/*
	 * Place the freed cpu structure on the list of freed cpus.
	 */
	if (cp != &cpu0) {
		if (cpus) {
			cp->cpu_next_free = cpus;
			cpus = cp;
		} else
			cpus = cp;
	}

	return (0);
}
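
/*
 * Note that the cpu structure is recycled onto the free list above rather
 * than returned to an allocator; presumably this also keeps the memory
 * behind any cpu pointer cached by mutex_vector_enter() valid (if zeroed)
 * after the CPU has been cleaned up.
 */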

/*
 * This routine is used to start a previously powered off processor.
 * Note that restarted cpus are initialized into the offline state.
 */
void
restart_other_cpu(int cpuid)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t sp;
	extern void idle();

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpuid < NCPU && cpu[cpuid] != NULL);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	cp = cpu[cpuid];

	common_startup_init(cp, cpuid);

	/*
	 * The idle thread's t_lock is held when the idle thread is
	 * suspended.  Manually unlock it so that we can resume the
	 * suspended idle thread.  Also reset the thread's PC (again
	 * with the -8 return-address bias) so that it restarts at the
	 * top of idle().
	 */
	cp->cpu_intr_actv = 0;		/* clear the value from previous life */
	cp->cpu_m.mutex_ready = 0;	/* we are not ready yet */
	lock_clear(&cp->cpu_idle_thread->t_lock);
	tp = cp->cpu_idle_thread;

	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;
	tp->t_pc = (uintptr_t)idle - 8;

	/*
	 * restart the cpu now
	 */
	promsafe_pause_cpus();
	start_cpu(cpuid, warm_flag_set);
	start_cpus();

	/* call cmn_err outside pause_cpus/start_cpus to avoid deadlock */
	cmn_err(CE_CONT, "!cpu%d initialization complete - restarted\n",
	    cpuid);
}

/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu *cp = CPU;
	ushort_t original_flags = cp->cpu_flags;

#ifdef TRAPTRACE
	htrap_trace_register(cp->cpu_id);
#endif
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * The slave will wait here forever -- assuming that the master
	 * will get back to us.  If it doesn't, we've got bigger problems
	 * than a master not replying to this slave.
	 * The small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * Park the slave thread in a safe/quiet state and wait for the
	 * master to finish configuring this CPU before proceeding to
	 * thread_exit().
	 */
	while (((volatile ushort_t)cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the CMT subsystem that the slave has started
	 */
	chip_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}

/*
 * Bug 4163850 changed the allocation method for cpu structs; cpu structs
 * are now dynamically allocated.  This routine determines if additional
 * per-cpu intr_req entries need to be allocated.
 */
int
ndata_alloc_cpus(struct memlist *ndata)
{
	size_t real_sz;
	extern int niobus;

	if (niobus > 1) {

		/*
		 * Allocate additional intr_req entries if we have more than
		 * one io bus.  The memory to allocate is calculated from
		 * four variables: niobus, max_ncpus, intr_add_div, and
		 * intr_add_max.  Allocate a multiple of INTR_POOL_SIZE
		 * bytes (512).  Each cpu already reserves 512 bytes in its
		 * machcpu structure, so the worst case is
		 * (512 * (niobus - 1) * max_ncpus) additional bytes.
		 *
		 * While niobus and max_ncpus reflect the h/w, the following
		 * may be tuned (before boot):
		 *
		 *	intr_add_div -	divisor for scaling the number of
		 *			additional intr_req entries.  Use '1'
		 *			for worst case memory, '2' for half,
		 *			etc.
		 *
		 *	intr_add_max -	upper limit on bytes of memory to
		 *			reserve
		 */

		real_sz = INTR_POOL_SIZE * (niobus - 1) * max_ncpus;

		/* tune memory usage by applying divisor and maximum */

		if (intr_add_max == 0)
			intr_add_max = max_ncpus * INTR_POOL_SIZE;
		real_sz = MIN(intr_add_max, real_sz / MAX(intr_add_div, 1));

		/* round down to multiple of (max_ncpus * INTR_POOL_SIZE) */

		intr_add_pools = real_sz / (max_ncpus * INTR_POOL_SIZE);
		real_sz = intr_add_pools * (max_ncpus * INTR_POOL_SIZE);

		/* actually reserve the space */

		intr_add_head = ndata_alloc(ndata, real_sz, ecache_alignsize);
		if (intr_add_head == NULL)
			return (-1);

		PRM_DEBUG(intr_add_head);
#ifdef DEBUG
		intr_add_tail = (struct intr_req *)
		    ((uintptr_t)intr_add_head + real_sz);
#endif /* DEBUG */
	}

	return (0);
}
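
/*
 * Worked example for the sizing logic above, using hypothetical values:
 * with niobus = 3, max_ncpus = 64, intr_add_div = 1 and intr_add_max = 0,
 * the worst-case size is 512 * (3 - 1) * 64 = 64KB.  intr_add_max then
 * defaults to 64 * 512 = 32KB, so real_sz is capped at 32KB, giving
 * intr_add_pools = 32KB / (64 * 512) = 1 additional pool per cpu.
 */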


extern struct cpu *cpu[NCPU];	/* pointers to all CPUs */

extern void setup_cpu_common(int);
extern void common_startup_init(cpu_t *, int);
extern void start_cpu(int, void (*func)(int));
extern void cold_flag_set(int cpuid);

/*
 * cpu_bringup_set is a tunable (via /etc/system, debugger, etc.) that
 * can be used during debugging to control which processors are brought
 * online at boot time.  The variable represents a bitmap of the ids
 * of the processors that will be brought online.  The initialization
 * of this variable depends on the type of cpuset_t, which varies
 * depending on the number of processors supported (see cpuvar.h).
 */
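/*
 * For example (assuming a configuration where cpuset_t is a plain
 * bitmask), adding
 *
 *	set cpu_bringup_set = 0x5
 *
 * to /etc/system would restrict bringup to CPUs 0 and 2.  On systems
 * where cpuset_t is an array, this simple assignment does not apply.
 */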
cpuset_t cpu_bringup_set;


/*
 * Generic start-all cpus entry.  Typically used during cold initialization.
 * Note that cold start cpus are initialized into the online state.
 */
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
#ifdef MPSAS
		/* just CPU 0 */
		CPUSET_ADD(cpu_bringup_set, 0);
#else
		CPUSET_ALL(cpu_bringup_set);
#endif
	}

	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}

	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();
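	/*
	 * Note on cmn_err() prefixes used in this function: a leading '!'
	 * directs a message to the system log only, while a leading '?'
	 * makes it console-visible only under verbose boot; see cmn_err(9F).
	 */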

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		dnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (dnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}
		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		setup_cpu_common(cpuid);

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		/*
		 * XXX: register_cpu_setup() callbacks should be called here
		 * with a new setup code, CPU_BOOT (or something).
		 */
		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	if (&cpu_mp_init)
		cpu_mp_init();
}