/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cpumask.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;


/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
        mp_finish = 1;
        if (bootverbose)
                kprintf("Finish MP startup\n");

        /* build our map of 'other' CPUs */
        mycpu->gd_other_cpus = smp_startup_mask;
        CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        /*
         * Let the other cpus finish initializing and build their map
         * of 'other' CPUs.
         */
        rel_mplock();
        while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
                DELAY(100000);
                cpu_lfence();
        }

        while (try_mplock() == 0)
                DELAY(100000);
        if (bootverbose)
                kprintf("Active CPU Mask: %08lx\n",
                        (long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

void *
start_ap(void *arg __unused)
{
        init_secondary();
        setrealcpu();
        bootstrap_idle();

        return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

int naps;

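/*
 * Size the system to the number of cpus the vkernel was configured
 * with (optcpus), allocate cpu0's IPI message queues, and launch
 * the APs.
 */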
void
mp_start(void)
{
        size_t ipiq_size;
        int shift;

        ncpus = optcpus;
        naps = ncpus - 1;

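        /*
         * Worked example: with ncpus = 6 the loop and decrement below
         * leave shift = 2 (4 <= 6), the adjustment bumps it to 3, and
         * we get ncpus_fit = 8, ncpus_fit_mask = 7.
         */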
        for (shift = 0; (1 << shift) <= ncpus; ++shift)
                ;
        --shift;

        /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
        if ((1 << shift) < ncpus)
                ++shift;
        ncpus_fit = 1 << shift;
        ncpus_fit_mask = ncpus_fit - 1;

        malloc_reinit_ncpus();

        /*
         * cpu0 initialization
         */
        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
        mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
                                            VM_SUBSYS_IPIQ);
        bzero(mycpu->gd_ipiq, ipiq_size);

        /* initialize arc4random. */
        arc4_init_pcpu(0);

        /*
         * cpu 1-(n-1)
         */
        start_all_aps(boot_address);
}

void
mp_announce(void)
{
        int x;

        kprintf("DragonFly/MP: Multiprocessor\n");
        kprintf(" cpu0 (BSP)\n");

        for (x = 1; x <= naps; ++x)
                kprintf(" cpu%d (AP)\n", x);
}

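/*
 * In the vkernel an IPI is simulated by sending SIGUSR1 to the
 * pthread backing the target cpu; the signal handler (installed
 * elsewhere) is then expected to drain the cpu's IPI queue.
 */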
void
cpu_send_ipiq(int dcpu)
{
        if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
                if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
                        panic("pthread_kill failed in cpu_send_ipiq");
        }
#if 0
        panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
        kprintf("XXX single_cpu_ipi\n");
}

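/*
 * Deliver an IPI to every cpu in the target mask, peeling off the
 * lowest set bit one cpu at a time.
 */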
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
        crit_enter();
        while (CPUMASK_TESTNZERO(target)) {
                int n = BSFCPUMASK(target);
                CPUMASK_NANDBIT(target, n);
                single_cpu_ipi(n, vector, delivery_mode);
        }
        crit_exit();
}

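/*
 * Stop the cpus in the map (restricted to cpus in smp_active_mask):
 * set each target's bit in stopped_cpus and send its pthread SIGXCPU.
 * The signal handler is presumably expected to spin until
 * restart_cpus() clears the bit again.
 */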
int
stop_cpus(cpumask_t map)
{
        CPUMASK_ANDMASK(map, smp_active_mask);

        crit_enter();
        while (CPUMASK_TESTNZERO(map)) {
                int n = BSFCPUMASK(map);
                CPUMASK_NANDBIT(map, n);
                ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
                if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
                        panic("stop_cpus: pthread_kill failed");
        }
        crit_exit();
#if 0
        panic("XXX stop_cpus()");
#endif

        return(1);
}

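/*
 * Restart previously stopped cpus: clear each target's bit in
 * stopped_cpus and poke its pthread with SIGXCPU again.
 */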
int
restart_cpus(cpumask_t map)
{
        CPUMASK_ANDMASK(map, smp_active_mask);

        crit_enter();
        while (CPUMASK_TESTNZERO(map)) {
                int n = BSFCPUMASK(map);
                CPUMASK_NANDBIT(map, n);
                ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
                if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
                        panic("restart_cpus: pthread_kill failed");
        }
        crit_exit();
#if 0
        panic("XXX restart_cpus()");
#endif

        return(1);
}

void
ap_init(void)
{
        /*
         * Adjust smp_startup_mask to signal the BSP that we have started
         * up successfully.  Note that we do not yet hold the BGL.  The BSP
         * is waiting for our signal.
         *
         * We can't set our bit in smp_active_mask yet because we are holding
         * interrupts physically disabled and remote cpus could deadlock
         * trying to send us an IPI.
         */
        ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
        cpu_mfence();

        /*
         * Interlock for finalization.  Wait until mp_finish is non-zero,
         * then get the MP lock.
         *
         * Note: We are in a critical section.
         *
         * Note: we are the idle thread, we can only spin.
         *
         * Note: The load fence is memory volatile and prevents the compiler
         * from improperly caching mp_finish, and the cpu from improperly
         * caching it.
         */

        while (mp_finish == 0) {
                cpu_lfence();
                DELAY(500000);
        }
        while (try_mplock() == 0)
                DELAY(100000);

        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();

        /* Build our map of 'other' CPUs. */
        mycpu->gd_other_cpus = smp_startup_mask;
        CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();

        /*
         * Once we go active we must process any IPIQ messages that may
         * have been queued, because no actual IPI will occur until we
         * set our bit in the smp_active_mask.  If we don't, the IPI
         * message interlock could be left set which would also prevent
         * further IPIs.
         *
         * The idle loop doesn't expect the BGL to be held and while
         * lwkt_switch() normally cleans things up this is a special case
         * because we are returning almost directly into the idle loop.
         *
         * The idle thread is never placed on the runq, make sure
         * nothing we've done put it there.
         */
        KKASSERT(get_mplock_count(curthread) == 1);
        ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

        mdcpu->gd_fpending = 0;
        mdcpu->gd_ipending = 0;
        initclocks_pcpu();      /* clock interrupts (via IPIs) */

        /*
         * Since we may have cleaned up the interrupt triggers, manually
         * process any pending IPIs before exiting our critical section.
         * Once the critical section has exited, normal interrupt processing
         * may occur.
         */
        atomic_swap_int(&mycpu->gd_npoll, 0);
        lwkt_process_ipiq();

        /*
         * Releasing the mp lock lets the BSP finish up the SMP init
         */
        rel_mplock();
        KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

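/*
 * Early per-AP initialization: point %gs at this cpu's privatespace
 * so the mycpu/mdcpu accessors work, then put the TSS into a known
 * state.
 */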
void
init_secondary(void)
{
        int myid = bootAP;
        struct mdglobaldata *md;
        struct privatespace *ps;

        ps = &CPU_prvspace[myid];

        KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

        /*
         * Setup the %gs for cpu #n.  The mycpu macro works after this
         * point.  Note that %fs is used by pthreads.
         */
        tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

        md = mdcpu;     /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

        /* JG */
        md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */
        //md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        //md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

        /*
         * Set to a known state:
         * Set by mpboot.s: CR0_PG, CR0_PE
         * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
}

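/*
 * Allocate and map each AP's private space and idle stack, initialize
 * its globaldata and IPI queues, then create one pthread per AP and
 * spin until the new cpu announces itself in smp_startup_mask.
 */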
static int
start_all_aps(u_int boot_addr)
{
        int x, i;
        struct mdglobaldata *gd;
        struct privatespace *ps;
        vm_page_t m;
        vm_offset_t va;
        pthread_attr_t attr;
        size_t ipiq_size;
#if 0
        struct lwp_params params;
#endif

        /*
         * needed for ipis to initial thread
         * FIXME: rename ap_tids?
         */
        ap_tids[0] = pthread_self();
        pthread_attr_init(&attr);

        vm_object_hold(kernel_object);
        for (x = 1; x <= naps; ++x) {
                /* Allocate space for the CPU's private space. */
                for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
                        va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
                        m = vm_page_alloc(kernel_object, va, VM_ALLOC_SYSTEM);
                        pmap_kenter_quick(va, m->phys_addr);
                }

                for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
                        va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
                        m = vm_page_alloc(kernel_object, va, VM_ALLOC_SYSTEM);
                        pmap_kenter_quick(va, m->phys_addr);
                }

                gd = &CPU_prvspace[x].mdglobaldata;     /* official location */
                bzero(gd, sizeof(*gd));
                gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

                /* prime data page for it to use */
                mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);

#if 0
                gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
                gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
                gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
                gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
                gd->gd_CADDR1 = ps->CPAGE1;
                gd->gd_CADDR2 = ps->CPAGE2;
                gd->gd_CADDR3 = ps->CPAGE3;
                gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

                ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
                gd->mi.gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
                                                    VM_SUBSYS_IPIQ);
                bzero(gd->mi.gd_ipiq, ipiq_size);

                /* initialize arc4random. */
                arc4_init_pcpu(x);

                /*
                 * Setup the AP boot stack
                 */
                bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
                bootAP = x;

                /*
                 * Setup the AP's lwp, this is the 'cpu'
                 *
                 * We have to make sure our signals are masked or the new LWP
                 * may pick up a signal that it isn't ready for yet.  SMP
                 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
                 * have already been enabled.
                 */
                cpu_disable_intr();
                pthread_create(&ap_tids[x], &attr, start_ap, NULL);
                cpu_enable_intr();

                while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
                        cpu_lfence(); /* XXX spin until the AP has started */
                        DELAY(1000);
                }
        }
        vm_object_drop(kernel_object);
        pthread_attr_destroy(&attr);

        return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
void
detect_cpu_topology(void)
{
        logical_CPU_bits = vkernel_b_arg;
        core_bits = vkernel_B_arg;
}

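/*
 * The helpers below assume the APIC ID is partitioned as
 *
 *      [ chip ID | core bits | logical CPU bits ]
 *
 * e.g. with core_bits = 2 and logical_CPU_bits = 1, apicid 22 (10110b)
 * decodes to chip 2, core 3, logical cpu 0.
 */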
int
get_chip_ID(int cpuid)
{
        return get_apicid_from_cpuid(cpuid) >>
            (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
        return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
        return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
                ((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
        return (get_apicid_from_cpuid(cpuid) &
                ((1 << logical_CPU_bits) - 1));
}