/* $NetBSD: rmixl_cpu.c,v 1.15 2022/09/29 07:00:47 skrll Exp $ */

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.15 2022/09/29 07:00:47 skrll Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lock.h>
#include <sys/lwp.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_extern.h>
#include <mips/regnum.h>
#include <mips/pmap.h>
#include <mips/rmi/rmixlreg.h>
#include <mips/rmi/rmixlvar.h>
#include <mips/rmi/rmixl_cpucorevar.h>
#include <mips/rmi/rmixl_cpuvar.h>
#include <mips/rmi/rmixl_intr.h>
#include <mips/rmi/rmixl_fmnvar.h>
#ifdef DDB
#include <mips/db_machdep.h>
#endif

#include <mips/asm.h>           /* XXX CALLFRAME_SIZ */

static int cpu_rmixl_match(device_t, cfdata_t, void *);
static void cpu_rmixl_attach(device_t, device_t, void *);
static void cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const);
#ifdef NOTYET
static int cpu_fmn_intr(void *, rmixl_fmn_rxmsg_t *);
#endif

#ifdef MULTIPROCESSOR
void cpu_rmixl_hatch(struct cpu_info *);
void cpu_rmixl_run(struct cpu_info *);
static int cpu_setup_trampoline_common(struct cpu_info *, struct rmixl_cpu_trampoline_args *);
static void cpu_setup_trampoline_callback(struct cpu_info *);
#endif  /* MULTIPROCESSOR */

#ifdef DEBUG
void rmixl_cpu_data_print(struct cpu_data *);
struct cpu_info *
        rmixl_cpuinfo_print(u_int);
#endif  /* DEBUG */

CFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct rmixl_cpu_softc),
        cpu_rmixl_match, cpu_rmixl_attach, NULL, NULL);

#ifdef MULTIPROCESSOR
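/*
 * A single static copy of the trampoline arguments is reused for each
 * secondary CPU in turn: cpu_rmixl_attach waits for the woken CPU to
 * hatch (or gives up) before the next attachment rewrites it.
 */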
static struct rmixl_cpu_trampoline_args rmixl_cpu_trampoline_args;
#endif

/*
 * cpu_rmixl_watchpoint_init - initialize COP0 watchpoint stuff
 *
 * clear IEU_DEFEATURE[DBE] to ensure T_WATCH on watchpoint exception
 * set COP0 watchhi and watchlo
 *
 * disable all watchpoints
 */
static void
cpu_rmixl_watchpoint_init(void)
{
        uint32_t r;

        r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE);
        r &= ~__BIT(7);         /* DBE */
        rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r);

        cpuwatch_clr_all();
}

/*
 * cpu_xls616_erratum
 *
 * on the XLS616, COUNT/COMPARE clock regs seem to interact between
 * threads on a core
 *
 * the symptom of the error is retarded clock interrupts
 * and very slow apparent system performance
 *
 * other XLS chips may have the same problem.
 * we may need to add other PID checks.
 */
static inline bool
cpu_xls616_erratum(device_t parent, struct cpucore_attach_args *ca)
{
#if 0
        if (mips_options.mips_cpu->cpu_pid == MIPS_XLS616) {
                if (ca->ca_thread > 0) {
                        aprint_error_dev(parent, "XLS616 CLOCK ERRATUM: "
                            "deconfigure cpu%d\n", ca->ca_thread);
                        return true;
                }
        }
#endif
        return false;
}

static bool
cpu_rmixl_erratum(device_t parent, struct cpucore_attach_args *ca)
{
        return cpu_xls616_erratum(parent, ca);
}

static int
cpu_rmixl_match(device_t parent, cfdata_t cf, void *aux)
{
        struct cpucore_attach_args *ca = aux;
        int thread = cf->cf_loc[CPUCORECF_THREAD];

        if (!cpu_rmixl(mips_options.mips_cpu))
                return 0;

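        /*
         * Match when the attachment name agrees, the 'thread' locator
         * is wildcarded or equals this thread, and no known erratum
         * applies; without MULTIPROCESSOR only thread 0 of each core
         * is matched.
         */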
        if (strncmp(ca->ca_name, cf->cf_name, strlen(cf->cf_name)) == 0
#ifndef MULTIPROCESSOR
            && ca->ca_thread == 0
#endif
            && (thread == CPUCORECF_THREAD_DEFAULT || thread == ca->ca_thread)
            && (!cpu_rmixl_erratum(parent, ca)))
                return 1;

        return 0;
}

static void
cpu_rmixl_attach(device_t parent, device_t self, void *aux)
{
        struct rmixl_cpu_softc * const sc = device_private(self);
        struct cpu_info *ci = NULL;
        static bool once = false;
        extern void rmixl_spl_init_cpu(void);

        if (once == false) {
                /* first attach is the primary cpu */
                once = true;
                ci = curcpu();
                sc->sc_dev = self;
                sc->sc_ci = ci;
                ci->ci_softc = (void *)sc;

                rmixl_spl_init_cpu();   /* spl initialization for CPU#0 */
                cpu_rmixl_attach_primary(sc);

#ifdef MULTIPROCESSOR
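                /*
                 * Hook the chip-specific hatch and run routines into
                 * the MI secondary-CPU start-up path.
                 */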
                mips_locoresw.lsw_cpu_init = cpu_rmixl_hatch;
                mips_locoresw.lsw_cpu_run = cpu_rmixl_run;
        } else {
                struct cpucore_attach_args * const ca = aux;
                struct cpucore_softc * const ccsc = device_private(parent);
                rmixlfw_psb_type_t psb_type = rmixl_configuration.rc_psb_type;
                cpuid_t cpuid;

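                /*
                 * The cpuid encodes the core number in bits 4..2 and
                 * the thread number in bits 1..0 (at most 8 cores of
                 * 4 threads each).
                 */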
                KASSERT(ca->ca_core < 8);
                KASSERT(ca->ca_thread < 4);
                cpuid = (ca->ca_core << 2) | ca->ca_thread;
                ci = cpu_info_alloc(ccsc->sc_tlbinfo, cpuid,
                    /* XXX */ 0, ca->ca_core, ca->ca_thread);
                KASSERT(ci != NULL);
                if (ccsc->sc_tlbinfo == NULL)
                        ccsc->sc_tlbinfo = ci->ci_tlb_info;
                sc->sc_dev = self;
                sc->sc_ci = ci;
                ci->ci_softc = (void *)sc;

                switch (psb_type) {
                case PSB_TYPE_RMI:
                case PSB_TYPE_DELL:
                        cpu_setup_trampoline_callback(ci);
                        break;
                default:
                        aprint_error(": psb type=%s cpu_wakeup unsupported\n",
                            rmixlfw_psb_type_name(psb_type));
                        return;
                }

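                /*
                 * Wait up to roughly one second (10000 * 100us) for
                 * the newly woken CPU to mark itself hatched.
                 */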
                for (size_t i=0; i < 10000; i++) {
                        if (kcpuset_isset(cpus_hatched, cpu_index(ci)))
                                break;
                        DELAY(100);
                }
                if (!kcpuset_isset(cpus_hatched, cpu_index(ci))) {
                        aprint_error(": failed to hatch\n");
                        return;
                }
#endif  /* MULTIPROCESSOR */
        }

        /*
         * do per-cpu interrupt initialization
         */
        rmixl_intr_init_cpu(ci);

        aprint_normal("\n");

        cpu_attach_common(self, ci);
}

/*
 * attach the primary processor
 */
static void
cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const sc)
{
        struct cpu_info *ci = sc->sc_ci;
        uint32_t ebase;

        KASSERT(CPU_IS_PRIMARY(ci));

        /*
         * obtain and set cpuid of the primary processor
         */
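        /*
         * EBASE is COP0 register 15, select 1; its low 10 bits (the
         * CPUNUM field) identify this hardware thread.
         */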
        asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
        ci->ci_cpuid = ebase & __BITS(9,0);

        cpu_rmixl_watchpoint_init();

        rmixl_fmn_init();

        rmixl_intr_init_clk();
#ifdef MULTIPROCESSOR
        rmixl_intr_init_ipi();
#endif

#ifdef NOTYET
        void *ih = rmixl_fmn_intr_establish(RMIXL_FMN_STID_CORE0,
            cpu_fmn_intr, ci);
        if (ih == NULL)
                panic("%s: rmixl_fmn_intr_establish failed",
                    __func__);
        sc->sc_ih_fmn = ih;
#endif
}

#ifdef NOTYET
static int
cpu_fmn_intr(void *arg, rmixl_fmn_rxmsg_t *rxmsg)
{
        if (CPU_IS_PRIMARY(curcpu())) {
                printf("%s: cpu%ld: rxsid=%#x, code=%d, size=%d\n",
                    __func__, cpu_number(),
                    rxmsg->rxsid, rxmsg->code, rxmsg->size);
                for (int i=0; i < rxmsg->size; i++)
                        printf("\t%#"PRIx64"\n", rxmsg->msg.data[i]);
        }

        return 1;
}
#endif

#ifdef MULTIPROCESSOR
/*
 * cpu_rmixl_run
 *
 * - chip-specific post-running code called from cpu_hatch via lsw_cpu_run
 */
void
cpu_rmixl_run(struct cpu_info *ci)
{
        struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
        cpucore_rmixl_run(device_parent(sc->sc_dev));
}

/*
 * cpu_rmixl_hatch
 *
 * - chip-specific hatch code called from cpu_hatch via lsw_cpu_init
 */
void
cpu_rmixl_hatch(struct cpu_info *ci)
{
        struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
        extern void rmixl_spl_init_cpu(void);

        rmixl_spl_init_cpu();   /* spl initialization for this CPU */

        (void)splhigh();

#ifdef DIAGNOSTIC
        uint32_t ebase = mipsNN_cp0_ebase_read();
        KASSERT((ebase & MIPS_EBASE_CPUNUM) == ci->ci_cpuid);
        KASSERT(curcpu() == ci);
#endif

        cpucore_rmixl_hatch(device_parent(sc->sc_dev));

        cpu_rmixl_watchpoint_init();
}

static int
cpu_setup_trampoline_common(struct cpu_info *ci, struct rmixl_cpu_trampoline_args *ta)
{
        struct lwp *l = ci->ci_data.cpu_idlelwp;
        uintptr_t stacktop;

#ifdef DIAGNOSTIC
        /* Ensure our current stack can be used by the firmware */
        uint64_t sp;
        __asm__ volatile("move %0, $sp\n" : "=r"(sp));
#ifdef _LP64
        /* can be made into a KSEG0 addr */
        KASSERT(MIPS_XKPHYS_P(sp));
        KASSERT((MIPS_XKPHYS_TO_PHYS(sp) >> 32) == 0);
#else
        /* is a KSEG0 addr */
        KASSERT(MIPS_KSEG0_P(sp));
#endif  /* _LP64 */
#endif  /* DIAGNOSTIC */

#ifndef _LP64
        /*
         * Ensure 'ci' is a KSEG0 address for trampoline args
         * to avoid TLB fault in cpu_trampoline() when loading ci_idlelwp
         */
        KASSERT(MIPS_KSEG0_P(ci));
#endif

        /*
         * Ensure 'ta' is a KSEG0 address for trampoline args
         * to avoid TLB fault in trampoline when loading args.
         *
         * Note:
         *   RMI firmware only passes the lower 32-bit half of 'ta'
         *   to rmixl_cpu_trampoline (the upper half is clear),
         *   so rmixl_cpu_trampoline must reconstruct the missing upper half;
         *   rmixl_cpu_trampoline "knows" 'ta' is a KSEG0 address
         *   and sign-extends to make an LP64 KSEG0 address.
         */
        KASSERT(MIPS_KSEG0_P(ta));

        /*
         * marshal args for rmixl_cpu_trampoline;
         * note for non-LP64 kernel, use of intptr_t
         * forces sign extension of 32 bit pointers
         */
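        /*
         * The new CPU starts on its idle lwp's stack, with the stack
         * pointer placed just below the lwp's trapframe to leave room
         * for a standard call frame.
         */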
        stacktop = (uintptr_t)l->l_md.md_utf - CALLFRAME_SIZ;
        ta->ta_sp = (uint64_t)(intptr_t)stacktop;
        ta->ta_lwp = (uint64_t)(intptr_t)l;
        ta->ta_cpuinfo = (uint64_t)(intptr_t)ci;

        return 0;
}

static void
cpu_setup_trampoline_callback(struct cpu_info *ci)
{
        void (*wakeup_cpu)(void *, void *, unsigned int);
        struct rmixl_cpu_trampoline_args *ta = &rmixl_cpu_trampoline_args;
        extern void rmixl_cpu_trampoline(void *);
        extern void rmixlfw_wakeup_cpu(void *, void *, u_int64_t, void *);

        cpu_setup_trampoline_common(ci, ta);

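        /*
         * The firmware wakeup entry point comes from the PSB info
         * block; a 32-bit kernel can only use the low 32 bits of that
         * address as a pointer.
         */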
#if _LP64
        wakeup_cpu = (void *)rmixl_configuration.rc_psb_info.wakeup;
#else
        wakeup_cpu = (void *)(intptr_t)
            (rmixl_configuration.rc_psb_info.wakeup & 0xffffffff);
#endif

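        /*
         * Ask the firmware to start the target hardware thread at
         * rmixl_cpu_trampoline with 'ta' as its argument; the third
         * parameter is a mask with one bit set per cpuid to wake.
         */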
        rmixlfw_wakeup_cpu(rmixl_cpu_trampoline, (void *)ta,
            (uint64_t)1 << ci->ci_cpuid, wakeup_cpu);
}
#endif  /* MULTIPROCESSOR */


#ifdef DEBUG
void
rmixl_cpu_data_print(struct cpu_data *dp)
{
        printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
        printf("cpu_callout %p\n", dp->cpu_callout);
        printf("&cpu_schedstate %p\n", &dp->cpu_schedstate);    /* TBD */
        printf("&cpu_xcall %p\n", &dp->cpu_xcall);              /* TBD */
        printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
        printf("cpu_idlelwp %p\n", dp->cpu_idlelwp);
        printf("cpu_lockstat %p\n", dp->cpu_lockstat);
        printf("cpu_index %d\n", dp->cpu_index);
        printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
        printf("cpu_psz_read_depth %d\n", dp->cpu_psz_read_depth);
        printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
        printf("cpu_softints %d\n", dp->cpu_softints);
        printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);
        printf("cpu_ntrap %"PRIu64"\n", dp->cpu_ntrap);
        printf("cpu_nfault %"PRIu64"\n", dp->cpu_nfault);
        printf("cpu_nintr %"PRIu64"\n", dp->cpu_nintr);
        printf("cpu_nsoft %"PRIu64"\n", dp->cpu_nsoft);
        printf("cpu_nswtch %"PRIu64"\n", dp->cpu_nswtch);
        printf("cpu_uvm %p\n", dp->cpu_uvm);
        printf("cpu_softcpu %p\n", dp->cpu_softcpu);
        printf("&cpu_biodone %p\n", &dp->cpu_biodone);          /* TBD */
        printf("&cpu_percpu %p\n", &dp->cpu_percpu);            /* TBD */
        printf("cpu_selcluster %p\n", dp->cpu_selcluster);
        printf("cpu_nch %p\n", dp->cpu_nch);
        printf("&cpu_ld_locks %p\n", &dp->cpu_ld_locks);        /* TBD */
        printf("&cpu_ld_lock %p\n", &dp->cpu_ld_lock);          /* TBD */
        printf("cpu_cc_freq %#"PRIx64"\n", dp->cpu_cc_freq);
        printf("cpu_cc_skew %#"PRIx64"\n", dp->cpu_cc_skew);
}

struct cpu_info *
rmixl_cpuinfo_print(u_int cpuindex)
{
        struct cpu_info * const ci = cpu_lookup(cpuindex);

        if (ci != NULL) {
                rmixl_cpu_data_print(&ci->ci_data);
                printf("ci_dev %p\n", ci->ci_dev);
                printf("ci_cpuid %ld\n", ci->ci_cpuid);
                printf("ci_cctr_freq %ld\n", ci->ci_cctr_freq);
                printf("ci_cpu_freq %ld\n", ci->ci_cpu_freq);
                printf("ci_cycles_per_hz %ld\n", ci->ci_cycles_per_hz);
                printf("ci_divisor_delay %ld\n", ci->ci_divisor_delay);
                printf("ci_divisor_recip %ld\n", ci->ci_divisor_recip);
                printf("ci_curlwp %p\n", ci->ci_curlwp);
                printf("ci_onproc %p\n", ci->ci_onproc);
                printf("ci_want_resched %d\n", ci->ci_want_resched);
                printf("ci_mtx_count %d\n", ci->ci_mtx_count);
                printf("ci_mtx_oldspl %d\n", ci->ci_mtx_oldspl);
                printf("ci_idepth %d\n", ci->ci_idepth);
                printf("ci_cpl %d\n", ci->ci_cpl);
                printf("&ci_cpl %p\n", &ci->ci_cpl);    /* XXX */
                printf("ci_next_cp0_clk_intr %#x\n", ci->ci_next_cp0_clk_intr);
                for (int i=0; i < SOFTINT_COUNT; i++)
                        printf("ci_softlwps[%d] %p\n", i, ci->ci_softlwps[i]);
                printf("ci_tlb_slot %d\n", ci->ci_tlb_slot);
                printf("ci_pmap_asid_cur %d\n", ci->ci_pmap_asid_cur);
                printf("ci_tlb_info %p\n", ci->ci_tlb_info);
                printf("ci_pmap_kern_segtab %p\n", ci->ci_pmap_kern_segtab);
                printf("ci_pmap_user_segtab %p\n", ci->ci_pmap_user_segtab);
#ifdef _LP64
                printf("ci_pmap_kern_seg0tab %p\n", ci->ci_pmap_kern_seg0tab);
                printf("ci_pmap_user_seg0tab %p\n", ci->ci_pmap_user_seg0tab);
#else
                printf("ci_pmap_srcbase %#"PRIxVADDR"\n", ci->ci_pmap_srcbase);
                printf("ci_pmap_dstbase %#"PRIxVADDR"\n", ci->ci_pmap_dstbase);
#endif
#ifdef MULTIPROCESSOR
                printf("ci_flags %#lx\n", ci->ci_flags);
                printf("ci_request_ipis %#"PRIx64"\n", ci->ci_request_ipis);
                printf("ci_active_ipis %#"PRIx64"\n", ci->ci_active_ipis);
                printf("ci_ksp_tlb_slot %d\n", ci->ci_ksp_tlb_slot);
#endif
        }

        return ci;
}
#endif  /* DEBUG */