13446Smrj /*
23446Smrj * CDDL HEADER START
33446Smrj *
43446Smrj * The contents of this file are subject to the terms of the
53446Smrj * Common Development and Distribution License (the "License").
63446Smrj * You may not use this file except in compliance with the License.
73446Smrj *
83446Smrj * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93446Smrj * or http://www.opensolaris.org/os/licensing.
103446Smrj * See the License for the specific language governing permissions
113446Smrj * and limitations under the License.
123446Smrj *
133446Smrj * When distributing Covered Code, include this CDDL HEADER in each
143446Smrj * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153446Smrj * If applicable, add the following below this CDDL HEADER, with the
163446Smrj * fields enclosed by brackets "[]" replaced with your own identifying
173446Smrj * information: Portions Copyright [yyyy] [name of copyright owner]
183446Smrj *
193446Smrj * CDDL HEADER END
203446Smrj */
213446Smrj /*
228679SSeth.Goldberg@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
233446Smrj * Use is subject to license terms.
243446Smrj */
253446Smrj
263446Smrj /*
273446Smrj * Management of KMDB's IDT, which is installed upon KMDB activation.
283446Smrj *
293446Smrj * Debugger activation has two flavors, which cover the cases where KMDB is
303446Smrj * loaded at boot, and when it is loaded after boot. In brief, in both cases,
313446Smrj * the KDI needs to interpose upon several handlers in the IDT. When
323446Smrj * mod-loaded KMDB is deactivated, we undo the IDT interposition, restoring the
333446Smrj * handlers to what they were before we started.
343446Smrj *
353446Smrj * We also take over the entirety of IDT (except the double-fault handler) on
363446Smrj * the active CPU when we're in kmdb so we can handle things like page faults
373446Smrj * sensibly.
383446Smrj *
393446Smrj * Boot-loaded KMDB
403446Smrj *
413446Smrj * When we're first activated, we're running on boot's IDT. We need to be able
423446Smrj * to function in this world, so we'll install our handlers into boot's IDT.
433446Smrj * This is a little complicated: we're using the fake cpu_t set up by
443446Smrj * boot_kdi_tmpinit(), so we can't access cpu_idt directly. Instead,
453446Smrj * kdi_idt_write() notices that cpu_idt is NULL, and works around this problem.
463446Smrj *
473446Smrj * Later, when we're about to switch to the kernel's IDT, it'll call us via
483446Smrj * kdi_idt_sync(), allowing us to add our handlers to the new IDT. While
493446Smrj * boot-loaded KMDB can't be unloaded, we still need to save the descriptors we
503446Smrj * replace so we can pass traps back to the kernel as necessary.
513446Smrj *
523446Smrj * The last phase of boot-loaded KMDB activation occurs at non-boot CPU
533446Smrj * startup. We will be called on each non-boot CPU, thus allowing us to set up
543446Smrj * any watchpoints that may have been configured on the boot CPU and interpose
553446Smrj * on the given CPU's IDT. We don't save the interposed descriptors in this
563446Smrj * case -- see kdi_cpu_init() for details.
573446Smrj *
583446Smrj * Mod-loaded KMDB
593446Smrj *
603446Smrj * This style of activation is much simpler, as the CPUs are already running,
613446Smrj * and are using their own copy of the kernel's IDT. We simply interpose upon
623446Smrj * each CPU's IDT. We save the handlers we replace, both for deactivation and
635084Sjohnlev * for passing traps back to the kernel. Note that for the hypervisors'
645084Sjohnlev * benefit, we need to xcall to the other CPUs to do this, since we need to
655084Sjohnlev * actively set the trap entries in its virtual IDT from that vcpu's context
665084Sjohnlev * rather than just modifying the IDT table from the CPU running kdi_activate().
673446Smrj */
683446Smrj
693446Smrj #include <sys/types.h>
703446Smrj #include <sys/segments.h>
713446Smrj #include <sys/trap.h>
723446Smrj #include <sys/cpuvar.h>
733446Smrj #include <sys/reboot.h>
743446Smrj #include <sys/sunddi.h>
753446Smrj #include <sys/archsystm.h>
763446Smrj #include <sys/kdi_impl.h>
773446Smrj #include <sys/x_call.h>
783446Smrj #include <ia32/sys/psw.h>
793446Smrj
803446Smrj #define KDI_GATE_NVECS 3
813446Smrj
823446Smrj #define KDI_IDT_NOSAVE 0
833446Smrj #define KDI_IDT_SAVE 1
843446Smrj
853446Smrj #define KDI_IDT_DTYPE_KERNEL 0
863446Smrj #define KDI_IDT_DTYPE_BOOT 1
873446Smrj
883446Smrj kdi_cpusave_t *kdi_cpusave;
893446Smrj int kdi_ncpusave;
903446Smrj
913446Smrj static kdi_main_t kdi_kmdb_main;
923446Smrj
933446Smrj kdi_drreg_t kdi_drreg;
943446Smrj
953446Smrj #ifndef __amd64
963446Smrj /* Used to track the current set of valid kernel selectors. */
973446Smrj uint32_t kdi_cs;
983446Smrj uint32_t kdi_ds;
993446Smrj uint32_t kdi_fs;
1003446Smrj uint32_t kdi_gs;
1013446Smrj #endif
1023446Smrj
1033446Smrj uint_t kdi_msr_wrexit_msr;
1043446Smrj uint64_t *kdi_msr_wrexit_valp;
1053446Smrj
1063446Smrj uintptr_t kdi_kernel_handler;
1073446Smrj
1083446Smrj int kdi_trap_switch;
1093446Smrj
1103446Smrj #define KDI_MEMRANGES_MAX 2
1113446Smrj
1123446Smrj kdi_memrange_t kdi_memranges[KDI_MEMRANGES_MAX];
1133446Smrj int kdi_nmemranges;
1143446Smrj
1153446Smrj typedef void idt_hdlr_f(void);
1163446Smrj
1173446Smrj extern idt_hdlr_f kdi_trap0, kdi_trap1, kdi_int2, kdi_trap3, kdi_trap4;
1183446Smrj extern idt_hdlr_f kdi_trap5, kdi_trap6, kdi_trap7, kdi_trap9;
1193446Smrj extern idt_hdlr_f kdi_traperr10, kdi_traperr11, kdi_traperr12;
1203446Smrj extern idt_hdlr_f kdi_traperr13, kdi_traperr14, kdi_trap16, kdi_trap17;
1213446Smrj extern idt_hdlr_f kdi_trap18, kdi_trap19, kdi_trap20, kdi_ivct32;
1223446Smrj extern idt_hdlr_f kdi_invaltrap;
1233446Smrj extern size_t kdi_ivct_size;
1243446Smrj extern char kdi_slave_entry_patch;
1253446Smrj
1263446Smrj typedef struct kdi_gate_spec {
1273446Smrj uint_t kgs_vec;
1283446Smrj uint_t kgs_dpl;
1293446Smrj } kdi_gate_spec_t;
1303446Smrj
/*
 * Beware: kdi_pass_to_kernel() has unpleasant knowledge of this list.
 */
static const kdi_gate_spec_t kdi_gate_specs[KDI_GATE_NVECS] = {
	{ T_SGLSTP, TRP_KPL },		/* single step: kernel privilege */
	{ T_BPTFLT, TRP_UPL },		/* breakpoint: allow int3 from user */
	{ T_DBGENTR, TRP_KPL }		/* explicit debugger entry */
};

/* Kernel descriptors saved when we interpose on the vectors above. */
static gate_desc_t kdi_kgates[KDI_GATE_NVECS];

/* The debugger's own IDT, loaded while a CPU is in kmdb. */
gate_desc_t kdi_idt[NIDT];
1433446Smrj
/*
 * Describes how kdi_idt_init() fills kdi_idt.  Each entry covers the
 * inclusive vector range [id_low, id_high] (id_high == 0 means just
 * id_low) with handlers derived from id_basehdlr; when id_incrp is
 * non-NULL, successive vectors use handlers spaced *id_incrp bytes apart.
 */
struct idt_description {
	uint_t id_low;		/* first vector covered */
	uint_t id_high;		/* last vector, or 0 for a single vector */
	idt_hdlr_f *id_basehdlr; /* handler for id_low */
	size_t *id_incrp;	/* per-vector handler stride, if any */
} idt_description[] = {
	{ T_ZERODIV, 0,		kdi_trap0, NULL },
	{ T_SGLSTP, 0,		kdi_trap1, NULL },
	{ T_NMIFLT, 0,		kdi_int2, NULL },
	{ T_BPTFLT, 0,		kdi_trap3, NULL },
	{ T_OVFLW, 0,		kdi_trap4, NULL },
	{ T_BOUNDFLT, 0,	kdi_trap5, NULL },
	{ T_ILLINST, 0,		kdi_trap6, NULL },
	{ T_NOEXTFLT, 0,	kdi_trap7, NULL },
#if !defined(__xpv)
	/* Double faults keep using the kernel's handler; see file comment. */
	{ T_DBLFLT, 0,		syserrtrap, NULL },
#endif
	{ T_EXTOVRFLT, 0,	kdi_trap9, NULL },
	{ T_TSSFLT, 0,		kdi_traperr10, NULL },
	{ T_SEGFLT, 0,		kdi_traperr11, NULL },
	{ T_STKFLT, 0,		kdi_traperr12, NULL },
	{ T_GPFLT, 0,		kdi_traperr13, NULL },
	{ T_PGFLT, 0,		kdi_traperr14, NULL },
	{ 15, 0,		kdi_invaltrap, NULL },	/* vector 15 reserved */
	{ T_EXTERRFLT, 0, 	kdi_trap16, NULL },
	{ T_ALIGNMENT, 0, 	kdi_trap17, NULL },
	{ T_MCE, 0,		kdi_trap18, NULL },
	{ T_SIMDFPE, 0,		kdi_trap19, NULL },
	{ T_DBGENTR, 0,		kdi_trap20, NULL },
	{ 21, 31,		kdi_invaltrap, NULL },	/* reserved vectors */
	{ 32, 255,		kdi_ivct32, &kdi_ivct_size }, /* interrupts */
	{ 0, 0, NULL },		/* terminator */
};
1773446Smrj
1783446Smrj void
kdi_idt_init(selector_t sel)1793446Smrj kdi_idt_init(selector_t sel)
1803446Smrj {
1813446Smrj struct idt_description *id;
1823446Smrj int i;
1833446Smrj
1843446Smrj for (id = idt_description; id->id_basehdlr != NULL; id++) {
1853446Smrj uint_t high = id->id_high != 0 ? id->id_high : id->id_low;
1863446Smrj size_t incr = id->id_incrp != NULL ? *id->id_incrp : 0;
1873446Smrj
1883446Smrj for (i = id->id_low; i <= high; i++) {
1893446Smrj caddr_t hdlr = (caddr_t)id->id_basehdlr +
1903446Smrj incr * (i - id->id_low);
1913446Smrj set_gatesegd(&kdi_idt[i], (void (*)())hdlr, sel,
1928679SSeth.Goldberg@Sun.COM SDT_SYSIGT, TRP_KPL, i);
1933446Smrj }
1943446Smrj }
1953446Smrj }
1963446Smrj
1973446Smrj /*
1983446Smrj * Patch caller-provided code into the debugger's IDT handlers. This code is
1993446Smrj * used to save MSRs that must be saved before the first branch. All handlers
2003446Smrj * are essentially the same, and end with a branch to kdi_cmnint. To save the
2013446Smrj * MSR, we need to patch in before the branch. The handlers have the following
2023446Smrj * structure: KDI_MSR_PATCHOFF bytes of code, KDI_MSR_PATCHSZ bytes of
2033446Smrj * patchable space, followed by more code.
2043446Smrj */
2053446Smrj void
kdi_idt_patch(caddr_t code,size_t sz)2063446Smrj kdi_idt_patch(caddr_t code, size_t sz)
2073446Smrj {
2083446Smrj int i;
2093446Smrj
2103446Smrj ASSERT(sz <= KDI_MSR_PATCHSZ);
2113446Smrj
2123446Smrj for (i = 0; i < sizeof (kdi_idt) / sizeof (struct gate_desc); i++) {
2133446Smrj gate_desc_t *gd;
2143446Smrj uchar_t *patch;
2153446Smrj
2163446Smrj if (i == T_DBLFLT)
2173446Smrj continue; /* uses kernel's handler */
2183446Smrj
2193446Smrj gd = &kdi_idt[i];
2203446Smrj patch = (uchar_t *)GATESEG_GETOFFSET(gd) + KDI_MSR_PATCHOFF;
2213446Smrj
2223446Smrj /*
2233446Smrj * We can't ASSERT that there's a nop here, because this may be
2243446Smrj * a debugger restart. In that case, we're copying the new
2253446Smrj * patch point over the old one.
2263446Smrj */
2273446Smrj /* FIXME: dtrace fbt ... */
2283446Smrj bcopy(code, patch, sz);
2293446Smrj
2303446Smrj /* Fill the rest with nops to be sure */
2313446Smrj while (sz < KDI_MSR_PATCHSZ)
2323446Smrj patch[sz++] = 0x90; /* nop */
2333446Smrj }
2343446Smrj }
2353446Smrj
2363446Smrj static void
kdi_idt_gates_install(selector_t sel,int saveold)2373446Smrj kdi_idt_gates_install(selector_t sel, int saveold)
2383446Smrj {
2393446Smrj gate_desc_t gates[KDI_GATE_NVECS];
2403446Smrj int i;
2413446Smrj
2423446Smrj bzero(gates, sizeof (*gates));
2433446Smrj
2443446Smrj for (i = 0; i < KDI_GATE_NVECS; i++) {
2453446Smrj const kdi_gate_spec_t *gs = &kdi_gate_specs[i];
2463446Smrj uintptr_t func = GATESEG_GETOFFSET(&kdi_idt[gs->kgs_vec]);
2473446Smrj set_gatesegd(&gates[i], (void (*)())func, sel, SDT_SYSIGT,
2488679SSeth.Goldberg@Sun.COM gs->kgs_dpl, gs->kgs_vec);
2493446Smrj }
2503446Smrj
2513446Smrj for (i = 0; i < KDI_GATE_NVECS; i++) {
2523446Smrj uint_t vec = kdi_gate_specs[i].kgs_vec;
2533446Smrj
2543446Smrj if (saveold)
2553446Smrj kdi_kgates[i] = CPU->cpu_m.mcpu_idt[vec];
2563446Smrj
2573446Smrj kdi_idt_write(&gates[i], vec);
2583446Smrj }
2593446Smrj }
2603446Smrj
2613446Smrj static void
kdi_idt_gates_restore(void)2623446Smrj kdi_idt_gates_restore(void)
2633446Smrj {
2643446Smrj int i;
2653446Smrj
2663446Smrj for (i = 0; i < KDI_GATE_NVECS; i++)
2673446Smrj kdi_idt_write(&kdi_kgates[i], kdi_gate_specs[i].kgs_vec);
2683446Smrj }
2693446Smrj
/*
 * Called when we switch to the kernel's IDT.  We need to interpose on the
 * kernel's IDT entries and stop using KMDBCODE_SEL.
 */
void
kdi_idt_sync(void)
{
	/* Rebuild our own IDT using the kernel's code selector ... */
	kdi_idt_init(KCS_SEL);
	/* ... then re-interpose, saving the kernel's descriptors. */
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_SAVE);
}
2803446Smrj
/*
 * On some processors, we'll need to clear a certain MSR before proceeding into
 * the debugger.  Complicating matters, this MSR must be cleared before we take
 * any branches.  We have patch points in every trap handler, which will cover
 * all entry paths for master CPUs.  We also have a patch point in the slave
 * entry code.
 */
static void
kdi_msr_add_clrentry(uint_t msr)
{
#ifdef __amd64
	/*
	 * Hand-assembled wrmsr sequence; the four 0x00 bytes after the
	 * 0xb9 (movl $imm32, %ecx) opcode are the MSR-number immediate,
	 * filled in below via `patch'.
	 */
	uchar_t code[] = {
		0x51, 0x50, 0x52,		/* pushq %rcx, %rax, %rdx */
		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
		0x31, 0xc0,			/* clr %eax */
		0x31, 0xd2,			/* clr %edx */
		0x0f, 0x30,			/* wrmsr */
		0x5a, 0x58, 0x59		/* popq %rdx, %rax, %rcx */
	};
	uchar_t *patch = &code[4];	/* immediate of the movl above */
#else
	uchar_t code[] = {
		0x60,				/* pushal */
		0xb9, 0x00, 0x00, 0x00, 0x00,	/* movl $MSRNUM, %ecx */
		0x31, 0xc0,			/* clr %eax */
		0x31, 0xd2,			/* clr %edx */
		0x0f, 0x30,			/* wrmsr */
		0x61				/* popal */
	};
	uchar_t *patch = &code[2];	/* immediate of the movl above */
#endif

	/* Splice the MSR number into the movl immediate. */
	bcopy(&msr, patch, sizeof (uint32_t));

	/* Install the sequence at every trap handler's patch point ... */
	kdi_idt_patch((caddr_t)code, sizeof (code));

	/* ... and at the slave-CPU entry path's patch point. */
	bcopy(code, &kdi_slave_entry_patch, sizeof (code));
}
3193446Smrj
3203446Smrj static void
kdi_msr_add_wrexit(uint_t msr,uint64_t * valp)3213446Smrj kdi_msr_add_wrexit(uint_t msr, uint64_t *valp)
3223446Smrj {
3233446Smrj kdi_msr_wrexit_msr = msr;
3243446Smrj kdi_msr_wrexit_valp = valp;
3253446Smrj }
3263446Smrj
/*
 * Register the debugger's per-CPU MSR save areas.  `msrs' is an array of
 * kdi_ncpusave per-CPU slices laid end to end, each terminated by an entry
 * with msr_num == 0.  CPU0's slice is also scanned for MSRs needing special
 * treatment on debugger entry/exit.
 */
void
kdi_set_debug_msrs(kdi_msr_t *msrs)
{
	int nmsrs, i;

	ASSERT(kdi_cpusave[0].krs_msr == NULL);

	/* Look in CPU0's MSRs for any special MSRs. */
	for (nmsrs = 0; msrs[nmsrs].msr_num != 0; nmsrs++) {
		switch (msrs[nmsrs].msr_type) {
		case KDI_MSR_CLEARENTRY:
			/* must be cleared before any branch on entry */
			kdi_msr_add_clrentry(msrs[nmsrs].msr_num);
			break;

		case KDI_MSR_WRITEDELAY:
			/* written back from *kdi_msr_valp on exit */
			kdi_msr_add_wrexit(msrs[nmsrs].msr_num,
			    msrs[nmsrs].kdi_msr_valp);
			break;
		}
	}

	/*
	 * Include the zero-numbered terminator in the per-CPU stride so
	 * each CPU's slice below starts just past the previous slice's
	 * terminator.
	 */
	nmsrs++;

	for (i = 0; i < kdi_ncpusave; i++)
		kdi_cpusave[i].krs_msr = &msrs[nmsrs * i];
}
3533446Smrj
3543446Smrj void
kdi_update_drreg(kdi_drreg_t * drreg)3553446Smrj kdi_update_drreg(kdi_drreg_t *drreg)
3563446Smrj {
3573446Smrj kdi_drreg = *drreg;
3583446Smrj }
3593446Smrj
3603446Smrj void
kdi_memrange_add(caddr_t base,size_t len)3613446Smrj kdi_memrange_add(caddr_t base, size_t len)
3623446Smrj {
3633446Smrj kdi_memrange_t *mr = &kdi_memranges[kdi_nmemranges];
3643446Smrj
3653446Smrj ASSERT(kdi_nmemranges != KDI_MEMRANGES_MAX);
3663446Smrj
3673446Smrj mr->mr_base = base;
3683446Smrj mr->mr_lim = base + len - 1;
3693446Smrj kdi_nmemranges++;
3703446Smrj }
3713446Smrj
3723446Smrj void
kdi_idt_switch(kdi_cpusave_t * cpusave)3733446Smrj kdi_idt_switch(kdi_cpusave_t *cpusave)
3743446Smrj {
3753446Smrj if (cpusave == NULL)
3763446Smrj kdi_idtr_set(kdi_idt, sizeof (kdi_idt) - 1);
3773446Smrj else
3785460Sjosephb kdi_idtr_set(cpusave->krs_idt, (sizeof (*idt0) * NIDT) - 1);
3793446Smrj }
3803446Smrj
/*
 * Activation for CPUs other than the boot CPU, called from that CPU's
 * mp_startup().  We saved the kernel's descriptors when we initialized the
 * boot CPU, so we don't want to do it again.  Saving the handlers from this
 * CPU's IDT would actually be dangerous with the CPU initialization method in
 * use at the time of this writing.  With that method, the startup code creates
 * the IDTs for slave CPUs by copying the one used by the boot CPU, which has
 * already been interposed upon by KMDB.  Were we to interpose again, we'd
 * replace the kernel's descriptors with our own in the save area.  By not
 * saving, but still overwriting, we'll work in the current world, and in any
 * future world where the IDT is generated from scratch.
 */
void
kdi_cpu_init(void)
{
	/* Interpose without saving -- see the block comment above. */
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_NOSAVE);
	/* Load the debug registers and MSRs */
	kdi_cpu_debug_init(&kdi_cpusave[CPU->cpu_id]);
}
4003446Smrj
/*
 * Activation for all CPUs for mod-loaded kmdb, i.e. a kmdb that wasn't
 * loaded at boot.  Runs on each CPU via xcall from kdi_activate(); saves
 * the kernel's descriptors so deactivation can restore them.
 */
static int
kdi_cpu_activate(void)
{
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_SAVE);
	return (0);
}
4113446Smrj
/*
 * Main debugger activation entry point.  Records the debugger entry point
 * (`main') and per-CPU save areas, builds the debugger's IDT, and interposes
 * on the IDT in use: boot's IDT for boot-loaded kmdb (RB_KMDB), or every
 * CPU's kernel IDT (via xcall) for mod-loaded kmdb.
 */
void
kdi_activate(kdi_main_t main, kdi_cpusave_t *cpusave, uint_t ncpusave)
{
	int i;
	cpuset_t cpuset;

	CPUSET_ALL(cpuset);

	kdi_cpusave = cpusave;
	kdi_ncpusave = ncpusave;

	kdi_kmdb_main = main;

	/* Initialize each CPU's save area and crumb (breadcrumb) ring. */
	for (i = 0; i < kdi_ncpusave; i++) {
		kdi_cpusave[i].krs_cpu_id = i;

		kdi_cpusave[i].krs_curcrumb =
		    &kdi_cpusave[i].krs_crumbs[KDI_NCRUMBS - 1];
		kdi_cpusave[i].krs_curcrumbidx = KDI_NCRUMBS - 1;
	}

	/*
	 * Boot-loaded kmdb runs on its own code selector until the kernel's
	 * IDT takes over (see kdi_idt_sync()); mod-loaded kmdb can use the
	 * kernel's code selector immediately.
	 */
	if (boothowto & RB_KMDB)
		kdi_idt_init(KMDBCODE_SEL);
	else
		kdi_idt_init(KCS_SEL);

	/* The initial selector set.  Updated by the debugger-entry code */
#ifndef __amd64
	kdi_cs = B32CODE_SEL;
	kdi_ds = kdi_fs = kdi_gs = B32DATA_SEL;
#endif

	/* The debugger's own segment is always a valid target. */
	kdi_memranges[0].mr_base = kdi_segdebugbase;
	kdi_memranges[0].mr_lim = kdi_segdebugbase + kdi_segdebugsize - 1;
	kdi_nmemranges = 1;

	kdi_drreg.dr_ctl = KDIREG_DRCTL_RESERVED;
	kdi_drreg.dr_stat = KDIREG_DRSTAT_RESERVED;

	kdi_msr_wrexit_msr = 0;
	kdi_msr_wrexit_valp = NULL;

	if (boothowto & RB_KMDB) {
		/* Boot CPU only; others interpose later in kdi_cpu_init(). */
		kdi_idt_gates_install(KMDBCODE_SEL, KDI_IDT_NOSAVE);
	} else {
		/* Interpose on every running CPU from its own context. */
		xc_call(0, 0, 0, CPUSET2BV(cpuset),
		    (xc_func_t)kdi_cpu_activate);
	}
}
4613446Smrj
/*
 * Per-CPU deactivation (xcall target): restore the kernel's saved IDT
 * descriptors on this CPU.
 */
static int
kdi_cpu_deactivate(void)
{
	kdi_idt_gates_restore();
	return (0);
}
4683446Smrj
/*
 * Deactivate mod-loaded kmdb: restore the kernel's IDT descriptors on every
 * CPU and forget the debugger's memory ranges.
 */
void
kdi_deactivate(void)
{
	cpuset_t cpuset;
	CPUSET_ALL(cpuset);

	xc_call(0, 0, 0, CPUSET2BV(cpuset), (xc_func_t)kdi_cpu_deactivate);
	kdi_nmemranges = 0;
}
4783446Smrj
4793446Smrj /*
4803446Smrj * We receive all breakpoints and single step traps. Some of them,
4813446Smrj * including those from userland and those induced by DTrace providers,
4823446Smrj * are intended for the kernel, and must be processed there. We adopt
4833446Smrj * this ours-until-proven-otherwise position due to the painful
4843446Smrj * consequences of sending the kernel an unexpected breakpoint or
4853446Smrj * single step. Unless someone can prove to us that the kernel is
4863446Smrj * prepared to handle the trap, we'll assume there's a problem and will
4873446Smrj * give the user a chance to debug it.
4883446Smrj */
4895084Sjohnlev int
kdi_trap_pass(kdi_cpusave_t * cpusave)4903446Smrj kdi_trap_pass(kdi_cpusave_t *cpusave)
4913446Smrj {
4923446Smrj greg_t tt = cpusave->krs_gregs[KDIREG_TRAPNO];
4933446Smrj greg_t pc = cpusave->krs_gregs[KDIREG_PC];
4943446Smrj greg_t cs = cpusave->krs_gregs[KDIREG_CS];
4953446Smrj
4963446Smrj if (USERMODE(cs))
4973446Smrj return (1);
4983446Smrj
4993446Smrj if (tt != T_BPTFLT && tt != T_SGLSTP)
5003446Smrj return (0);
5013446Smrj
5023446Smrj if (tt == T_BPTFLT && kdi_dtrace_get_state() ==
5033446Smrj KDI_DTSTATE_DTRACE_ACTIVE)
5043446Smrj return (1);
5053446Smrj
5063446Smrj /*
5073446Smrj * See the comments in the kernel's T_SGLSTP handler for why we need to
5083446Smrj * do this.
5093446Smrj */
5103446Smrj if (tt == T_SGLSTP &&
5115084Sjohnlev (pc == (greg_t)sys_sysenter || pc == (greg_t)brand_sys_sysenter))
5123446Smrj return (1);
5133446Smrj
5143446Smrj return (0);
5153446Smrj }
5163446Smrj
/*
 * State has been saved, and all CPUs are on the CPU-specific stacks.  All
 * CPUs enter here, and head off into the debugger proper.
 */
void
kdi_debugger_entry(kdi_cpusave_t *cpusave)
{
	/*
	 * BPTFLT gives us control with %eip set to the instruction *after*
	 * the int 3.  Back it off, so we're looking at the instruction that
	 * triggered the fault.
	 */
	if (cpusave->krs_gregs[KDIREG_TRAPNO] == T_BPTFLT)
		cpusave->krs_gregs[KDIREG_PC]--;

	/* Hand off to the debugger entry point saved by kdi_activate(). */
	kdi_kmdb_main(cpusave);
}
534