/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel/Debugger Interface (KDI) routines.  Called during debugger under
 * various system states (boot, while running, while the debugger has control).
 * Functions intended for use while the debugger has control may not grab any
 * locks or perform any functions that assume the availability of other system
 * services.
 */

#include <sys/systm.h>
#include <sys/x86_archext.h>
#include <sys/kdi_impl.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_types.h>
#include <sys/segments.h>
#include <sys/archsystm.h>
#include <sys/controlregs.h>
#include <sys/trap.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/hypervisor.h>
#include <sys/bootconf.h>
#include <sys/bootinfo.h>
#include <sys/promif.h>
#include <sys/evtchn_impl.h>
#include <sys/cpu.h>
#include <vm/kboot_mmu.h>
#include <vm/hat_pte.h>

static volatile int kdi_slaves_go;

/*
 * These are not safe against dropping into kmdb when fbt::: is active. This is
 * also broken on i86pc...
 */

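/*
 * Under the hypervisor we don't get to load an IDTR of our own; each gate
 * has to be registered with Xen individually, so kdi_idtr_write() simply
 * walks the table and pushes every vector via xen_idt_write().
 */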
void
kdi_idtr_write(desctbr_t *idtr)
{
	gate_desc_t *idt = (gate_desc_t *)idtr->dtr_base;
	uint_t nidt = (idtr->dtr_limit + 1) / sizeof (*idt);
	uint_t vec;

	for (vec = 0; vec < nidt; vec++, idt++)
		xen_idt_write(idt, vec);
}

void
kdi_idt_write(gate_desc_t *gate, uint_t vec)
{
	gate_desc_t *idt = CPU->cpu_m.mcpu_idt;

	/*
	 * See kdi_idtr_set().
	 */
	if (idt != NULL)
		idt[vec] = *gate;

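	/* Hand the updated gate to the hypervisor as well. */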
	xen_idt_write(gate, vec);
}

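/*
 * A paravirtualized domain can't access the debug registers directly, so
 * reads and writes go through the hypervisor's debugreg hypercalls.
 */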
ulong_t
kdi_dreg_get(int reg)
{
	return (__hypercall1(__HYPERVISOR_get_debugreg, (long)reg));
}

void
kdi_dreg_set(int reg, ulong_t value)
{
	(void) __hypercall2(__HYPERVISOR_set_debugreg, (long)reg, value);
}

void
kdi_flush_caches(void)
{
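	/* Nothing to do. */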
}

/*
 * To avoid domains sucking up CPU while sitting in kmdb, we make all the slave
 * CPUs wait for a wake-up evtchn. The master CPU, meanwhile, sleeps for
 * console activity.
 */

extern void kdi_slave_entry(void);

void
kdi_stop_slaves(int cpu, int doxc)
{
	if (doxc)
		kdi_xc_others(cpu, kdi_slave_entry);
	kdi_slaves_go = 0;
}

void
kdi_start_slaves(void)
{
	int c;

	kdi_slaves_go = 1;

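	/* Poke each ready CPU out of its evtchn wait (see kdi_slave_wait()). */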
	for (c = 0; c < NCPU; c++) {
		if (cpu[c] == NULL || !(cpu[c]->cpu_flags & CPU_READY))
			continue;
		ec_try_ipi(XC_CPUPOKE_PIL, c);
	}
}

/*ARGSUSED*/
static int
check_slave(void *arg)
{
	return (kdi_slaves_go == 1);
}

void
kdi_slave_wait(void)
{
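	/* Only CPUs that are marked ready wait on the wake-up event channel. */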
	if (!(cpu[CPU->cpu_id]->cpu_flags & CPU_READY))
		return;

	ec_wait_on_ipi(XC_CPUPOKE_PIL, check_slave, NULL);
}

/*
 * Caution.
 * These routines are called -extremely- early, during kmdb initialization.
 *
 * Many common kernel functions assume that %gs has been initialized,
 * and fail horribly if it hasn't. At this point, the boot code has
 * reserved a descriptor for us (KMDBGS_SEL) in its GDT; arrange for it
 * to point at a dummy cpu_t, temporarily at least.
 *
 * Note that kmdb entry relies on the fake cpu_t having zero cpu_idt/cpu_id.
 */

#if defined(__amd64)

void *
boot_kdi_tmpinit(void)
{
	cpu_t *cpu = kobj_zalloc(sizeof (*cpu), KM_TMP);
	user_desc_t *bgdt;
	uint64_t gdtpa;
	ulong_t ma[1];

	cpu->cpu_self = cpu;

	/*
	 * (Note that we had better switch to a -new- GDT before
	 * we discard the KM_TMP mappings, or disaster will ensue.)
	 */
	bgdt = kobj_zalloc(PAGESIZE, KM_TMP);
	ASSERT(((uintptr_t)bgdt & PAGEOFFSET) == 0);

	init_boot_gdt(bgdt);

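	/*
	 * Xen requires the frame backing the GDT to be read-only before it
	 * will accept it, so convert the mapping and hand over the machine
	 * frame number.
	 */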
	gdtpa = pfn_to_pa(va_to_pfn(bgdt));
	ma[0] = (ulong_t)(pa_to_ma(gdtpa) >> PAGESHIFT);
	kbm_read_only((uintptr_t)bgdt, gdtpa);
	if (HYPERVISOR_set_gdt(ma, PAGESIZE / sizeof (user_desc_t)))
		panic("boot_kdi_tmpinit:HYPERVISOR_set_gdt() failed");

	load_segment_registers(B64CODE_SEL, 0, 0, B32DATA_SEL);

	/*
	 * Now point %gsbase to our temp cpu structure.
	 */
	xen_set_segment_base(SEGBASE_GS_KERNEL, (ulong_t)cpu);
	return (0);
}

/*ARGSUSED*/
void
boot_kdi_tmpfini(void *old)
{
	/*
	 * This breaks, why do we need it anyway?
	 */
#if 0 /* XXPV */
	load_segment_registers(B64CODE_SEL, 0, KMDBGS_SEL, B32DATA_SEL);
#endif
}

#elif defined(__i386)

/*
 * Sigh. We're called before we've initialized the kernel's GDT, living
 * off the hypervisor's default GDT. For kmdb's sake, we switch now to
 * a GDT that looks like dboot's GDT; very shortly we'll initialize and
 * switch to the kernel's GDT.
 */

void *
boot_kdi_tmpinit(void)
{
	cpu_t *cpu = kobj_zalloc(sizeof (*cpu), KM_TMP);
	user_desc_t *bgdt;
	uint64_t gdtpa;
	ulong_t ma[1];

	cpu->cpu_self = cpu;

	/*
	 * (Note that we had better switch to a -new- GDT before
	 * we discard the KM_TMP mappings, or disaster will ensue.)
	 */
	bgdt = kobj_zalloc(PAGESIZE, KM_TMP);

	ASSERT(((uintptr_t)bgdt & PAGEOFFSET) == 0);
	gdtpa = pfn_to_pa(va_to_pfn(bgdt));

	init_boot_gdt(bgdt);

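	/*
	 * Create a descriptor for the temporary cpu_t so that %gs-relative
	 * accesses work once KMDBGS_SEL is loaded below.
	 */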
	set_usegd(&bgdt[GDT_BGSTMP],
	    cpu, sizeof (*cpu), SDT_MEMRWA, SEL_KPL, SDP_BYTES, SDP_OP32);

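	/* As on amd64, Xen requires the GDT frame to be read-only. */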
	ma[0] = (ulong_t)(pa_to_ma(gdtpa) >> PAGESHIFT);
	kbm_read_only((uintptr_t)bgdt, gdtpa);
	if (HYPERVISOR_set_gdt(ma, PAGESIZE / sizeof (user_desc_t)))
		panic("boot_kdi_tmpinit:HYPERVISOR_set_gdt() failed");

	load_segment_registers(B32CODE_SEL, B32DATA_SEL, B32DATA_SEL, 0,
	    KMDBGS_SEL, B32DATA_SEL);
	return (0);
}

/*ARGSUSED*/
void
boot_kdi_tmpfini(void *old)
{
	load_segment_registers(B32CODE_SEL, B32DATA_SEL, B32DATA_SEL, 0,
	    0, B32DATA_SEL);
}

#endif /* __i386 */