10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 52712Snn35248 * Common Development and Distribution License (the "License"). 62712Snn35248 * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 22*11474SJonathan.Adams@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate /* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. 
*/ 270Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */ 280Sstevel@tonic-gate /* All Rights Reserved */ 290Sstevel@tonic-gate 300Sstevel@tonic-gate #include <sys/types.h> 310Sstevel@tonic-gate #include <sys/param.h> 320Sstevel@tonic-gate #include <sys/sysmacros.h> 330Sstevel@tonic-gate #include <sys/signal.h> 340Sstevel@tonic-gate #include <sys/systm.h> 350Sstevel@tonic-gate #include <sys/user.h> 360Sstevel@tonic-gate #include <sys/mman.h> 370Sstevel@tonic-gate #include <sys/class.h> 380Sstevel@tonic-gate #include <sys/proc.h> 390Sstevel@tonic-gate #include <sys/procfs.h> 400Sstevel@tonic-gate #include <sys/buf.h> 410Sstevel@tonic-gate #include <sys/kmem.h> 420Sstevel@tonic-gate #include <sys/cred.h> 430Sstevel@tonic-gate #include <sys/archsystm.h> 440Sstevel@tonic-gate #include <sys/vmparam.h> 450Sstevel@tonic-gate #include <sys/prsystm.h> 460Sstevel@tonic-gate #include <sys/reboot.h> 470Sstevel@tonic-gate #include <sys/uadmin.h> 480Sstevel@tonic-gate #include <sys/vfs.h> 490Sstevel@tonic-gate #include <sys/vnode.h> 500Sstevel@tonic-gate #include <sys/file.h> 510Sstevel@tonic-gate #include <sys/session.h> 520Sstevel@tonic-gate #include <sys/ucontext.h> 530Sstevel@tonic-gate #include <sys/dnlc.h> 540Sstevel@tonic-gate #include <sys/var.h> 550Sstevel@tonic-gate #include <sys/cmn_err.h> 560Sstevel@tonic-gate #include <sys/debugreg.h> 570Sstevel@tonic-gate #include <sys/thread.h> 580Sstevel@tonic-gate #include <sys/vtrace.h> 590Sstevel@tonic-gate #include <sys/consdev.h> 600Sstevel@tonic-gate #include <sys/psw.h> 610Sstevel@tonic-gate #include <sys/regset.h> 620Sstevel@tonic-gate #include <sys/privregs.h> 633451Smrj #include <sys/cpu.h> 640Sstevel@tonic-gate #include <sys/stack.h> 650Sstevel@tonic-gate #include <sys/swap.h> 660Sstevel@tonic-gate #include <vm/hat.h> 670Sstevel@tonic-gate #include <vm/anon.h> 680Sstevel@tonic-gate #include <vm/as.h> 690Sstevel@tonic-gate #include <vm/page.h> 700Sstevel@tonic-gate #include <vm/seg.h> 
710Sstevel@tonic-gate #include <vm/seg_kmem.h> 720Sstevel@tonic-gate #include <vm/seg_map.h> 730Sstevel@tonic-gate #include <vm/seg_vn.h> 740Sstevel@tonic-gate #include <sys/exec.h> 750Sstevel@tonic-gate #include <sys/acct.h> 760Sstevel@tonic-gate #include <sys/core.h> 770Sstevel@tonic-gate #include <sys/corectl.h> 780Sstevel@tonic-gate #include <sys/modctl.h> 790Sstevel@tonic-gate #include <sys/tuneable.h> 800Sstevel@tonic-gate #include <c2/audit.h> 810Sstevel@tonic-gate #include <sys/bootconf.h> 822712Snn35248 #include <sys/brand.h> 830Sstevel@tonic-gate #include <sys/dumphdr.h> 840Sstevel@tonic-gate #include <sys/promif.h> 850Sstevel@tonic-gate #include <sys/systeminfo.h> 860Sstevel@tonic-gate #include <sys/kdi.h> 870Sstevel@tonic-gate #include <sys/contract_impl.h> 880Sstevel@tonic-gate #include <sys/x86_archext.h> 890Sstevel@tonic-gate #include <sys/segments.h> 903446Smrj #include <sys/ontrap.h> 915084Sjohnlev #include <sys/cpu.h> 925084Sjohnlev #ifdef __xpv 935084Sjohnlev #include <sys/hypervisor.h> 945084Sjohnlev #endif 950Sstevel@tonic-gate 960Sstevel@tonic-gate /* 970Sstevel@tonic-gate * Compare the version of boot that boot says it is against 980Sstevel@tonic-gate * the version of boot the kernel expects. 990Sstevel@tonic-gate */ 1000Sstevel@tonic-gate int 1010Sstevel@tonic-gate check_boot_version(int boots_version) 1020Sstevel@tonic-gate { 1030Sstevel@tonic-gate if (boots_version == BO_VERSION) 1040Sstevel@tonic-gate return (0); 1050Sstevel@tonic-gate 1060Sstevel@tonic-gate prom_printf("Wrong boot interface - kernel needs v%d found v%d\n", 1070Sstevel@tonic-gate BO_VERSION, boots_version); 1080Sstevel@tonic-gate prom_panic("halting"); 1090Sstevel@tonic-gate /*NOTREACHED*/ 1100Sstevel@tonic-gate } 1110Sstevel@tonic-gate 1120Sstevel@tonic-gate /* 1130Sstevel@tonic-gate * Process the physical installed list for boot. 
1140Sstevel@tonic-gate * Finds: 1150Sstevel@tonic-gate * 1) the pfn of the highest installed physical page, 1160Sstevel@tonic-gate * 2) the number of pages installed 1170Sstevel@tonic-gate * 3) the number of distinct contiguous regions these pages fall into. 1180Sstevel@tonic-gate */ 1190Sstevel@tonic-gate void 1200Sstevel@tonic-gate installed_top_size( 1210Sstevel@tonic-gate struct memlist *list, /* pointer to start of installed list */ 1220Sstevel@tonic-gate pfn_t *high_pfn, /* return ptr for top value */ 1230Sstevel@tonic-gate pgcnt_t *pgcnt, /* return ptr for sum of installed pages */ 1240Sstevel@tonic-gate int *ranges) /* return ptr for the count of contig. ranges */ 1250Sstevel@tonic-gate { 1260Sstevel@tonic-gate pfn_t top = 0; 1270Sstevel@tonic-gate pgcnt_t sumpages = 0; 1280Sstevel@tonic-gate pfn_t highp; /* high page in a chunk */ 1290Sstevel@tonic-gate int cnt = 0; 1300Sstevel@tonic-gate 131*11474SJonathan.Adams@Sun.COM for (; list; list = list->ml_next) { 1320Sstevel@tonic-gate ++cnt; 133*11474SJonathan.Adams@Sun.COM highp = (list->ml_address + list->ml_size - 1) >> PAGESHIFT; 1340Sstevel@tonic-gate if (top < highp) 1350Sstevel@tonic-gate top = highp; 136*11474SJonathan.Adams@Sun.COM sumpages += btop(list->ml_size); 1370Sstevel@tonic-gate } 1380Sstevel@tonic-gate 1390Sstevel@tonic-gate *high_pfn = top; 1400Sstevel@tonic-gate *pgcnt = sumpages; 1410Sstevel@tonic-gate *ranges = cnt; 1420Sstevel@tonic-gate } 1430Sstevel@tonic-gate 1440Sstevel@tonic-gate /* 1450Sstevel@tonic-gate * Copy in a memory list from boot to kernel, with a filter function 1460Sstevel@tonic-gate * to remove pages. The filter function can increase the address and/or 147842Smec * decrease the size to filter out pages. It will also align addresses and 148842Smec * sizes to PAGESIZE. 
1490Sstevel@tonic-gate */ 1500Sstevel@tonic-gate void 1510Sstevel@tonic-gate copy_memlist_filter( 1520Sstevel@tonic-gate struct memlist *src, 1530Sstevel@tonic-gate struct memlist **dstp, 1540Sstevel@tonic-gate void (*filter)(uint64_t *, uint64_t *)) 1550Sstevel@tonic-gate { 1560Sstevel@tonic-gate struct memlist *dst, *prev; 1570Sstevel@tonic-gate uint64_t addr; 1580Sstevel@tonic-gate uint64_t size; 1590Sstevel@tonic-gate uint64_t eaddr; 1600Sstevel@tonic-gate 1610Sstevel@tonic-gate dst = *dstp; 1620Sstevel@tonic-gate prev = dst; 1630Sstevel@tonic-gate 1640Sstevel@tonic-gate /* 1650Sstevel@tonic-gate * Move through the memlist applying a filter against 1660Sstevel@tonic-gate * each range of memory. Note that we may apply the 1670Sstevel@tonic-gate * filter multiple times against each memlist entry. 1680Sstevel@tonic-gate */ 169*11474SJonathan.Adams@Sun.COM for (; src; src = src->ml_next) { 170*11474SJonathan.Adams@Sun.COM addr = P2ROUNDUP(src->ml_address, PAGESIZE); 171*11474SJonathan.Adams@Sun.COM eaddr = P2ALIGN(src->ml_address + src->ml_size, PAGESIZE); 1720Sstevel@tonic-gate while (addr < eaddr) { 1730Sstevel@tonic-gate size = eaddr - addr; 1740Sstevel@tonic-gate if (filter != NULL) 1750Sstevel@tonic-gate filter(&addr, &size); 1760Sstevel@tonic-gate if (size == 0) 1770Sstevel@tonic-gate break; 178*11474SJonathan.Adams@Sun.COM dst->ml_address = addr; 179*11474SJonathan.Adams@Sun.COM dst->ml_size = size; 180*11474SJonathan.Adams@Sun.COM dst->ml_next = 0; 1810Sstevel@tonic-gate if (prev == dst) { 182*11474SJonathan.Adams@Sun.COM dst->ml_prev = 0; 1830Sstevel@tonic-gate dst++; 1840Sstevel@tonic-gate } else { 185*11474SJonathan.Adams@Sun.COM dst->ml_prev = prev; 186*11474SJonathan.Adams@Sun.COM prev->ml_next = dst; 1870Sstevel@tonic-gate dst++; 1880Sstevel@tonic-gate prev++; 1890Sstevel@tonic-gate } 1900Sstevel@tonic-gate addr += size; 1910Sstevel@tonic-gate } 1920Sstevel@tonic-gate } 1930Sstevel@tonic-gate 1940Sstevel@tonic-gate *dstp = dst; 1950Sstevel@tonic-gate 
}

/*
 * Kernel setup code, called from startup().
 *
 * Wires up the static process-0 (sched) data structures and runs the
 * one-time initializers for threads, pids, and contracts.
 */
void
kern_setup1(void)
{
	proc_t *pp;

	pp = &p0;

	proc_sched = pp;

	/*
	 * Initialize process 0 data structures
	 */
	pp->p_stat = SRUN;
	pp->p_flag = SSYS;

	pp->p_pidp = &pid0;
	pp->p_pgidp = &pid0;
	pp->p_sessp = &session0;
	pp->p_tlist = &t0;
	pid0.pid_pglink = pp;
	pid0.pid_pgtail = pp;

	/*
	 * XXX - we assume that the u-area is zeroed out except for
	 * ttolwp(curthread)->lwp_regs.
	 */
	PTOU(curproc)->u_cmask = (mode_t)CMASK;

	thread_init();		/* init thread_free list */
	pid_init();		/* initialize pid (proc) table */
	contract_init();	/* initialize contracts */

	init_pages_pp_maximum();
}

/*
 * Load a procedure into a thread.
 *
 * Builds a fake C call frame on the thread's kernel stack so the thread,
 * when first run, resumes in thread_start() which calls (*start)(arg, len).
 * If len != 0, the object that arg points at is copied onto the stack and
 * arg is redirected to that copy.
 */
void
thread_load(kthread_t *t, void (*start)(), caddr_t arg, size_t len)
{
	caddr_t sp;
	size_t framesz;
	caddr_t argp;
	long *p;
	extern void thread_start();

	/*
	 * Push a "c" call frame onto the stack to represent
	 * the caller of "start".
	 */
	sp = t->t_stk;
	ASSERT(((uintptr_t)t->t_stk & (STACK_ENTRY_ALIGN - 1)) == 0);
	if (len != 0) {
		/*
		 * the object that arg points at is copied into the
		 * caller's frame.
		 */
		framesz = SA(len);
		sp -= framesz;
		ASSERT(sp > t->t_stkbase);
		argp = sp + SA(MINFRAME);
		bcopy(arg, argp, len);
		arg = argp;	/* arg now points at the stack copy */
	}
	/*
	 * Set up arguments (arg and len) on the caller's stack frame.
	 */
	p = (long *)sp;

	*--p = 0;		/* fake call */
	*--p = 0;		/* null frame pointer terminates stack trace */
	*--p = (long)len;
	*--p = (intptr_t)arg;
	*--p = (intptr_t)start;

	/*
	 * initialize thread to resume at thread_start() which will
	 * turn around and invoke (*start)(arg, len).
 */
	t->t_pc = (uintptr_t)thread_start;
	t->t_sp = (uintptr_t)p;

	ASSERT((t->t_sp & (STACK_ENTRY_ALIGN - 1)) == 0);
}

/*
 * load user registers into lwp.
 *
 * Copies the caller-supplied gregset into the lwp's saved user registers,
 * forces a sane user %eflags/%rflags (PSL_USER), and performs the libc
 * thread-pointer (%fs/%gs base) setup described below.
 */
/*ARGSUSED2*/
void
lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
{
	struct regs *rp = lwptoregs(lwp);

	setgregs(lwp, grp);
	rp->r_ps = PSL_USER;

	/*
	 * For 64-bit lwps, we allow one magic %fs selector value, and one
	 * magic %gs selector to point anywhere in the address space using
	 * %fsbase and %gsbase behind the scenes. libc uses %fs to point
	 * at the ulwp_t structure.
	 *
	 * For 32-bit lwps, libc wedges its lwp thread pointer into the
	 * ucontext ESP slot (which is otherwise irrelevant to setting a
	 * ucontext) and LWPGS_SEL value into gregs[REG_GS].  This is so
	 * syslwp_create() can atomically setup %gs.
	 *
	 * See setup_context() in libc.
	 */
#ifdef _SYSCALL32_IMPL
	if (lwp_getdatamodel(lwp) == DATAMODEL_ILP32) {
		if (grp[REG_GS] == LWPGS_SEL)
			(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
	} else {
		/*
		 * See lwp_setprivate in kernel and setup_context in libc.
		 *
		 * Currently libc constructs a ucontext from whole cloth for
		 * every new (not main) lwp created.  For 64 bit processes
		 * %fsbase is directly set to point to current thread pointer.
		 * In the past (solaris 10) %fs was also set LWPFS_SEL to
		 * indicate %fsbase.  Now we use the null GDT selector for
		 * this purpose.  LWP[FS|GS]_SEL are only intended for 32 bit
		 * processes.  To ease transition we support older libcs in
		 * the newer kernel by forcing %fs or %gs selector to null
		 * by calling lwp_setprivate if LWP[FS|GS]_SEL is passed in
		 * the ucontext.  This should be ripped out at some future
		 * date.  Another fix would be for libc to do a getcontext
		 * and inherit the null %fs/%gs from the current context but
		 * that means an extra system call and could hurt performance.
		 */
		if (grp[REG_FS] == 0x1bb) /* hard code legacy LWPFS_SEL */
			(void) lwp_setprivate(lwp, _LWP_FSBASE,
			    (uintptr_t)grp[REG_FSBASE]);

		if (grp[REG_GS] == 0x1c3) /* hard code legacy LWPGS_SEL */
			(void) lwp_setprivate(lwp, _LWP_GSBASE,
			    (uintptr_t)grp[REG_GSBASE]);
	}
#else
	if (grp[GS] == LWPGS_SEL)
		(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
#endif

	/* force the lwp back out through the full post-syscall path */
	lwp->lwp_eosys = JUSTRETURN;
	lwptot(lwp)->t_post_sys = 1;
}

/*
 * set syscall()'s return values for a lwp.
 */
void
lwp_setrval(klwp_t *lwp, int v1, int v2)
{
	/* clearing the carry flag marks the syscall as successful */
	lwptoregs(lwp)->r_ps &= ~PS_C;
	lwptoregs(lwp)->r_r0 = v1;
	lwptoregs(lwp)->r_r1 = v2;
}

/*
 * Set the user stack pointer for a lwp.
 * (The original comment here was a copy-paste of lwp_setrval's.)
 */
void
lwp_setsp(klwp_t *lwp, caddr_t sp)
{
	lwptoregs(lwp)->r_sp = (intptr_t)sp;
}

/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
#if defined(__amd64)
	struct pcb *pcb = &clwp->lwp_pcb;
	struct regs *rp = lwptoregs(lwp);

	/*
	 * If no segment-register update is already pending in the child's
	 * pcb, snapshot the parent's %ds/%es/%fs/%gs there and mark the
	 * pcb copy current so sys_rtt reloads them on return to userland.
	 */
	if (pcb->pcb_rupdate == 0) {
		pcb->pcb_ds = rp->r_ds;
		pcb->pcb_es = rp->r_es;
		pcb->pcb_fs = rp->r_fs;
		pcb->pcb_gs = rp->r_gs;
		pcb->pcb_rupdate = 1;
		lwptot(clwp)->t_post_sys = 1;
	}
	ASSERT(lwptot(clwp)->t_post_sys);
#endif

	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct regs));
}

/*
 * This function is currently unused on x86.
 */
/*ARGSUSED*/
void
lwp_freeregs(klwp_t *lwp, int isexec)
{}

/*
 * This function is currently unused on x86.
 */
void
lwp_pcb_exit(void)
{}

/*
 * Lwp context ops for segment registers.
 */

/*
 * Every time we come into the kernel (syscall, interrupt or trap
 * but not fast-traps) we capture the current values of the user's
 * segment registers into the lwp's reg structure. This includes
 * lcall for i386 generic system call support since it is handled
 * as a segment-not-present trap.
 *
 * Here we save the current values from the lwp regs into the pcb
 * and set pcb->pcb_rupdate to 1 to tell the rest of the kernel
 * that the pcb copy of the segment registers is the current one.
 * This ensures the lwp's next trip to user land via update_sregs.
 * Finally we set t_post_sys to ensure that no system call fast-path's
 * its way out of the kernel via sysret.
 *
 * (This means that we need to have interrupts disabled when we test
 * t->t_post_sys in the syscall handlers; if the test fails, we need
 * to keep interrupts disabled until we return to userland so we can't
 * be switched away.)
 *
 * As a result of all this, we don't really have to do a whole lot if
 * the thread is just mucking about in the kernel, switching on and
 * off the cpu for whatever reason it feels like.
 * And yet we still preserve fast syscalls, cause if we -don't- get
 * descheduled, we never come here either.
 */

/* sanity check: a usable, present, user-privilege read/write data segment */
#define	VALID_LWP_DESC(udp)	((udp)->usd_type == SDT_MEMRWA && \
	    (udp)->usd_p == 1 && (udp)->usd_dpl == SEL_UPL)

/*ARGSUSED*/
void
lwp_segregs_save(klwp_t *lwp)
{
#if defined(__amd64)
	pcb_t *pcb = &lwp->lwp_pcb;
	struct regs *rp;

	ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
	ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));

	if (pcb->pcb_rupdate == 0) {
		rp = lwptoregs(lwp);

		/*
		 * If there's no update already pending, capture the current
		 * %ds/%es/%fs/%gs values from lwp's regs in case the user
		 * changed them; %fsbase and %gsbase are privileged so the
		 * kernel versions of these registers in pcb_fsbase and
		 * pcb_gsbase are always up-to-date.
		 */
		pcb->pcb_ds = rp->r_ds;
		pcb->pcb_es = rp->r_es;
		pcb->pcb_fs = rp->r_fs;
		pcb->pcb_gs = rp->r_gs;
		pcb->pcb_rupdate = 1;
		lwp->lwp_thread->t_post_sys = 1;
	}
#endif	/* __amd64 */

#if !defined(__xpv)	/* XXPV not sure if we can re-read gdt?
 */
	/* the per-cpu GDT copies of the lwp descriptors must match the pcb */
	ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPFS], &lwp->lwp_pcb.pcb_fsdesc,
	    sizeof (lwp->lwp_pcb.pcb_fsdesc)) == 0);
	ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPGS], &lwp->lwp_pcb.pcb_gsdesc,
	    sizeof (lwp->lwp_pcb.pcb_gsdesc)) == 0);
#endif
}

#if defined(__amd64)

/*
 * Update the segment registers with new values from the pcb.
 *
 * We have to do this carefully, and in the following order,
 * in case any of the selectors points at a bogus descriptor.
 * If they do, we'll catch trap with on_trap and return 1.
 * returns 0 on success.
 *
 * This is particularly tricky for %gs.
 * This routine must be executed under a cli.
 */
int
update_sregs(struct regs *rp, klwp_t *lwp)
{
	pcb_t *pcb = &lwp->lwp_pcb;
	ulong_t kgsbase;
	on_trap_data_t otd;
	int rc = 0;

	if (!on_trap(&otd, OT_SEGMENT_ACCESS)) {

#if defined(__xpv)
		/*
		 * On the hypervisor this is easy.  The hypercall below will
		 * swapgs and load %gs with the user selector.  If the user
		 * selector is bad the hypervisor will catch the fault and
		 * load %gs with the null selector instead.  Either way the
		 * kernel's gsbase is not damaged.
		 */
		kgsbase = (ulong_t)CPU;
		if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL,
		    pcb->pcb_gs) != 0) {
			no_trap();
			return (1);
		}

		rp->r_gs = pcb->pcb_gs;
		ASSERT((cpu_t *)kgsbase == CPU);

#else	/* __xpv */

		/*
		 * A little more complicated running native.
		 */
		kgsbase = (ulong_t)CPU;
		__set_gs(pcb->pcb_gs);

		/*
		 * If __set_gs fails it's because the new %gs is a bad %gs,
		 * we'll be taking a trap but with the original %gs and %gsbase
		 * undamaged (i.e. pointing at curcpu).
		 *
		 * We've just mucked up the kernel's gsbase.  Oops.  In
		 * particular we can't take any traps at all.  Make the newly
		 * computed gsbase be the hidden gs via __swapgs, and fix
		 * the kernel's gsbase back again.  Later, when we return to
		 * userland we'll swapgs again restoring gsbase just loaded
		 * above.
		 */
		__swapgs();
		rp->r_gs = pcb->pcb_gs;

		/*
		 * restore kernel's gsbase
		 */
		wrmsr(MSR_AMD_GSBASE, kgsbase);

#endif	/* __xpv */

		/*
		 * Only override the descriptor base address if
		 * r_gs == LWPGS_SEL or if r_gs == NULL.  A note on
		 * NULL descriptors -- 32-bit programs take faults
		 * if they dereference NULL descriptors; however,
		 * when 64-bit programs load them into %fs or %gs,
		 * they DON'T fault -- only the base address remains
		 * whatever it was from the last load.  Urk.
		 *
		 * XXX - note that lwp_setprivate now sets %fs/%gs to the
		 * null selector for 64 bit processes.  Whereas before
		 * %fs/%gs were set to LWP(FS|GS)_SEL regardless of
		 * the process's data model.  For now we check for both
		 * values so that the kernel can also support the older
		 * libc.  This should be ripped out at some point in the
		 * future.
		 */
		if (pcb->pcb_gs == LWPGS_SEL || pcb->pcb_gs == 0) {
#if defined(__xpv)
			if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER,
			    pcb->pcb_gsbase)) {
				no_trap();
				return (1);
			}
#else
			/* user gsbase lives in KGSBASE until the swapgs out */
			wrmsr(MSR_AMD_KGSBASE, pcb->pcb_gsbase);
#endif
		}

		__set_ds(pcb->pcb_ds);
		rp->r_ds = pcb->pcb_ds;

		__set_es(pcb->pcb_es);
		rp->r_es = pcb->pcb_es;

		__set_fs(pcb->pcb_fs);
		rp->r_fs = pcb->pcb_fs;

		/*
		 * Same as for %gs
		 */
		if (pcb->pcb_fs == LWPFS_SEL || pcb->pcb_fs == 0) {
#if defined(__xpv)
			if (HYPERVISOR_set_segment_base(SEGBASE_FS,
			    pcb->pcb_fsbase)) {
				no_trap();
				return (1);
			}
#else
			wrmsr(MSR_AMD_FSBASE, pcb->pcb_fsbase);
#endif
		}

	} else {
		/* a bogus selector trapped; re-disable interrupts and fail */
		cli();
		rc = 1;
	}
	no_trap();
	return (rc);
}

/*
 * Make sure any stale selectors are cleared from the segment registers
 * by putting KDS_SEL (the kernel's default %ds gdt selector) into them.
 * This is necessary because the kernel itself does not use %es, %fs, nor
 * %ds.  (%cs and %ss are necessary, and are set up by the kernel - along with
 * %gs - to point to the current cpu struct.)  If we enter kmdb while in the
 * kernel and resume with a stale ldt or brandz selector sitting there in a
 * segment register, kmdb will #gp fault if the stale selector points to,
 * for example, an ldt in the context of another process.
 *
 * WARNING: Intel and AMD chips behave differently when storing
 * the null selector into %fs and %gs while in long mode.  On AMD
 * chips fsbase and gsbase are not cleared.  But on Intel chips, storing
 * a null selector into %fs or %gs has the side effect of clearing
 * fsbase or gsbase.  For that reason we use KDS_SEL, which has
 * consistent behavior between AMD and Intel.
 *
 * Caller responsible for preventing cpu migration.
 */
void
reset_sregs(void)
{
	ulong_t kgsbase = (ulong_t)CPU;

	/* caller must have pinned us to this cpu (preempt or high pil) */
	ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);

	cli();
	__set_gs(KGS_SEL);

	/*
	 * restore kernel gsbase
	 */
#if defined(__xpv)
	xen_set_segment_base(SEGBASE_GS_KERNEL, kgsbase);
#else
	wrmsr(MSR_AMD_GSBASE, kgsbase);
#endif

	sti();

	__set_ds(KDS_SEL);
	__set_es(0 | SEL_KPL);	/* selector RPL not ring 0 on hypervisor */
	__set_fs(KFS_SEL);
}

#endif	/* __amd64 */

#ifdef _SYSCALL32_IMPL

/*
 * Make it impossible for a process to change its data model.
 * We do this by toggling the present bits for the 32 and
 * 64-bit user code descriptors.  That way if a user lwp attempts
 * to change its data model (by using the wrong code descriptor in
 * %cs) it will fault immediately.  This also allows us to simplify
 * assertions and checks in the kernel.
 */

static void
gdt_ucode_model(model_t model)
{
	/* no migration while we rewrite this cpu's GDT entries */
	kpreempt_disable();
	if (model == DATAMODEL_NATIVE) {
		gdt_update_usegd(GDT_UCODE, &ucs_on);
		gdt_update_usegd(GDT_U32CODE, &ucs32_off);
	} else {
		gdt_update_usegd(GDT_U32CODE, &ucs32_on);
		gdt_update_usegd(GDT_UCODE, &ucs_off);
	}
	kpreempt_enable();
}

#endif	/* _SYSCALL32_IMPL */

/*
 * Restore lwp private fs and gs segment descriptors
 * on current cpu's GDT.
 */
static void
lwp_segregs_restore(klwp_t *lwp)
{
	pcb_t *pcb = &lwp->lwp_pcb;

	ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
	ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));

#ifdef _SYSCALL32_IMPL
	gdt_ucode_model(DATAMODEL_NATIVE);
#endif

	gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
	gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);

}

#ifdef _SYSCALL32_IMPL

/* 32-bit variant: same restore, but flips the GDT to the ILP32 model. */
static void
lwp_segregs_restore32(klwp_t *lwp)
{
	/*LINTED*/
	cpu_t *cpu = CPU;
	pcb_t *pcb = &lwp->lwp_pcb;

	ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_fsdesc));
	ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_gsdesc));

	gdt_ucode_model(DATAMODEL_ILP32);
	gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
	gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);
}

#endif	/* _SYSCALL32_IMPL */

/*
 * If this is a process in a branded zone, then we want it to use the brand
 * syscall entry points instead of the standard Solaris entry points.  This
 * routine must be called when a new lwp is created within a branded zone
 * or when an existing lwp moves into a branded zone via a zone_enter()
 * operation.
 */
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));

	/*
	 * Sanity check that no brand handlers are installed yet.  Note
	 * this removectx call only runs on DEBUG kernels (ASSERT compiles
	 * away otherwise); since it is expected to find nothing to remove,
	 * that is harmless.
	 */
	ASSERT(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) == 0);
	installctx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL);

	if (t == curthread) {
		/* apply the brand interposition to this cpu right now */
		kpreempt_disable();
		brand_interpositioning_enable();
		kpreempt_enable();
	}
}

/*
 * If this is a process in a branded zone, then we want it to disable the
 * brand syscall entry points.  This routine must be called when the last
 * lwp in a process is exiting in proc_exit().
7586994Sedp */ 7596994Sedp void 7606994Sedp lwp_detach_brand_hdlrs(klwp_t *lwp) 7616994Sedp { 7626994Sedp kthread_t *t = lwptot(lwp); 7636994Sedp 7646994Sedp ASSERT(PROC_IS_BRANDED(lwptoproc(lwp))); 7656994Sedp if (t == curthread) 7666994Sedp kpreempt_disable(); 7676994Sedp 7686994Sedp /* Remove the original context handlers */ 7696994Sedp VERIFY(removectx(t, NULL, brand_interpositioning_disable, 7706994Sedp brand_interpositioning_enable, NULL, NULL, 7716994Sedp brand_interpositioning_disable, NULL) != 0); 7726994Sedp 7736994Sedp if (t == curthread) { 7746994Sedp /* Cleanup our MSR and IDT entries. */ 7756994Sedp brand_interpositioning_disable(); 7766994Sedp kpreempt_enable(); 7776994Sedp } 7786994Sedp } 7796994Sedp 7806994Sedp /* 7810Sstevel@tonic-gate * Add any lwp-associated context handlers to the lwp at the beginning 7820Sstevel@tonic-gate * of the lwp's useful life. 7830Sstevel@tonic-gate * 7840Sstevel@tonic-gate * All paths which create lwp's invoke lwp_create(); lwp_create() 7850Sstevel@tonic-gate * invokes lwp_stk_init() which initializes the stack, sets up 7860Sstevel@tonic-gate * lwp_regs, and invokes this routine. 7870Sstevel@tonic-gate * 7880Sstevel@tonic-gate * All paths which destroy lwp's invoke lwp_exit() to rip the lwp 7890Sstevel@tonic-gate * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it 7900Sstevel@tonic-gate * ends up in thread_free() which invokes freectx(t, 0) before 7910Sstevel@tonic-gate * invoking lwp_stk_fini(). When the lwp is recycled from death 7920Sstevel@tonic-gate * row, lwp_stk_fini() is invoked, then thread_free(), and thus 7930Sstevel@tonic-gate * freectx(t, 0) as before. 7940Sstevel@tonic-gate * 7950Sstevel@tonic-gate * In the case of exec, the surviving lwp is thoroughly scrubbed 7960Sstevel@tonic-gate * clean; exec invokes freectx(t, 1) to destroy associated contexts. 7970Sstevel@tonic-gate * On the way back to the new image, it invokes setregs() which 7980Sstevel@tonic-gate * in turn invokes this routine. 
7990Sstevel@tonic-gate */ 8000Sstevel@tonic-gate void 8010Sstevel@tonic-gate lwp_installctx(klwp_t *lwp) 8020Sstevel@tonic-gate { 8030Sstevel@tonic-gate kthread_t *t = lwptot(lwp); 8040Sstevel@tonic-gate int thisthread = t == curthread; 8050Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL 8060Sstevel@tonic-gate void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ? 8070Sstevel@tonic-gate lwp_segregs_restore : lwp_segregs_restore32; 8080Sstevel@tonic-gate #else 8090Sstevel@tonic-gate void (*restop)(klwp_t *) = lwp_segregs_restore; 8100Sstevel@tonic-gate #endif 8110Sstevel@tonic-gate 8120Sstevel@tonic-gate /* 8130Sstevel@tonic-gate * Install the basic lwp context handlers on each lwp. 8140Sstevel@tonic-gate * 8150Sstevel@tonic-gate * On the amd64 kernel, the context handlers are responsible for 8160Sstevel@tonic-gate * virtualizing %ds, %es, %fs, and %gs to the lwp. The register 8170Sstevel@tonic-gate * values are only ever changed via sys_rtt when the 8184503Ssudheer * pcb->pcb_rupdate == 1. Only sys_rtt gets to clear the bit. 
8190Sstevel@tonic-gate * 8200Sstevel@tonic-gate * On the i386 kernel, the context handlers are responsible for 8210Sstevel@tonic-gate * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs 8220Sstevel@tonic-gate */ 8230Sstevel@tonic-gate ASSERT(removectx(t, lwp, lwp_segregs_save, restop, 8240Sstevel@tonic-gate NULL, NULL, NULL, NULL) == 0); 8250Sstevel@tonic-gate if (thisthread) 8260Sstevel@tonic-gate kpreempt_disable(); 8270Sstevel@tonic-gate installctx(t, lwp, lwp_segregs_save, restop, 8280Sstevel@tonic-gate NULL, NULL, NULL, NULL); 8290Sstevel@tonic-gate if (thisthread) { 8300Sstevel@tonic-gate /* 8310Sstevel@tonic-gate * Since we're the right thread, set the values in the GDT 8320Sstevel@tonic-gate */ 8330Sstevel@tonic-gate restop(lwp); 8340Sstevel@tonic-gate kpreempt_enable(); 8350Sstevel@tonic-gate } 8360Sstevel@tonic-gate 8370Sstevel@tonic-gate /* 8380Sstevel@tonic-gate * If we have sysenter/sysexit instructions enabled, we need 8390Sstevel@tonic-gate * to ensure that the hardware mechanism is kept up-to-date with the 8400Sstevel@tonic-gate * lwp's kernel stack pointer across context switches. 8410Sstevel@tonic-gate * 8420Sstevel@tonic-gate * sep_save zeros the sysenter stack pointer msr; sep_restore sets 8430Sstevel@tonic-gate * it to the lwp's kernel stack pointer (kstktop). 
8440Sstevel@tonic-gate */ 8450Sstevel@tonic-gate if (x86_feature & X86_SEP) { 8460Sstevel@tonic-gate #if defined(__amd64) 8470Sstevel@tonic-gate caddr_t kstktop = (caddr_t)lwp->lwp_regs; 8480Sstevel@tonic-gate #elif defined(__i386) 8490Sstevel@tonic-gate caddr_t kstktop = ((caddr_t)lwp->lwp_regs - MINFRAME) + 8500Sstevel@tonic-gate SA(sizeof (struct regs) + MINFRAME); 8510Sstevel@tonic-gate #endif 8520Sstevel@tonic-gate ASSERT(removectx(t, kstktop, 8530Sstevel@tonic-gate sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0); 8540Sstevel@tonic-gate 8550Sstevel@tonic-gate if (thisthread) 8560Sstevel@tonic-gate kpreempt_disable(); 8570Sstevel@tonic-gate installctx(t, kstktop, 8580Sstevel@tonic-gate sep_save, sep_restore, NULL, NULL, NULL, NULL); 8590Sstevel@tonic-gate if (thisthread) { 8600Sstevel@tonic-gate /* 8610Sstevel@tonic-gate * We're the right thread, so set the stack pointer 8620Sstevel@tonic-gate * for the first sysenter instruction to use 8630Sstevel@tonic-gate */ 8640Sstevel@tonic-gate sep_restore(kstktop); 8650Sstevel@tonic-gate kpreempt_enable(); 8660Sstevel@tonic-gate } 8670Sstevel@tonic-gate } 8682712Snn35248 8692712Snn35248 if (PROC_IS_BRANDED(ttoproc(t))) 8702712Snn35248 lwp_attach_brand_hdlrs(lwp); 8710Sstevel@tonic-gate } 8720Sstevel@tonic-gate 8730Sstevel@tonic-gate /* 8740Sstevel@tonic-gate * Clear registers on exec(2). 
8750Sstevel@tonic-gate */ 8760Sstevel@tonic-gate void 8770Sstevel@tonic-gate setregs(uarg_t *args) 8780Sstevel@tonic-gate { 8790Sstevel@tonic-gate struct regs *rp; 8800Sstevel@tonic-gate kthread_t *t = curthread; 8810Sstevel@tonic-gate klwp_t *lwp = ttolwp(t); 8820Sstevel@tonic-gate pcb_t *pcb = &lwp->lwp_pcb; 8830Sstevel@tonic-gate greg_t sp; 8840Sstevel@tonic-gate 8850Sstevel@tonic-gate /* 8860Sstevel@tonic-gate * Initialize user registers 8870Sstevel@tonic-gate */ 8880Sstevel@tonic-gate (void) save_syscall_args(); /* copy args from registers first */ 8890Sstevel@tonic-gate rp = lwptoregs(lwp); 8900Sstevel@tonic-gate sp = rp->r_sp; 8910Sstevel@tonic-gate bzero(rp, sizeof (*rp)); 8920Sstevel@tonic-gate 8930Sstevel@tonic-gate rp->r_ss = UDS_SEL; 8940Sstevel@tonic-gate rp->r_sp = sp; 8950Sstevel@tonic-gate rp->r_pc = args->entry; 8960Sstevel@tonic-gate rp->r_ps = PSL_USER; 8970Sstevel@tonic-gate 8980Sstevel@tonic-gate #if defined(__amd64) 8990Sstevel@tonic-gate 9000Sstevel@tonic-gate pcb->pcb_fs = pcb->pcb_gs = 0; 9010Sstevel@tonic-gate pcb->pcb_fsbase = pcb->pcb_gsbase = 0; 9020Sstevel@tonic-gate 9030Sstevel@tonic-gate if (ttoproc(t)->p_model == DATAMODEL_NATIVE) { 9040Sstevel@tonic-gate 9050Sstevel@tonic-gate rp->r_cs = UCS_SEL; 9060Sstevel@tonic-gate 9070Sstevel@tonic-gate /* 9080Sstevel@tonic-gate * Only allow 64-bit user code descriptor to be present. 9090Sstevel@tonic-gate */ 9103446Smrj gdt_ucode_model(DATAMODEL_NATIVE); 9110Sstevel@tonic-gate 9120Sstevel@tonic-gate /* 9130Sstevel@tonic-gate * Arrange that the virtualized %fs and %gs GDT descriptors 9140Sstevel@tonic-gate * have a well-defined initial state (present, ring 3 9150Sstevel@tonic-gate * and of type data). 9160Sstevel@tonic-gate */ 9170Sstevel@tonic-gate pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc; 9180Sstevel@tonic-gate 9190Sstevel@tonic-gate /* 9200Sstevel@tonic-gate * thrptr is either NULL or a value used by DTrace. 9210Sstevel@tonic-gate * 64-bit processes use %fs as their "thread" register. 
9220Sstevel@tonic-gate */ 9230Sstevel@tonic-gate if (args->thrptr) 9240Sstevel@tonic-gate (void) lwp_setprivate(lwp, _LWP_FSBASE, args->thrptr); 9250Sstevel@tonic-gate 9260Sstevel@tonic-gate } else { 9270Sstevel@tonic-gate 9280Sstevel@tonic-gate rp->r_cs = U32CS_SEL; 9290Sstevel@tonic-gate rp->r_ds = rp->r_es = UDS_SEL; 9300Sstevel@tonic-gate 9310Sstevel@tonic-gate /* 9320Sstevel@tonic-gate * only allow 32-bit user code selector to be present. 9330Sstevel@tonic-gate */ 9343446Smrj gdt_ucode_model(DATAMODEL_ILP32); 9350Sstevel@tonic-gate 9360Sstevel@tonic-gate pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc; 9370Sstevel@tonic-gate 9380Sstevel@tonic-gate /* 9390Sstevel@tonic-gate * thrptr is either NULL or a value used by DTrace. 9400Sstevel@tonic-gate * 32-bit processes use %gs as their "thread" register. 9410Sstevel@tonic-gate */ 9420Sstevel@tonic-gate if (args->thrptr) 9430Sstevel@tonic-gate (void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr); 9440Sstevel@tonic-gate 9450Sstevel@tonic-gate } 9460Sstevel@tonic-gate 9470Sstevel@tonic-gate pcb->pcb_ds = rp->r_ds; 9480Sstevel@tonic-gate pcb->pcb_es = rp->r_es; 9494503Ssudheer pcb->pcb_rupdate = 1; 9500Sstevel@tonic-gate 9510Sstevel@tonic-gate #elif defined(__i386) 9520Sstevel@tonic-gate 9530Sstevel@tonic-gate rp->r_cs = UCS_SEL; 9540Sstevel@tonic-gate rp->r_ds = rp->r_es = UDS_SEL; 9550Sstevel@tonic-gate 9560Sstevel@tonic-gate /* 9570Sstevel@tonic-gate * Arrange that the virtualized %fs and %gs GDT descriptors 9580Sstevel@tonic-gate * have a well-defined initial state (present, ring 3 9590Sstevel@tonic-gate * and of type data). 9600Sstevel@tonic-gate */ 9610Sstevel@tonic-gate pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc; 9620Sstevel@tonic-gate 9630Sstevel@tonic-gate /* 9640Sstevel@tonic-gate * For %gs we need to reset LWP_GSBASE in pcb and the 9650Sstevel@tonic-gate * per-cpu GDT descriptor. thrptr is either NULL 9660Sstevel@tonic-gate * or a value used by DTrace. 
9670Sstevel@tonic-gate */ 9680Sstevel@tonic-gate if (args->thrptr) 9690Sstevel@tonic-gate (void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr); 9700Sstevel@tonic-gate #endif 9710Sstevel@tonic-gate 9720Sstevel@tonic-gate lwp->lwp_eosys = JUSTRETURN; 9730Sstevel@tonic-gate t->t_post_sys = 1; 9740Sstevel@tonic-gate 9750Sstevel@tonic-gate /* 9760Sstevel@tonic-gate * Here we initialize minimal fpu state. 9770Sstevel@tonic-gate * The rest is done at the first floating 9780Sstevel@tonic-gate * point instruction that a process executes. 9790Sstevel@tonic-gate */ 9800Sstevel@tonic-gate pcb->pcb_fpu.fpu_flags = 0; 9810Sstevel@tonic-gate 9820Sstevel@tonic-gate /* 9830Sstevel@tonic-gate * Add the lwp context handlers that virtualize segment registers, 9840Sstevel@tonic-gate * and/or system call stacks etc. 9850Sstevel@tonic-gate */ 9860Sstevel@tonic-gate lwp_installctx(lwp); 9870Sstevel@tonic-gate } 9880Sstevel@tonic-gate 9892712Snn35248 user_desc_t * 9902712Snn35248 cpu_get_gdt(void) 9912712Snn35248 { 9922712Snn35248 return (CPU->cpu_gdt); 9932712Snn35248 } 9942712Snn35248 9952712Snn35248 9960Sstevel@tonic-gate #if !defined(lwp_getdatamodel) 9970Sstevel@tonic-gate 9980Sstevel@tonic-gate /* 9990Sstevel@tonic-gate * Return the datamodel of the given lwp. 10000Sstevel@tonic-gate */ 10010Sstevel@tonic-gate /*ARGSUSED*/ 10020Sstevel@tonic-gate model_t 10030Sstevel@tonic-gate lwp_getdatamodel(klwp_t *lwp) 10040Sstevel@tonic-gate { 10050Sstevel@tonic-gate return (lwp->lwp_procp->p_model); 10060Sstevel@tonic-gate } 10070Sstevel@tonic-gate 10080Sstevel@tonic-gate #endif /* !lwp_getdatamodel */ 10090Sstevel@tonic-gate 10100Sstevel@tonic-gate #if !defined(get_udatamodel) 10110Sstevel@tonic-gate 10120Sstevel@tonic-gate model_t 10130Sstevel@tonic-gate get_udatamodel(void) 10140Sstevel@tonic-gate { 10150Sstevel@tonic-gate return (curproc->p_model); 10160Sstevel@tonic-gate } 10170Sstevel@tonic-gate 10180Sstevel@tonic-gate #endif /* !get_udatamodel */ 1019