10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 52712Snn35248 * Common Development and Distribution License (the "License"). 62712Snn35248 * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 22*6994Sedp * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate /* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. 
*/ 270Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */ 280Sstevel@tonic-gate /* All Rights Reserved */ 290Sstevel@tonic-gate 300Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 310Sstevel@tonic-gate 320Sstevel@tonic-gate #include <sys/types.h> 330Sstevel@tonic-gate #include <sys/param.h> 340Sstevel@tonic-gate #include <sys/sysmacros.h> 350Sstevel@tonic-gate #include <sys/signal.h> 360Sstevel@tonic-gate #include <sys/systm.h> 370Sstevel@tonic-gate #include <sys/user.h> 380Sstevel@tonic-gate #include <sys/mman.h> 390Sstevel@tonic-gate #include <sys/class.h> 400Sstevel@tonic-gate #include <sys/proc.h> 410Sstevel@tonic-gate #include <sys/procfs.h> 420Sstevel@tonic-gate #include <sys/buf.h> 430Sstevel@tonic-gate #include <sys/kmem.h> 440Sstevel@tonic-gate #include <sys/cred.h> 450Sstevel@tonic-gate #include <sys/archsystm.h> 460Sstevel@tonic-gate #include <sys/vmparam.h> 470Sstevel@tonic-gate #include <sys/prsystm.h> 480Sstevel@tonic-gate #include <sys/reboot.h> 490Sstevel@tonic-gate #include <sys/uadmin.h> 500Sstevel@tonic-gate #include <sys/vfs.h> 510Sstevel@tonic-gate #include <sys/vnode.h> 520Sstevel@tonic-gate #include <sys/file.h> 530Sstevel@tonic-gate #include <sys/session.h> 540Sstevel@tonic-gate #include <sys/ucontext.h> 550Sstevel@tonic-gate #include <sys/dnlc.h> 560Sstevel@tonic-gate #include <sys/var.h> 570Sstevel@tonic-gate #include <sys/cmn_err.h> 580Sstevel@tonic-gate #include <sys/debugreg.h> 590Sstevel@tonic-gate #include <sys/thread.h> 600Sstevel@tonic-gate #include <sys/vtrace.h> 610Sstevel@tonic-gate #include <sys/consdev.h> 620Sstevel@tonic-gate #include <sys/psw.h> 630Sstevel@tonic-gate #include <sys/regset.h> 640Sstevel@tonic-gate #include <sys/privregs.h> 653451Smrj #include <sys/cpu.h> 660Sstevel@tonic-gate #include <sys/stack.h> 670Sstevel@tonic-gate #include <sys/swap.h> 680Sstevel@tonic-gate #include <vm/hat.h> 690Sstevel@tonic-gate #include <vm/anon.h> 700Sstevel@tonic-gate #include <vm/as.h> 
710Sstevel@tonic-gate #include <vm/page.h> 720Sstevel@tonic-gate #include <vm/seg.h> 730Sstevel@tonic-gate #include <vm/seg_kmem.h> 740Sstevel@tonic-gate #include <vm/seg_map.h> 750Sstevel@tonic-gate #include <vm/seg_vn.h> 760Sstevel@tonic-gate #include <sys/exec.h> 770Sstevel@tonic-gate #include <sys/acct.h> 780Sstevel@tonic-gate #include <sys/core.h> 790Sstevel@tonic-gate #include <sys/corectl.h> 800Sstevel@tonic-gate #include <sys/modctl.h> 810Sstevel@tonic-gate #include <sys/tuneable.h> 820Sstevel@tonic-gate #include <c2/audit.h> 830Sstevel@tonic-gate #include <sys/bootconf.h> 842712Snn35248 #include <sys/brand.h> 850Sstevel@tonic-gate #include <sys/dumphdr.h> 860Sstevel@tonic-gate #include <sys/promif.h> 870Sstevel@tonic-gate #include <sys/systeminfo.h> 880Sstevel@tonic-gate #include <sys/kdi.h> 890Sstevel@tonic-gate #include <sys/contract_impl.h> 900Sstevel@tonic-gate #include <sys/x86_archext.h> 910Sstevel@tonic-gate #include <sys/segments.h> 923446Smrj #include <sys/ontrap.h> 935084Sjohnlev #include <sys/cpu.h> 945084Sjohnlev #ifdef __xpv 955084Sjohnlev #include <sys/hypervisor.h> 965084Sjohnlev #endif 970Sstevel@tonic-gate 980Sstevel@tonic-gate /* 990Sstevel@tonic-gate * Compare the version of boot that boot says it is against 1000Sstevel@tonic-gate * the version of boot the kernel expects. 1010Sstevel@tonic-gate */ 1020Sstevel@tonic-gate int 1030Sstevel@tonic-gate check_boot_version(int boots_version) 1040Sstevel@tonic-gate { 1050Sstevel@tonic-gate if (boots_version == BO_VERSION) 1060Sstevel@tonic-gate return (0); 1070Sstevel@tonic-gate 1080Sstevel@tonic-gate prom_printf("Wrong boot interface - kernel needs v%d found v%d\n", 1090Sstevel@tonic-gate BO_VERSION, boots_version); 1100Sstevel@tonic-gate prom_panic("halting"); 1110Sstevel@tonic-gate /*NOTREACHED*/ 1120Sstevel@tonic-gate } 1130Sstevel@tonic-gate 1140Sstevel@tonic-gate /* 1150Sstevel@tonic-gate * Process the physical installed list for boot. 
1160Sstevel@tonic-gate * Finds: 1170Sstevel@tonic-gate * 1) the pfn of the highest installed physical page, 1180Sstevel@tonic-gate * 2) the number of pages installed 1190Sstevel@tonic-gate * 3) the number of distinct contiguous regions these pages fall into. 1200Sstevel@tonic-gate */ 1210Sstevel@tonic-gate void 1220Sstevel@tonic-gate installed_top_size( 1230Sstevel@tonic-gate struct memlist *list, /* pointer to start of installed list */ 1240Sstevel@tonic-gate pfn_t *high_pfn, /* return ptr for top value */ 1250Sstevel@tonic-gate pgcnt_t *pgcnt, /* return ptr for sum of installed pages */ 1260Sstevel@tonic-gate int *ranges) /* return ptr for the count of contig. ranges */ 1270Sstevel@tonic-gate { 1280Sstevel@tonic-gate pfn_t top = 0; 1290Sstevel@tonic-gate pgcnt_t sumpages = 0; 1300Sstevel@tonic-gate pfn_t highp; /* high page in a chunk */ 1310Sstevel@tonic-gate int cnt = 0; 1320Sstevel@tonic-gate 1330Sstevel@tonic-gate for (; list; list = list->next) { 1340Sstevel@tonic-gate ++cnt; 1350Sstevel@tonic-gate highp = (list->address + list->size - 1) >> PAGESHIFT; 1360Sstevel@tonic-gate if (top < highp) 1370Sstevel@tonic-gate top = highp; 1380Sstevel@tonic-gate sumpages += btop(list->size); 1390Sstevel@tonic-gate } 1400Sstevel@tonic-gate 1410Sstevel@tonic-gate *high_pfn = top; 1420Sstevel@tonic-gate *pgcnt = sumpages; 1430Sstevel@tonic-gate *ranges = cnt; 1440Sstevel@tonic-gate } 1450Sstevel@tonic-gate 1460Sstevel@tonic-gate /* 1470Sstevel@tonic-gate * Copy in a memory list from boot to kernel, with a filter function 1480Sstevel@tonic-gate * to remove pages. The filter function can increase the address and/or 149842Smec * decrease the size to filter out pages. It will also align addresses and 150842Smec * sizes to PAGESIZE. 
1510Sstevel@tonic-gate */ 1520Sstevel@tonic-gate void 1530Sstevel@tonic-gate copy_memlist_filter( 1540Sstevel@tonic-gate struct memlist *src, 1550Sstevel@tonic-gate struct memlist **dstp, 1560Sstevel@tonic-gate void (*filter)(uint64_t *, uint64_t *)) 1570Sstevel@tonic-gate { 1580Sstevel@tonic-gate struct memlist *dst, *prev; 1590Sstevel@tonic-gate uint64_t addr; 1600Sstevel@tonic-gate uint64_t size; 1610Sstevel@tonic-gate uint64_t eaddr; 1620Sstevel@tonic-gate 1630Sstevel@tonic-gate dst = *dstp; 1640Sstevel@tonic-gate prev = dst; 1650Sstevel@tonic-gate 1660Sstevel@tonic-gate /* 1670Sstevel@tonic-gate * Move through the memlist applying a filter against 1680Sstevel@tonic-gate * each range of memory. Note that we may apply the 1690Sstevel@tonic-gate * filter multiple times against each memlist entry. 1700Sstevel@tonic-gate */ 1710Sstevel@tonic-gate for (; src; src = src->next) { 172842Smec addr = P2ROUNDUP(src->address, PAGESIZE); 173842Smec eaddr = P2ALIGN(src->address + src->size, PAGESIZE); 1740Sstevel@tonic-gate while (addr < eaddr) { 1750Sstevel@tonic-gate size = eaddr - addr; 1760Sstevel@tonic-gate if (filter != NULL) 1770Sstevel@tonic-gate filter(&addr, &size); 1780Sstevel@tonic-gate if (size == 0) 1790Sstevel@tonic-gate break; 1800Sstevel@tonic-gate dst->address = addr; 1810Sstevel@tonic-gate dst->size = size; 1820Sstevel@tonic-gate dst->next = 0; 1830Sstevel@tonic-gate if (prev == dst) { 1840Sstevel@tonic-gate dst->prev = 0; 1850Sstevel@tonic-gate dst++; 1860Sstevel@tonic-gate } else { 1870Sstevel@tonic-gate dst->prev = prev; 1880Sstevel@tonic-gate prev->next = dst; 1890Sstevel@tonic-gate dst++; 1900Sstevel@tonic-gate prev++; 1910Sstevel@tonic-gate } 1920Sstevel@tonic-gate addr += size; 1930Sstevel@tonic-gate } 1940Sstevel@tonic-gate } 1950Sstevel@tonic-gate 1960Sstevel@tonic-gate *dstp = dst; 1970Sstevel@tonic-gate } 1980Sstevel@tonic-gate 1990Sstevel@tonic-gate /* 2000Sstevel@tonic-gate * Kernel setup code, called from startup(). 
2010Sstevel@tonic-gate */ 2020Sstevel@tonic-gate void 2030Sstevel@tonic-gate kern_setup1(void) 2040Sstevel@tonic-gate { 2050Sstevel@tonic-gate proc_t *pp; 2060Sstevel@tonic-gate 2070Sstevel@tonic-gate pp = &p0; 2080Sstevel@tonic-gate 2090Sstevel@tonic-gate proc_sched = pp; 2100Sstevel@tonic-gate 2110Sstevel@tonic-gate /* 2120Sstevel@tonic-gate * Initialize process 0 data structures 2130Sstevel@tonic-gate */ 2140Sstevel@tonic-gate pp->p_stat = SRUN; 2150Sstevel@tonic-gate pp->p_flag = SSYS; 2160Sstevel@tonic-gate 2170Sstevel@tonic-gate pp->p_pidp = &pid0; 2180Sstevel@tonic-gate pp->p_pgidp = &pid0; 2190Sstevel@tonic-gate pp->p_sessp = &session0; 2200Sstevel@tonic-gate pp->p_tlist = &t0; 2210Sstevel@tonic-gate pid0.pid_pglink = pp; 222749Ssusans pid0.pid_pgtail = pp; 2230Sstevel@tonic-gate 2240Sstevel@tonic-gate /* 2250Sstevel@tonic-gate * XXX - we asssume that the u-area is zeroed out except for 2260Sstevel@tonic-gate * ttolwp(curthread)->lwp_regs. 2270Sstevel@tonic-gate */ 2283446Smrj PTOU(curproc)->u_cmask = (mode_t)CMASK; 2290Sstevel@tonic-gate 2300Sstevel@tonic-gate thread_init(); /* init thread_free list */ 2310Sstevel@tonic-gate pid_init(); /* initialize pid (proc) table */ 2320Sstevel@tonic-gate contract_init(); /* initialize contracts */ 2330Sstevel@tonic-gate 2340Sstevel@tonic-gate init_pages_pp_maximum(); 2350Sstevel@tonic-gate } 2360Sstevel@tonic-gate 2370Sstevel@tonic-gate /* 2380Sstevel@tonic-gate * Load a procedure into a thread. 
2390Sstevel@tonic-gate */ 2400Sstevel@tonic-gate void 2410Sstevel@tonic-gate thread_load(kthread_t *t, void (*start)(), caddr_t arg, size_t len) 2420Sstevel@tonic-gate { 2430Sstevel@tonic-gate caddr_t sp; 2440Sstevel@tonic-gate size_t framesz; 2450Sstevel@tonic-gate caddr_t argp; 2460Sstevel@tonic-gate long *p; 2470Sstevel@tonic-gate extern void thread_start(); 2480Sstevel@tonic-gate 2490Sstevel@tonic-gate /* 2500Sstevel@tonic-gate * Push a "c" call frame onto the stack to represent 2510Sstevel@tonic-gate * the caller of "start". 2520Sstevel@tonic-gate */ 2530Sstevel@tonic-gate sp = t->t_stk; 2540Sstevel@tonic-gate ASSERT(((uintptr_t)t->t_stk & (STACK_ENTRY_ALIGN - 1)) == 0); 2550Sstevel@tonic-gate if (len != 0) { 2560Sstevel@tonic-gate /* 2570Sstevel@tonic-gate * the object that arg points at is copied into the 2580Sstevel@tonic-gate * caller's frame. 2590Sstevel@tonic-gate */ 2600Sstevel@tonic-gate framesz = SA(len); 2610Sstevel@tonic-gate sp -= framesz; 2620Sstevel@tonic-gate ASSERT(sp > t->t_stkbase); 2630Sstevel@tonic-gate argp = sp + SA(MINFRAME); 2640Sstevel@tonic-gate bcopy(arg, argp, len); 2650Sstevel@tonic-gate arg = argp; 2660Sstevel@tonic-gate } 2670Sstevel@tonic-gate /* 2680Sstevel@tonic-gate * Set up arguments (arg and len) on the caller's stack frame. 2690Sstevel@tonic-gate */ 2700Sstevel@tonic-gate p = (long *)sp; 2710Sstevel@tonic-gate 2720Sstevel@tonic-gate *--p = 0; /* fake call */ 2730Sstevel@tonic-gate *--p = 0; /* null frame pointer terminates stack trace */ 2740Sstevel@tonic-gate *--p = (long)len; 2750Sstevel@tonic-gate *--p = (intptr_t)arg; 2760Sstevel@tonic-gate *--p = (intptr_t)start; 2770Sstevel@tonic-gate 2780Sstevel@tonic-gate /* 2790Sstevel@tonic-gate * initialize thread to resume at thread_start() which will 2800Sstevel@tonic-gate * turn around and invoke (*start)(arg, len). 
2810Sstevel@tonic-gate */ 2820Sstevel@tonic-gate t->t_pc = (uintptr_t)thread_start; 2830Sstevel@tonic-gate t->t_sp = (uintptr_t)p; 2840Sstevel@tonic-gate 2850Sstevel@tonic-gate ASSERT((t->t_sp & (STACK_ENTRY_ALIGN - 1)) == 0); 2860Sstevel@tonic-gate } 2870Sstevel@tonic-gate 2880Sstevel@tonic-gate /* 2890Sstevel@tonic-gate * load user registers into lwp. 2900Sstevel@tonic-gate */ 2910Sstevel@tonic-gate /*ARGSUSED2*/ 2920Sstevel@tonic-gate void 2930Sstevel@tonic-gate lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr) 2940Sstevel@tonic-gate { 2950Sstevel@tonic-gate struct regs *rp = lwptoregs(lwp); 2960Sstevel@tonic-gate 2970Sstevel@tonic-gate setgregs(lwp, grp); 2980Sstevel@tonic-gate rp->r_ps = PSL_USER; 2990Sstevel@tonic-gate 3000Sstevel@tonic-gate /* 3015084Sjohnlev * For 64-bit lwps, we allow one magic %fs selector value, and one 3025084Sjohnlev * magic %gs selector to point anywhere in the address space using 3030Sstevel@tonic-gate * %fsbase and %gsbase behind the scenes. libc uses %fs to point 3040Sstevel@tonic-gate * at the ulwp_t structure. 3050Sstevel@tonic-gate * 3060Sstevel@tonic-gate * For 32-bit lwps, libc wedges its lwp thread pointer into the 3070Sstevel@tonic-gate * ucontext ESP slot (which is otherwise irrelevant to setting a 3080Sstevel@tonic-gate * ucontext) and LWPGS_SEL value into gregs[REG_GS]. This is so 3090Sstevel@tonic-gate * syslwp_create() can atomically setup %gs. 3100Sstevel@tonic-gate * 3110Sstevel@tonic-gate * See setup_context() in libc. 3120Sstevel@tonic-gate */ 3130Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL 3140Sstevel@tonic-gate if (lwp_getdatamodel(lwp) == DATAMODEL_ILP32) { 3150Sstevel@tonic-gate if (grp[REG_GS] == LWPGS_SEL) 3160Sstevel@tonic-gate (void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr); 3173446Smrj } else { 3183446Smrj /* 3193446Smrj * See lwp_setprivate in kernel and setup_context in libc. 
3203446Smrj * 3213446Smrj * Currently libc constructs a ucontext from whole cloth for 3223446Smrj * every new (not main) lwp created. For 64 bit processes 3233446Smrj * %fsbase is directly set to point to current thread pointer. 3243446Smrj * In the past (solaris 10) %fs was also set LWPFS_SEL to 3253446Smrj * indicate %fsbase. Now we use the null GDT selector for 3263446Smrj * this purpose. LWP[FS|GS]_SEL are only intended for 32 bit 3273446Smrj * processes. To ease transition we support older libcs in 3283446Smrj * the newer kernel by forcing %fs or %gs selector to null 3293446Smrj * by calling lwp_setprivate if LWP[FS|GS]_SEL is passed in 3303446Smrj * the ucontext. This is should be ripped out at some future 3313446Smrj * date. Another fix would be for libc to do a getcontext 3323446Smrj * and inherit the null %fs/%gs from the current context but 3333446Smrj * that means an extra system call and could hurt performance. 3343446Smrj */ 3353446Smrj if (grp[REG_FS] == 0x1bb) /* hard code legacy LWPFS_SEL */ 3364883Ssp92102 (void) lwp_setprivate(lwp, _LWP_FSBASE, 3374883Ssp92102 (uintptr_t)grp[REG_FSBASE]); 3383446Smrj 3393446Smrj if (grp[REG_GS] == 0x1c3) /* hard code legacy LWPGS_SEL */ 3404883Ssp92102 (void) lwp_setprivate(lwp, _LWP_GSBASE, 3414883Ssp92102 (uintptr_t)grp[REG_GSBASE]); 3420Sstevel@tonic-gate } 3430Sstevel@tonic-gate #else 3440Sstevel@tonic-gate if (grp[GS] == LWPGS_SEL) 3450Sstevel@tonic-gate (void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr); 3460Sstevel@tonic-gate #endif 3470Sstevel@tonic-gate 3480Sstevel@tonic-gate lwp->lwp_eosys = JUSTRETURN; 3490Sstevel@tonic-gate lwptot(lwp)->t_post_sys = 1; 3500Sstevel@tonic-gate } 3510Sstevel@tonic-gate 3520Sstevel@tonic-gate /* 3530Sstevel@tonic-gate * set syscall()'s return values for a lwp. 
3540Sstevel@tonic-gate */ 3550Sstevel@tonic-gate void 3560Sstevel@tonic-gate lwp_setrval(klwp_t *lwp, int v1, int v2) 3570Sstevel@tonic-gate { 3580Sstevel@tonic-gate lwptoregs(lwp)->r_ps &= ~PS_C; 3590Sstevel@tonic-gate lwptoregs(lwp)->r_r0 = v1; 3600Sstevel@tonic-gate lwptoregs(lwp)->r_r1 = v2; 3610Sstevel@tonic-gate } 3620Sstevel@tonic-gate 3630Sstevel@tonic-gate /* 3640Sstevel@tonic-gate * set syscall()'s return values for a lwp. 3650Sstevel@tonic-gate */ 3660Sstevel@tonic-gate void 3670Sstevel@tonic-gate lwp_setsp(klwp_t *lwp, caddr_t sp) 3680Sstevel@tonic-gate { 3690Sstevel@tonic-gate lwptoregs(lwp)->r_sp = (intptr_t)sp; 3700Sstevel@tonic-gate } 3710Sstevel@tonic-gate 3720Sstevel@tonic-gate /* 3730Sstevel@tonic-gate * Copy regs from parent to child. 3740Sstevel@tonic-gate */ 3750Sstevel@tonic-gate void 3760Sstevel@tonic-gate lwp_forkregs(klwp_t *lwp, klwp_t *clwp) 3770Sstevel@tonic-gate { 3780Sstevel@tonic-gate #if defined(__amd64) 3792712Snn35248 struct pcb *pcb = &clwp->lwp_pcb; 3802712Snn35248 struct regs *rp = lwptoregs(lwp); 3812712Snn35248 3824503Ssudheer if (pcb->pcb_rupdate == 0) { 3832712Snn35248 pcb->pcb_ds = rp->r_ds; 3842712Snn35248 pcb->pcb_es = rp->r_es; 3852712Snn35248 pcb->pcb_fs = rp->r_fs; 3862712Snn35248 pcb->pcb_gs = rp->r_gs; 3874503Ssudheer pcb->pcb_rupdate = 1; 3882712Snn35248 lwptot(clwp)->t_post_sys = 1; 3892712Snn35248 } 3902712Snn35248 ASSERT(lwptot(clwp)->t_post_sys); 3910Sstevel@tonic-gate #endif 3922712Snn35248 3930Sstevel@tonic-gate bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct regs)); 3940Sstevel@tonic-gate } 3950Sstevel@tonic-gate 3960Sstevel@tonic-gate /* 3970Sstevel@tonic-gate * This function is currently unused on x86. 3980Sstevel@tonic-gate */ 3990Sstevel@tonic-gate /*ARGSUSED*/ 4000Sstevel@tonic-gate void 4010Sstevel@tonic-gate lwp_freeregs(klwp_t *lwp, int isexec) 4020Sstevel@tonic-gate {} 4030Sstevel@tonic-gate 4040Sstevel@tonic-gate /* 4050Sstevel@tonic-gate * This function is currently unused on x86. 
4060Sstevel@tonic-gate */ 4070Sstevel@tonic-gate void 4080Sstevel@tonic-gate lwp_pcb_exit(void) 4090Sstevel@tonic-gate {} 4100Sstevel@tonic-gate 4110Sstevel@tonic-gate /* 4120Sstevel@tonic-gate * Lwp context ops for segment registers. 4130Sstevel@tonic-gate */ 4140Sstevel@tonic-gate 4150Sstevel@tonic-gate /* 4160Sstevel@tonic-gate * Every time we come into the kernel (syscall, interrupt or trap 4170Sstevel@tonic-gate * but not fast-traps) we capture the current values of the user's 4180Sstevel@tonic-gate * segment registers into the lwp's reg structure. This includes 4190Sstevel@tonic-gate * lcall for i386 generic system call support since it is handled 4200Sstevel@tonic-gate * as a segment-not-present trap. 4210Sstevel@tonic-gate * 4220Sstevel@tonic-gate * Here we save the current values from the lwp regs into the pcb 4234503Ssudheer * and set pcb->pcb_rupdate to 1 to tell the rest of the kernel 4240Sstevel@tonic-gate * that the pcb copy of the segment registers is the current one. 4250Sstevel@tonic-gate * This ensures the lwp's next trip to user land via update_sregs. 4260Sstevel@tonic-gate * Finally we set t_post_sys to ensure that no system call fast-path's 4270Sstevel@tonic-gate * its way out of the kernel via sysret. 4280Sstevel@tonic-gate * 4290Sstevel@tonic-gate * (This means that we need to have interrupts disabled when we test 4300Sstevel@tonic-gate * t->t_post_sys in the syscall handlers; if the test fails, we need 4310Sstevel@tonic-gate * to keep interrupts disabled until we return to userland so we can't 4320Sstevel@tonic-gate * be switched away.) 4330Sstevel@tonic-gate * 4340Sstevel@tonic-gate * As a result of all this, we don't really have to do a whole lot if 4350Sstevel@tonic-gate * the thread is just mucking about in the kernel, switching on and 4360Sstevel@tonic-gate * off the cpu for whatever reason it feels like. 
And yet we still 4370Sstevel@tonic-gate * preserve fast syscalls, cause if we -don't- get descheduled, 4380Sstevel@tonic-gate * we never come here either. 4390Sstevel@tonic-gate */ 4400Sstevel@tonic-gate 4410Sstevel@tonic-gate #define VALID_LWP_DESC(udp) ((udp)->usd_type == SDT_MEMRWA && \ 4420Sstevel@tonic-gate (udp)->usd_p == 1 && (udp)->usd_dpl == SEL_UPL) 4430Sstevel@tonic-gate 4445084Sjohnlev /*ARGSUSED*/ 4450Sstevel@tonic-gate void 4460Sstevel@tonic-gate lwp_segregs_save(klwp_t *lwp) 4470Sstevel@tonic-gate { 4480Sstevel@tonic-gate #if defined(__amd64) 4490Sstevel@tonic-gate pcb_t *pcb = &lwp->lwp_pcb; 4500Sstevel@tonic-gate struct regs *rp; 4510Sstevel@tonic-gate 4520Sstevel@tonic-gate ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc)); 4530Sstevel@tonic-gate ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc)); 4540Sstevel@tonic-gate 4554503Ssudheer if (pcb->pcb_rupdate == 0) { 4560Sstevel@tonic-gate rp = lwptoregs(lwp); 4570Sstevel@tonic-gate 4580Sstevel@tonic-gate /* 4590Sstevel@tonic-gate * If there's no update already pending, capture the current 4600Sstevel@tonic-gate * %ds/%es/%fs/%gs values from lwp's regs in case the user 4610Sstevel@tonic-gate * changed them; %fsbase and %gsbase are privileged so the 4620Sstevel@tonic-gate * kernel versions of these registers in pcb_fsbase and 4630Sstevel@tonic-gate * pcb_gsbase are always up-to-date. 4640Sstevel@tonic-gate */ 4650Sstevel@tonic-gate pcb->pcb_ds = rp->r_ds; 4660Sstevel@tonic-gate pcb->pcb_es = rp->r_es; 4670Sstevel@tonic-gate pcb->pcb_fs = rp->r_fs; 4680Sstevel@tonic-gate pcb->pcb_gs = rp->r_gs; 4694503Ssudheer pcb->pcb_rupdate = 1; 4700Sstevel@tonic-gate lwp->lwp_thread->t_post_sys = 1; 4710Sstevel@tonic-gate } 4720Sstevel@tonic-gate #endif /* __amd64 */ 4730Sstevel@tonic-gate 4745084Sjohnlev #if !defined(__xpv) /* XXPV not sure if we can re-read gdt? 
*/ 4750Sstevel@tonic-gate ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPFS], &lwp->lwp_pcb.pcb_fsdesc, 4760Sstevel@tonic-gate sizeof (lwp->lwp_pcb.pcb_fsdesc)) == 0); 4770Sstevel@tonic-gate ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPGS], &lwp->lwp_pcb.pcb_gsdesc, 4780Sstevel@tonic-gate sizeof (lwp->lwp_pcb.pcb_gsdesc)) == 0); 4795084Sjohnlev #endif 4800Sstevel@tonic-gate } 4810Sstevel@tonic-gate 4823446Smrj #if defined(__amd64) 4833446Smrj 4843446Smrj /* 4855084Sjohnlev * Update the segment registers with new values from the pcb. 4863446Smrj * 4873446Smrj * We have to do this carefully, and in the following order, 4883446Smrj * in case any of the selectors points at a bogus descriptor. 4893446Smrj * If they do, we'll catch trap with on_trap and return 1. 4903446Smrj * returns 0 on success. 4913446Smrj * 4923446Smrj * This is particularly tricky for %gs. 4933446Smrj * This routine must be executed under a cli. 4943446Smrj */ 4953446Smrj int 4963446Smrj update_sregs(struct regs *rp, klwp_t *lwp) 4973446Smrj { 4983446Smrj pcb_t *pcb = &lwp->lwp_pcb; 4993446Smrj ulong_t kgsbase; 5003446Smrj on_trap_data_t otd; 5013446Smrj int rc = 0; 5023446Smrj 5033446Smrj if (!on_trap(&otd, OT_SEGMENT_ACCESS)) { 5043446Smrj 5055084Sjohnlev #if defined(__xpv) 5065084Sjohnlev /* 5075084Sjohnlev * On the hyervisor this is easy. The hypercall below will 5085084Sjohnlev * swapgs and load %gs with the user selector. If the user 5095084Sjohnlev * selector is bad the hypervisor will catch the fault and 5105084Sjohnlev * load %gs with the null selector instead. Either way the 5115084Sjohnlev * kernel's gsbase is not damaged. 
5125084Sjohnlev */ 5135084Sjohnlev kgsbase = (ulong_t)CPU; 5145084Sjohnlev if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, 5155084Sjohnlev pcb->pcb_gs) != 0) { 5165084Sjohnlev no_trap(); 5175084Sjohnlev return (1); 5185084Sjohnlev } 5195084Sjohnlev 5205084Sjohnlev rp->r_gs = pcb->pcb_gs; 5215084Sjohnlev ASSERT((cpu_t *)kgsbase == CPU); 5225084Sjohnlev 5235084Sjohnlev #else /* __xpv */ 5245084Sjohnlev 5255084Sjohnlev /* 5265084Sjohnlev * A little more complicated running native. 5275084Sjohnlev */ 5283446Smrj kgsbase = (ulong_t)CPU; 5293446Smrj __set_gs(pcb->pcb_gs); 5303446Smrj 5313446Smrj /* 5323446Smrj * If __set_gs fails it's because the new %gs is a bad %gs, 5333446Smrj * we'll be taking a trap but with the original %gs and %gsbase 5343446Smrj * undamaged (i.e. pointing at curcpu). 5353446Smrj * 5363446Smrj * We've just mucked up the kernel's gsbase. Oops. In 5373446Smrj * particular we can't take any traps at all. Make the newly 5385084Sjohnlev * computed gsbase be the hidden gs via __swapgs, and fix 5393446Smrj * the kernel's gsbase back again. Later, when we return to 5403446Smrj * userland we'll swapgs again restoring gsbase just loaded 5413446Smrj * above. 5423446Smrj */ 5433446Smrj __swapgs(); 5443446Smrj rp->r_gs = pcb->pcb_gs; 5453446Smrj 5463446Smrj /* 5473446Smrj * restore kernel's gsbase 5483446Smrj */ 5493446Smrj wrmsr(MSR_AMD_GSBASE, kgsbase); 5503446Smrj 5515084Sjohnlev #endif /* __xpv */ 5525084Sjohnlev 5533446Smrj /* 5543446Smrj * Only override the descriptor base address if 5553446Smrj * r_gs == LWPGS_SEL or if r_gs == NULL. A note on 5563446Smrj * NULL descriptors -- 32-bit programs take faults 5573446Smrj * if they deference NULL descriptors; however, 5583446Smrj * when 64-bit programs load them into %fs or %gs, 5593446Smrj * they DONT fault -- only the base address remains 5603446Smrj * whatever it was from the last load. Urk. 
5613446Smrj * 5623446Smrj * XXX - note that lwp_setprivate now sets %fs/%gs to the 5633446Smrj * null selector for 64 bit processes. Whereas before 5643446Smrj * %fs/%gs were set to LWP(FS|GS)_SEL regardless of 5653446Smrj * the process's data model. For now we check for both 5663446Smrj * values so that the kernel can also support the older 5673446Smrj * libc. This should be ripped out at some point in the 5683446Smrj * future. 5693446Smrj */ 5705084Sjohnlev if (pcb->pcb_gs == LWPGS_SEL || pcb->pcb_gs == 0) { 5715084Sjohnlev #if defined(__xpv) 5725084Sjohnlev if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 5735084Sjohnlev pcb->pcb_gsbase)) { 5745084Sjohnlev no_trap(); 5755084Sjohnlev return (1); 5765084Sjohnlev } 5775084Sjohnlev #else 5783446Smrj wrmsr(MSR_AMD_KGSBASE, pcb->pcb_gsbase); 5795084Sjohnlev #endif 5805084Sjohnlev } 5813446Smrj 5823446Smrj __set_ds(pcb->pcb_ds); 5833446Smrj rp->r_ds = pcb->pcb_ds; 5843446Smrj 5853446Smrj __set_es(pcb->pcb_es); 5863446Smrj rp->r_es = pcb->pcb_es; 5873446Smrj 5883446Smrj __set_fs(pcb->pcb_fs); 5893446Smrj rp->r_fs = pcb->pcb_fs; 5903446Smrj 5913446Smrj /* 5923446Smrj * Same as for %gs 5933446Smrj */ 5945084Sjohnlev if (pcb->pcb_fs == LWPFS_SEL || pcb->pcb_fs == 0) { 5955084Sjohnlev #if defined(__xpv) 5965084Sjohnlev if (HYPERVISOR_set_segment_base(SEGBASE_FS, 5975084Sjohnlev pcb->pcb_fsbase)) { 5985084Sjohnlev no_trap(); 5995084Sjohnlev return (1); 6005084Sjohnlev } 6015084Sjohnlev #else 6023446Smrj wrmsr(MSR_AMD_FSBASE, pcb->pcb_fsbase); 6035084Sjohnlev #endif 6045084Sjohnlev } 6053446Smrj 6063446Smrj } else { 6073446Smrj cli(); 6083446Smrj rc = 1; 6093446Smrj } 6103446Smrj no_trap(); 6113446Smrj return (rc); 6123446Smrj } 6135084Sjohnlev 6145084Sjohnlev /* 6155084Sjohnlev * Make sure any stale selectors are cleared from the segment registers 6165084Sjohnlev * by putting KDS_SEL (the kernel's default %ds gdt selector) into them. 
6175084Sjohnlev * This is necessary because the kernel itself does not use %es, %fs, nor 6185084Sjohnlev * %ds. (%cs and %ss are necessary, and are set up by the kernel - along with 6195084Sjohnlev * %gs - to point to the current cpu struct.) If we enter kmdb while in the 6205084Sjohnlev * kernel and resume with a stale ldt or brandz selector sitting there in a 6215084Sjohnlev * segment register, kmdb will #gp fault if the stale selector points to, 6225084Sjohnlev * for example, an ldt in the context of another process. 6235084Sjohnlev * 6245084Sjohnlev * WARNING: Intel and AMD chips behave differently when storing 6255084Sjohnlev * the null selector into %fs and %gs while in long mode. On AMD 6265084Sjohnlev * chips fsbase and gsbase are not cleared. But on Intel chips, storing 6275084Sjohnlev * a null selector into %fs or %gs has the side effect of clearing 6285084Sjohnlev * fsbase or gsbase. For that reason we use KDS_SEL, which has 6295084Sjohnlev * consistent behavor between AMD and Intel. 6305084Sjohnlev * 6315084Sjohnlev * Caller responsible for preventing cpu migration. 
6325084Sjohnlev */ 6335084Sjohnlev void 6345084Sjohnlev reset_sregs(void) 6355084Sjohnlev { 6365084Sjohnlev ulong_t kgsbase = (ulong_t)CPU; 6375084Sjohnlev 6385084Sjohnlev ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL); 6395084Sjohnlev 6405084Sjohnlev cli(); 6415084Sjohnlev __set_gs(KGS_SEL); 6425084Sjohnlev 6435084Sjohnlev /* 6445084Sjohnlev * restore kernel gsbase 6455084Sjohnlev */ 6465084Sjohnlev #if defined(__xpv) 6475084Sjohnlev xen_set_segment_base(SEGBASE_GS_KERNEL, kgsbase); 6485084Sjohnlev #else 6495084Sjohnlev wrmsr(MSR_AMD_GSBASE, kgsbase); 6505084Sjohnlev #endif 6515084Sjohnlev 6525084Sjohnlev sti(); 6535084Sjohnlev 6545084Sjohnlev __set_ds(KDS_SEL); 6555084Sjohnlev __set_es(0 | SEL_KPL); /* selector RPL not ring 0 on hypervisor */ 6565084Sjohnlev __set_fs(KFS_SEL); 6575084Sjohnlev } 6585084Sjohnlev 6593446Smrj #endif /* __amd64 */ 6603446Smrj 6613446Smrj #ifdef _SYSCALL32_IMPL 6623446Smrj 6633446Smrj /* 6643446Smrj * Make it impossible for a process to change its data model. 6653446Smrj * We do this by toggling the present bits for the 32 and 6663446Smrj * 64-bit user code descriptors. That way if a user lwp attempts 6673446Smrj * to change its data model (by using the wrong code descriptor in 6683446Smrj * %cs) it will fault immediately. This also allows us to simplify 6693446Smrj * assertions and checks in the kernel. 
6703446Smrj */ 6715084Sjohnlev 6723446Smrj static void 6733446Smrj gdt_ucode_model(model_t model) 6743446Smrj { 6753446Smrj kpreempt_disable(); 6763446Smrj if (model == DATAMODEL_NATIVE) { 6775084Sjohnlev gdt_update_usegd(GDT_UCODE, &ucs_on); 6785084Sjohnlev gdt_update_usegd(GDT_U32CODE, &ucs32_off); 6793446Smrj } else { 6805084Sjohnlev gdt_update_usegd(GDT_U32CODE, &ucs32_on); 6815084Sjohnlev gdt_update_usegd(GDT_UCODE, &ucs_off); 6823446Smrj } 6833446Smrj kpreempt_enable(); 6843446Smrj } 6853446Smrj 6863446Smrj #endif /* _SYSCALL32_IMPL */ 6873446Smrj 6880Sstevel@tonic-gate /* 6890Sstevel@tonic-gate * Restore lwp private fs and gs segment descriptors 6900Sstevel@tonic-gate * on current cpu's GDT. 6910Sstevel@tonic-gate */ 6920Sstevel@tonic-gate static void 6930Sstevel@tonic-gate lwp_segregs_restore(klwp_t *lwp) 6940Sstevel@tonic-gate { 6953446Smrj pcb_t *pcb = &lwp->lwp_pcb; 6960Sstevel@tonic-gate 6970Sstevel@tonic-gate ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc)); 6980Sstevel@tonic-gate ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc)); 6990Sstevel@tonic-gate 7003446Smrj #ifdef _SYSCALL32_IMPL 7013446Smrj gdt_ucode_model(DATAMODEL_NATIVE); 7023446Smrj #endif 7033446Smrj 7045084Sjohnlev gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc); 7055084Sjohnlev gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc); 7060Sstevel@tonic-gate 7070Sstevel@tonic-gate } 7080Sstevel@tonic-gate 7090Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL 7100Sstevel@tonic-gate 7110Sstevel@tonic-gate static void 7120Sstevel@tonic-gate lwp_segregs_restore32(klwp_t *lwp) 7130Sstevel@tonic-gate { 7143446Smrj /*LINTED*/ 7150Sstevel@tonic-gate cpu_t *cpu = CPU; 7160Sstevel@tonic-gate pcb_t *pcb = &lwp->lwp_pcb; 7170Sstevel@tonic-gate 7183446Smrj ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_fsdesc)); 7193446Smrj ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_gsdesc)); 7200Sstevel@tonic-gate 7213446Smrj gdt_ucode_model(DATAMODEL_ILP32); 7225084Sjohnlev gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc); 7235084Sjohnlev 
gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc); 7240Sstevel@tonic-gate } 7250Sstevel@tonic-gate 7260Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */ 7270Sstevel@tonic-gate 7280Sstevel@tonic-gate /* 7292712Snn35248 * If this is a process in a branded zone, then we want it to use the brand 7302712Snn35248 * syscall entry points instead of the standard Solaris entry points. This 7312712Snn35248 * routine must be called when a new lwp is created within a branded zone 7322712Snn35248 * or when an existing lwp moves into a branded zone via a zone_enter() 7332712Snn35248 * operation. 7342712Snn35248 */ 7352712Snn35248 void 7362712Snn35248 lwp_attach_brand_hdlrs(klwp_t *lwp) 7372712Snn35248 { 7382712Snn35248 kthread_t *t = lwptot(lwp); 7392712Snn35248 7402712Snn35248 ASSERT(PROC_IS_BRANDED(lwptoproc(lwp))); 741*6994Sedp 7422712Snn35248 ASSERT(removectx(t, NULL, brand_interpositioning_disable, 7434883Ssp92102 brand_interpositioning_enable, NULL, NULL, 7444883Ssp92102 brand_interpositioning_disable, NULL) == 0); 7452712Snn35248 installctx(t, NULL, brand_interpositioning_disable, 7464883Ssp92102 brand_interpositioning_enable, NULL, NULL, 7474883Ssp92102 brand_interpositioning_disable, NULL); 7482712Snn35248 7492712Snn35248 if (t == curthread) { 7502712Snn35248 kpreempt_disable(); 7512712Snn35248 brand_interpositioning_enable(); 7522712Snn35248 kpreempt_enable(); 7532712Snn35248 } 7542712Snn35248 } 7552712Snn35248 7562712Snn35248 /* 757*6994Sedp * If this is a process in a branded zone, then we want it to disable the 758*6994Sedp * brand syscall entry points. This routine must be called when the last 759*6994Sedp * lwp in a process is exiting in proc_exit(). 
760*6994Sedp */ 761*6994Sedp void 762*6994Sedp lwp_detach_brand_hdlrs(klwp_t *lwp) 763*6994Sedp { 764*6994Sedp kthread_t *t = lwptot(lwp); 765*6994Sedp 766*6994Sedp ASSERT(PROC_IS_BRANDED(lwptoproc(lwp))); 767*6994Sedp if (t == curthread) 768*6994Sedp kpreempt_disable(); 769*6994Sedp 770*6994Sedp /* Remove the original context handlers */ 771*6994Sedp VERIFY(removectx(t, NULL, brand_interpositioning_disable, 772*6994Sedp brand_interpositioning_enable, NULL, NULL, 773*6994Sedp brand_interpositioning_disable, NULL) != 0); 774*6994Sedp 775*6994Sedp if (t == curthread) { 776*6994Sedp /* Cleanup our MSR and IDT entries. */ 777*6994Sedp brand_interpositioning_disable(); 778*6994Sedp kpreempt_enable(); 779*6994Sedp } 780*6994Sedp } 781*6994Sedp 782*6994Sedp /* 7830Sstevel@tonic-gate * Add any lwp-associated context handlers to the lwp at the beginning 7840Sstevel@tonic-gate * of the lwp's useful life. 7850Sstevel@tonic-gate * 7860Sstevel@tonic-gate * All paths which create lwp's invoke lwp_create(); lwp_create() 7870Sstevel@tonic-gate * invokes lwp_stk_init() which initializes the stack, sets up 7880Sstevel@tonic-gate * lwp_regs, and invokes this routine. 7890Sstevel@tonic-gate * 7900Sstevel@tonic-gate * All paths which destroy lwp's invoke lwp_exit() to rip the lwp 7910Sstevel@tonic-gate * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it 7920Sstevel@tonic-gate * ends up in thread_free() which invokes freectx(t, 0) before 7930Sstevel@tonic-gate * invoking lwp_stk_fini(). When the lwp is recycled from death 7940Sstevel@tonic-gate * row, lwp_stk_fini() is invoked, then thread_free(), and thus 7950Sstevel@tonic-gate * freectx(t, 0) as before. 7960Sstevel@tonic-gate * 7970Sstevel@tonic-gate * In the case of exec, the surviving lwp is thoroughly scrubbed 7980Sstevel@tonic-gate * clean; exec invokes freectx(t, 1) to destroy associated contexts. 
 * On the way back to the new image, it invokes setregs() which
 * in turn invokes this routine.
 */
void
lwp_installctx(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);
	/* Nonzero when we are installing handlers on our own thread. */
	int thisthread = t == curthread;
#ifdef _SYSCALL32_IMPL
	/*
	 * Pick the restore handler matching the lwp's data model so the
	 * correct user %cs descriptor is exposed on context switch-in.
	 */
	void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
	    lwp_segregs_restore : lwp_segregs_restore32;
#else
	void (*restop)(klwp_t *) = lwp_segregs_restore;
#endif

	/*
	 * Install the basic lwp context handlers on each lwp.
	 *
	 * On the amd64 kernel, the context handlers are responsible for
	 * virtualizing %ds, %es, %fs, and %gs to the lwp.  The register
	 * values are only ever changed via sys_rtt when the
	 * pcb->pcb_rupdate == 1.  Only sys_rtt gets to clear the bit.
	 *
	 * On the i386 kernel, the context handlers are responsible for
	 * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs
	 */
	/* The segreg handlers must not already be installed for this lwp. */
	ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL) == 0);
	if (thisthread)
		kpreempt_disable();
	installctx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL);
	if (thisthread) {
		/*
		 * Since we're the right thread, set the values in the GDT
		 */
		restop(lwp);
		kpreempt_enable();
	}

	/*
	 * If we have sysenter/sysexit instructions enabled, we need
	 * to ensure that the hardware mechanism is kept up-to-date with the
	 * lwp's kernel stack pointer across context switches.
	 *
	 * sep_save zeros the sysenter stack pointer msr; sep_restore sets
	 * it to the lwp's kernel stack pointer (kstktop).
	 */
	if (x86_feature & X86_SEP) {
#if defined(__amd64)
		caddr_t kstktop = (caddr_t)lwp->lwp_regs;
#elif defined(__i386)
		/*
		 * On i386 the regs sit below the top of the kernel stack;
		 * compute the true stack top above the saved regs frame.
		 */
		caddr_t kstktop = ((caddr_t)lwp->lwp_regs - MINFRAME) +
		    SA(sizeof (struct regs) + MINFRAME);
#endif
		/* The sysenter handlers must not already be installed. */
		ASSERT(removectx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);

		if (thisthread)
			kpreempt_disable();
		installctx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL);
		if (thisthread) {
			/*
			 * We're the right thread, so set the stack pointer
			 * for the first sysenter instruction to use
			 */
			sep_restore(kstktop);
			kpreempt_enable();
		}
	}

	/* Branded processes also get the brand interpositioning handlers. */
	if (PROC_IS_BRANDED(ttoproc(t)))
		lwp_attach_brand_hdlrs(lwp);
}

/*
 * Clear registers on exec(2).
 */
void
setregs(uarg_t *args)
{
	struct regs *rp;
	kthread_t *t = curthread;	/* exec runs on the surviving lwp */
	klwp_t *lwp = ttolwp(t);
	pcb_t *pcb = &lwp->lwp_pcb;
	greg_t sp;

	/*
	 * Initialize user registers
	 */
	(void) save_syscall_args();	/* copy args from registers first */
	rp = lwptoregs(lwp);
	sp = rp->r_sp;			/* preserve stack pointer across bzero */
	bzero(rp, sizeof (*rp));

	rp->r_ss = UDS_SEL;
	rp->r_sp = sp;
	rp->r_pc = args->entry;		/* new image's entry point */
	rp->r_ps = PSL_USER;

#if defined(__amd64)

	/* Reset lwp-private %fs/%gs selectors and base addresses. */
	pcb->pcb_fs = pcb->pcb_gs = 0;
	pcb->pcb_fsbase = pcb->pcb_gsbase = 0;

	if (ttoproc(t)->p_model == DATAMODEL_NATIVE) {

		rp->r_cs = UCS_SEL;

		/*
		 * Only allow 64-bit user code descriptor to be present.
		 */
		gdt_ucode_model(DATAMODEL_NATIVE);

		/*
		 * Arrange that the virtualized %fs and %gs GDT descriptors
		 * have a well-defined initial state (present, ring 3
		 * and of type data).
		 */
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;

		/*
		 * thrptr is either NULL or a value used by DTrace.
		 * 64-bit processes use %fs as their "thread" register.
		 */
		if (args->thrptr)
			(void) lwp_setprivate(lwp, _LWP_FSBASE, args->thrptr);

	} else {

		rp->r_cs = U32CS_SEL;
		rp->r_ds = rp->r_es = UDS_SEL;

		/*
		 * only allow 32-bit user code selector to be present.
		 */
		gdt_ucode_model(DATAMODEL_ILP32);

		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;

		/*
		 * thrptr is either NULL or a value used by DTrace.
		 * 32-bit processes use %gs as their "thread" register.
		 */
		if (args->thrptr)
			(void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);

	}

	/*
	 * Record the segment values and flag sys_rtt to reload the user
	 * segment registers from the pcb on the way back out.
	 */
	pcb->pcb_ds = rp->r_ds;
	pcb->pcb_es = rp->r_es;
	pcb->pcb_rupdate = 1;

#elif defined(__i386)

	rp->r_cs = UCS_SEL;
	rp->r_ds = rp->r_es = UDS_SEL;

	/*
	 * Arrange that the virtualized %fs and %gs GDT descriptors
	 * have a well-defined initial state (present, ring 3
	 * and of type data).
	 */
	pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;

	/*
	 * For %gs we need to reset LWP_GSBASE in pcb and the
	 * per-cpu GDT descriptor. thrptr is either NULL
	 * or a value used by DTrace.
	 */
	if (args->thrptr)
		(void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);
#endif

	lwp->lwp_eosys = JUSTRETURN;	/* don't rerun/restart the syscall */
	t->t_post_sys = 1;

	/*
	 * Here we initialize minimal fpu state.
	 * The rest is done at the first floating
	 * point instruction that a process executes.
	 */
	pcb->pcb_fpu.fpu_flags = 0;

	/*
	 * Add the lwp context handlers that virtualize segment registers,
	 * and/or system call stacks etc.
	 */
	lwp_installctx(lwp);
}

/*
 * Return the current cpu's GDT; accessor for callers that cannot
 * reference the CPU macro directly.
 */
user_desc_t *
cpu_get_gdt(void)
{
	return (CPU->cpu_gdt);
}


#if !defined(lwp_getdatamodel)

/*
 * Return the datamodel of the given lwp.
 */
/*ARGSUSED*/
model_t
lwp_getdatamodel(klwp_t *lwp)
{
	return (lwp->lwp_procp->p_model);
}

#endif	/* !lwp_getdatamodel */

#if !defined(get_udatamodel)

/* Return the datamodel of the current process. */
model_t
get_udatamodel(void)
{
	return (curproc->p_model);
}

#endif	/* !get_udatamodel */