/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*      Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/*      Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T   */
/*        All Rights Reserved */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/class.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/cred.h>
#include <sys/archsystm.h>
#include <sys/vmparam.h>
#include <sys/prsystm.h>
#include <sys/reboot.h>
#include <sys/uadmin.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/ucontext.h>
#include <sys/dnlc.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/debugreg.h>
#include <sys/thread.h>
#include <sys/vtrace.h>
#include <sys/consdev.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/cpu.h>
#include <sys/stack.h>
#include <sys/swap.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <sys/exec.h>
#include <sys/acct.h>
#include <sys/core.h>
#include <sys/corectl.h>
#include <sys/modctl.h>
#include <sys/tuneable.h>
#include <c2/audit.h>
#include <sys/bootconf.h>
#include <sys/brand.h>
#include <sys/dumphdr.h>
#include <sys/promif.h>
#include <sys/systeminfo.h>
#include <sys/kdi.h>
#include <sys/contract_impl.h>
#include <sys/x86_archext.h>
#include <sys/segments.h>
#include <sys/ontrap.h>
#include <sys/cpu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

/*
 * Compare the version of boot that boot says it is against
 * the version of boot the kernel expects.
 */
int
check_boot_version(int boots_version)
{
        if (boots_version == BO_VERSION)
                return (0);

        prom_printf("Wrong boot interface - kernel needs v%d found v%d\n",
            BO_VERSION, boots_version);
        prom_panic("halting");
        /*NOTREACHED*/
}

/*
 * Process the physical installed list for boot.
 * Finds:
 * 1) the pfn of the highest installed physical page,
 * 2) the number of pages installed
 * 3) the number of distinct contiguous regions these pages fall into.
 * 4) the number of contiguous memory ranges
 */
void
installed_top_size_ex(
        struct memlist *list,   /* pointer to start of installed list */
        pfn_t *high_pfn,        /* return ptr for top value */
        pgcnt_t *pgcnt,         /* return ptr for sum of installed pages */
        int *ranges)            /* return ptr for the count of contig. ranges */
{
        pfn_t top = 0;
        pgcnt_t sumpages = 0;
        pfn_t highp;            /* high page in a chunk */
        int cnt = 0;

        for (; list; list = list->ml_next) {
                ++cnt;
                highp = (list->ml_address + list->ml_size - 1) >> PAGESHIFT;
                if (top < highp)
                        top = highp;
                sumpages += btop(list->ml_size);
        }

        *high_pfn = top;
        *pgcnt = sumpages;
        *ranges = cnt;
}

void
installed_top_size(
        struct memlist *list,   /* pointer to start of installed list */
        pfn_t *high_pfn,        /* return ptr for top value */
        pgcnt_t *pgcnt)         /* return ptr for sum of installed pages */
{
        int ranges;

        installed_top_size_ex(list, high_pfn, pgcnt, &ranges);
}

void
phys_install_has_changed(void)
{}

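/*
 * A minimal usage sketch; the memlist pointer "mlist" below is hypothetical,
 * not a name defined in this file:
 *
 *      pfn_t   top_pfn;
 *      pgcnt_t npages;
 *      int     nranges;
 *
 *      installed_top_size_ex(mlist, &top_pfn, &npages, &nranges);
 *
 * installed_top_size() is the same call with the range count discarded.
 */
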
/*
 * Copy in a memory list from boot to kernel, with a filter function
 * to remove pages. The filter function can increase the address and/or
 * decrease the size to filter out pages.  It will also align addresses and
 * sizes to PAGESIZE.
 */
void
copy_memlist_filter(
        struct memlist *src,
        struct memlist **dstp,
        void (*filter)(uint64_t *, uint64_t *))
{
        struct memlist *dst, *prev;
        uint64_t addr;
        uint64_t size;
        uint64_t eaddr;

        dst = *dstp;
        prev = dst;

        /*
         * Move through the memlist applying a filter against
         * each range of memory. Note that we may apply the
         * filter multiple times against each memlist entry.
         */
        for (; src; src = src->ml_next) {
                addr = P2ROUNDUP(src->ml_address, PAGESIZE);
                eaddr = P2ALIGN(src->ml_address + src->ml_size, PAGESIZE);
                while (addr < eaddr) {
                        size = eaddr - addr;
                        if (filter != NULL)
                                filter(&addr, &size);
                        if (size == 0)
                                break;
                        dst->ml_address = addr;
                        dst->ml_size = size;
                        dst->ml_next = 0;
                        if (prev == dst) {
                                dst->ml_prev = 0;
                                dst++;
                        } else {
                                dst->ml_prev = prev;
                                prev->ml_next = dst;
                                dst++;
                                prev++;
                        }
                        addr += size;
                }
        }

        *dstp = dst;
}

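/*
 * For illustration, a sketch of a filter callback matching the contract
 * described above; the function name, the caller-side names and the 1 MB
 * cutoff are hypothetical.  A filter may raise *addr and/or lower *size to
 * exclude pages, and setting *size to 0 ends processing of the current
 * chunk:
 *
 *      static void
 *      exclude_low_mem(uint64_t *addr, uint64_t *size)
 *      {
 *              const uint64_t limit = 0x100000;
 *              uint64_t end = *addr + *size;
 *
 *              if (end <= limit)
 *                      *size = 0;
 *              else if (*addr < limit) {
 *                      *addr = limit;
 *                      *size = end - limit;
 *              }
 *      }
 *
 *      copy_memlist_filter(boot_list, &dst, exclude_low_mem);
 */
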
/*
 * Kernel setup code, called from startup().
 */
void
kern_setup1(void)
{
        proc_t *pp;

        pp = &p0;

        proc_sched = pp;

        /*
         * Initialize process 0 data structures
         */
        pp->p_stat = SRUN;
        pp->p_flag = SSYS;

        pp->p_pidp = &pid0;
        pp->p_pgidp = &pid0;
        pp->p_sessp = &session0;
        pp->p_tlist = &t0;
        pid0.pid_pglink = pp;
        pid0.pid_pgtail = pp;

        /*
         * XXX - we assume that the u-area is zeroed out except for
         * ttolwp(curthread)->lwp_regs.
         */
        PTOU(curproc)->u_cmask = (mode_t)CMASK;

        thread_init();          /* init thread_free list */
        pid_init();             /* initialize pid (proc) table */
        contract_init();        /* initialize contracts */

        init_pages_pp_maximum();
}

/*
 * Load a procedure into a thread.
 */
void
thread_load(kthread_t *t, void (*start)(), caddr_t arg, size_t len)
{
        caddr_t sp;
        size_t framesz;
        caddr_t argp;
        long *p;
        extern void thread_start();

        /*
         * Push a "c" call frame onto the stack to represent
         * the caller of "start".
         */
        sp = t->t_stk;
        ASSERT(((uintptr_t)t->t_stk & (STACK_ENTRY_ALIGN - 1)) == 0);
        if (len != 0) {
                /*
                 * the object that arg points at is copied into the
                 * caller's frame.
                 */
                framesz = SA(len);
                sp -= framesz;
                ASSERT(sp > t->t_stkbase);
                argp = sp + SA(MINFRAME);
                bcopy(arg, argp, len);
                arg = argp;
        }
        /*
         * Set up arguments (arg and len) on the caller's stack frame.
         */
        p = (long *)sp;

        *--p = 0;               /* fake call */
        *--p = 0;               /* null frame pointer terminates stack trace */
        *--p = (long)len;
        *--p = (intptr_t)arg;
        *--p = (intptr_t)start;

        /*
         * initialize thread to resume at thread_start() which will
         * turn around and invoke (*start)(arg, len).
         */
        t->t_pc = (uintptr_t)thread_start;
        t->t_sp = (uintptr_t)p;

        ASSERT((t->t_sp & (STACK_ENTRY_ALIGN - 1)) == 0);
}

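/*
 * For reference, thread_load() above leaves the words at and above the new
 * thread's t_sp laid out, in ascending address order, as: start, arg, len,
 * a zero frame pointer and a zero return address (the zeros terminate stack
 * traces).  t_pc points at thread_start(), which consumes this frame and
 * calls (*start)(arg, len) when the thread first runs.
 */
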
/*
 * load user registers into lwp.
 */
/*ARGSUSED2*/
void
lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
{
        struct regs *rp = lwptoregs(lwp);

        setgregs(lwp, grp);
        rp->r_ps = PSL_USER;

        /*
         * For 64-bit lwps, we allow one magic %fs selector value, and one
         * magic %gs selector to point anywhere in the address space using
         * %fsbase and %gsbase behind the scenes.  libc uses %fs to point
         * at the ulwp_t structure.
         *
         * For 32-bit lwps, libc wedges its lwp thread pointer into the
         * ucontext ESP slot (which is otherwise irrelevant to setting a
         * ucontext) and LWPGS_SEL value into gregs[REG_GS].  This is so
         * syslwp_create() can atomically setup %gs.
         *
         * See setup_context() in libc.
         */
#ifdef _SYSCALL32_IMPL
        if (lwp_getdatamodel(lwp) == DATAMODEL_ILP32) {
                if (grp[REG_GS] == LWPGS_SEL)
                        (void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
        } else {
                /*
                 * See lwp_setprivate in kernel and setup_context in libc.
                 *
                 * Currently libc constructs a ucontext from whole cloth for
                 * every new (not main) lwp created.  For 64 bit processes
                 * %fsbase is directly set to point to the current thread
                 * pointer.  In the past (Solaris 10) %fs was also set to
                 * LWPFS_SEL to indicate %fsbase.  Now we use the null GDT
                 * selector for this purpose.  LWP[FS|GS]_SEL are only
                 * intended for 32 bit processes.  To ease transition we
                 * support older libcs in the newer kernel by forcing %fs or
                 * %gs selector to null by calling lwp_setprivate if
                 * LWP[FS|GS]_SEL is passed in the ucontext.  This should be
                 * ripped out at some future date.  Another fix would be for
                 * libc to do a getcontext and inherit the null %fs/%gs from
                 * the current context but that means an extra system call
                 * and could hurt performance.
                 */
                if (grp[REG_FS] == 0x1bb) /* hard code legacy LWPFS_SEL */
                        (void) lwp_setprivate(lwp, _LWP_FSBASE,
                            (uintptr_t)grp[REG_FSBASE]);

                if (grp[REG_GS] == 0x1c3) /* hard code legacy LWPGS_SEL */
                        (void) lwp_setprivate(lwp, _LWP_GSBASE,
                            (uintptr_t)grp[REG_GSBASE]);
        }
#else
        if (grp[GS] == LWPGS_SEL)
                (void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
#endif

        lwp->lwp_eosys = JUSTRETURN;
        lwptot(lwp)->t_post_sys = 1;
}

/*
 * set syscall()'s return values for a lwp.
 */
void
lwp_setrval(klwp_t *lwp, int v1, int v2)
{
        lwptoregs(lwp)->r_ps &= ~PS_C;
        lwptoregs(lwp)->r_r0 = v1;
        lwptoregs(lwp)->r_r1 = v2;
}

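/*
 * Note on the above: r_r0/r_r1 are the architecture-neutral aliases for the
 * registers that carry syscall return values (conventionally %eax/%edx for
 * 32-bit and %rax/%rdx for 64-bit lwps), and PS_C is the carry flag, which
 * the syscall return path uses to signal an error; clearing it here marks
 * the return as successful.
 */
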
/*
 * Set the stack pointer for a lwp.
 */
void
lwp_setsp(klwp_t *lwp, caddr_t sp)
{
        lwptoregs(lwp)->r_sp = (intptr_t)sp;
}

/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
#if defined(__amd64)
        struct pcb *pcb = &clwp->lwp_pcb;
        struct regs *rp = lwptoregs(lwp);

        if (pcb->pcb_rupdate == 0) {
                pcb->pcb_ds = rp->r_ds;
                pcb->pcb_es = rp->r_es;
                pcb->pcb_fs = rp->r_fs;
                pcb->pcb_gs = rp->r_gs;
                pcb->pcb_rupdate = 1;
                lwptot(clwp)->t_post_sys = 1;
        }
        ASSERT(lwptot(clwp)->t_post_sys);
#endif

        bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct regs));
}

/*
 * This function is currently unused on x86.
 */
/*ARGSUSED*/
void
lwp_freeregs(klwp_t *lwp, int isexec)
{}

/*
 * This function is currently unused on x86.
 */
void
lwp_pcb_exit(void)
{}

/*
 * Lwp context ops for segment registers.
 */

/*
 * Every time we come into the kernel (syscall, interrupt or trap
 * but not fast-traps) we capture the current values of the user's
 * segment registers into the lwp's reg structure. This includes
 * lcall for i386 generic system call support since it is handled
 * as a segment-not-present trap.
 *
 * Here we save the current values from the lwp regs into the pcb
 * and set pcb->pcb_rupdate to 1 to tell the rest of the kernel
 * that the pcb copy of the segment registers is the current one.
 * This ensures the lwp's next trip to user land reloads them from
 * the pcb via update_sregs().
 * Finally we set t_post_sys to ensure that no system call fast-paths
 * its way out of the kernel via sysret.
 *
 * (This means that we need to have interrupts disabled when we test
 * t->t_post_sys in the syscall handlers; if the test fails, we need
 * to keep interrupts disabled until we return to userland so we can't
 * be switched away.)
 *
 * As a result of all this, we don't really have to do a whole lot if
 * the thread is just mucking about in the kernel, switching on and
 * off the cpu for whatever reason it feels like. And yet we still
 * preserve fast syscalls, because if we -don't- get descheduled,
 * we never come here either.
 */

#define VALID_LWP_DESC(udp)     ((udp)->usd_type == SDT_MEMRWA && \
        (udp)->usd_p == 1 && (udp)->usd_dpl == SEL_UPL)

/*ARGSUSED*/
void
lwp_segregs_save(klwp_t *lwp)
{
#if defined(__amd64)
        pcb_t *pcb = &lwp->lwp_pcb;
        struct regs *rp;

        ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
        ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));

        if (pcb->pcb_rupdate == 0) {
                rp = lwptoregs(lwp);

                /*
                 * If there's no update already pending, capture the current
                 * %ds/%es/%fs/%gs values from lwp's regs in case the user
                 * changed them; %fsbase and %gsbase are privileged so the
                 * kernel versions of these registers in pcb_fsbase and
                 * pcb_gsbase are always up-to-date.
                 */
                pcb->pcb_ds = rp->r_ds;
                pcb->pcb_es = rp->r_es;
                pcb->pcb_fs = rp->r_fs;
                pcb->pcb_gs = rp->r_gs;
                pcb->pcb_rupdate = 1;
                lwp->lwp_thread->t_post_sys = 1;
        }
#endif  /* __amd64 */

#if !defined(__xpv)     /* XXPV not sure if we can re-read gdt? */
        ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPFS], &lwp->lwp_pcb.pcb_fsdesc,
            sizeof (lwp->lwp_pcb.pcb_fsdesc)) == 0);
        ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPGS], &lwp->lwp_pcb.pcb_gsdesc,
            sizeof (lwp->lwp_pcb.pcb_gsdesc)) == 0);
#endif
}

#if defined(__amd64)

/*
 * Update the segment registers with new values from the pcb.
 *
 * We have to do this carefully, and in the following order,
 * in case any of the selectors points at a bogus descriptor.
 * If they do, we'll catch the trap with on_trap and return 1.
 * Returns 0 on success.
 *
 * This is particularly tricky for %gs.
 * This routine must be executed under a cli.
 */
int
update_sregs(struct regs *rp, klwp_t *lwp)
{
        pcb_t *pcb = &lwp->lwp_pcb;
        ulong_t kgsbase;
        on_trap_data_t otd;
        int rc = 0;

        if (!on_trap(&otd, OT_SEGMENT_ACCESS)) {

#if defined(__xpv)
                /*
                 * On the hypervisor this is easy.  The hypercall below will
                 * swapgs and load %gs with the user selector.  If the user
                 * selector is bad the hypervisor will catch the fault and
                 * load %gs with the null selector instead.  Either way the
                 * kernel's gsbase is not damaged.
                 */
                kgsbase = (ulong_t)CPU;
                if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL,
                    pcb->pcb_gs) != 0) {
                        no_trap();
                        return (1);
                }

                rp->r_gs = pcb->pcb_gs;
                ASSERT((cpu_t *)kgsbase == CPU);

#else   /* __xpv */

                /*
                 * A little more complicated running native.
                 */
                kgsbase = (ulong_t)CPU;
                __set_gs(pcb->pcb_gs);

                /*
                 * If __set_gs fails it's because the new %gs is a bad %gs;
                 * we'll be taking a trap, but with the original %gs and
                 * %gsbase undamaged (i.e. pointing at curcpu).
                 *
                 * We've just mucked up the kernel's gsbase.  Oops.  In
                 * particular we can't take any traps at all.  Make the newly
                 * computed gsbase be the hidden gs via __swapgs, and fix
                 * the kernel's gsbase back again. Later, when we return to
                 * userland we'll swapgs again, restoring the gsbase just
                 * loaded above.
                 */
                __swapgs();
                rp->r_gs = pcb->pcb_gs;

                /*
                 * restore kernel's gsbase
                 */
                wrmsr(MSR_AMD_GSBASE, kgsbase);

#endif  /* __xpv */

                /*
                 * Only override the descriptor base address if
                 * r_gs == LWPGS_SEL or if r_gs == NULL. A note on
                 * NULL descriptors -- 32-bit programs take faults
                 * if they dereference NULL descriptors; however,
                 * when 64-bit programs load them into %fs or %gs,
                 * they DON'T fault -- only the base address remains
                 * whatever it was from the last load.   Urk.
                 *
                 * XXX - note that lwp_setprivate now sets %fs/%gs to the
                 * null selector for 64 bit processes. Whereas before
                 * %fs/%gs were set to LWP(FS|GS)_SEL regardless of
                 * the process's data model. For now we check for both
                 * values so that the kernel can also support the older
                 * libc. This should be ripped out at some point in the
                 * future.
                 */
                if (pcb->pcb_gs == LWPGS_SEL || pcb->pcb_gs == 0) {
#if defined(__xpv)
                        if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER,
                            pcb->pcb_gsbase)) {
                                no_trap();
                                return (1);
                        }
#else
                        wrmsr(MSR_AMD_KGSBASE, pcb->pcb_gsbase);
#endif
                }

                __set_ds(pcb->pcb_ds);
                rp->r_ds = pcb->pcb_ds;

                __set_es(pcb->pcb_es);
                rp->r_es = pcb->pcb_es;

                __set_fs(pcb->pcb_fs);
                rp->r_fs = pcb->pcb_fs;

                /*
                 * Same as for %gs
                 */
                if (pcb->pcb_fs == LWPFS_SEL || pcb->pcb_fs == 0) {
#if defined(__xpv)
                        if (HYPERVISOR_set_segment_base(SEGBASE_FS,
                            pcb->pcb_fsbase)) {
                                no_trap();
                                return (1);
                        }
#else
                        wrmsr(MSR_AMD_FSBASE, pcb->pcb_fsbase);
#endif
                }

        } else {
                cli();
                rc = 1;
        }
        no_trap();
        return (rc);
}

/*
 * Make sure any stale selectors are cleared from the segment registers
 * by putting KDS_SEL (the kernel's default %ds gdt selector) into them.
 * This is necessary because the kernel itself does not use %es, %fs, nor
 * %ds. (%cs and %ss are necessary, and are set up by the kernel - along with
 * %gs - to point to the current cpu struct.) If we enter kmdb while in the
 * kernel and resume with a stale ldt or brandz selector sitting there in a
 * segment register, kmdb will #gp fault if the stale selector points to,
 * for example, an ldt in the context of another process.
 *
 * WARNING: Intel and AMD chips behave differently when storing
 * the null selector into %fs and %gs while in long mode. On AMD
 * chips fsbase and gsbase are not cleared. But on Intel chips, storing
 * a null selector into %fs or %gs has the side effect of clearing
 * fsbase or gsbase.  For that reason we use KDS_SEL, which has
 * consistent behavior between AMD and Intel.
 *
 * Caller responsible for preventing cpu migration.
 */
void
reset_sregs(void)
{
        ulong_t kgsbase = (ulong_t)CPU;

        ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);

        cli();
        __set_gs(KGS_SEL);

        /*
         * restore kernel gsbase
         */
#if defined(__xpv)
        xen_set_segment_base(SEGBASE_GS_KERNEL, kgsbase);
#else
        wrmsr(MSR_AMD_GSBASE, kgsbase);
#endif

        sti();

        __set_ds(KDS_SEL);
        __set_es(0 | SEL_KPL);  /* selector RPL not ring 0 on hypervisor */
        __set_fs(KFS_SEL);
}

#endif  /* __amd64 */

#ifdef _SYSCALL32_IMPL

/*
 * Make it impossible for a process to change its data model.
 * We do this by toggling the present bits for the 32 and
 * 64-bit user code descriptors. That way if a user lwp attempts
 * to change its data model (by using the wrong code descriptor in
 * %cs) it will fault immediately. This also allows us to simplify
 * assertions and checks in the kernel.
 */

static void
gdt_ucode_model(model_t model)
{
        kpreempt_disable();
        if (model == DATAMODEL_NATIVE) {
                gdt_update_usegd(GDT_UCODE, &ucs_on);
                gdt_update_usegd(GDT_U32CODE, &ucs32_off);
        } else {
                gdt_update_usegd(GDT_U32CODE, &ucs32_on);
                gdt_update_usegd(GDT_UCODE, &ucs_off);
        }
        kpreempt_enable();
}

#endif  /* _SYSCALL32_IMPL */

/*
 * Restore lwp private fs and gs segment descriptors
 * on the current cpu's GDT.
 */
static void
lwp_segregs_restore(klwp_t *lwp)
{
        pcb_t *pcb = &lwp->lwp_pcb;

        ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
        ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));

#ifdef _SYSCALL32_IMPL
        gdt_ucode_model(DATAMODEL_NATIVE);
#endif

        gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
        gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);

}

#ifdef _SYSCALL32_IMPL

static void
lwp_segregs_restore32(klwp_t *lwp)
{
        /*LINTED*/
        cpu_t *cpu = CPU;
        pcb_t *pcb = &lwp->lwp_pcb;

        ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_fsdesc));
        ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_gsdesc));

        gdt_ucode_model(DATAMODEL_ILP32);
        gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
        gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);
}

#endif  /* _SYSCALL32_IMPL */

/*
 * If this is a process in a branded zone, then we want it to use the brand
 * syscall entry points instead of the standard Solaris entry points.  This
 * routine must be called when a new lwp is created within a branded zone
 * or when an existing lwp moves into a branded zone via a zone_enter()
 * operation.
 */
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{
        kthread_t *t = lwptot(lwp);

        ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));

        ASSERT(removectx(t, NULL, brand_interpositioning_disable,
            brand_interpositioning_enable, NULL, NULL,
            brand_interpositioning_disable, NULL) == 0);
        installctx(t, NULL, brand_interpositioning_disable,
            brand_interpositioning_enable, NULL, NULL,
            brand_interpositioning_disable, NULL);

        if (t == curthread) {
                kpreempt_disable();
                brand_interpositioning_enable();
                kpreempt_enable();
        }
}

/*
 * If this is a process in a branded zone, then we want it to disable the
 * brand syscall entry points.  This routine must be called when the last
 * lwp in a process is exiting in proc_exit().
 */
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{
        kthread_t *t = lwptot(lwp);

        ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
        if (t == curthread)
                kpreempt_disable();

        /* Remove the original context handlers */
        VERIFY(removectx(t, NULL, brand_interpositioning_disable,
            brand_interpositioning_enable, NULL, NULL,
            brand_interpositioning_disable, NULL) != 0);

        if (t == curthread) {
                /* Cleanup our MSR and IDT entries. */
                brand_interpositioning_disable();
                kpreempt_enable();
        }
}

/*
 * Add any lwp-associated context handlers to the lwp at the beginning
 * of the lwp's useful life.
 *
 * All paths which create lwps invoke lwp_create(); lwp_create()
 * invokes lwp_stk_init() which initializes the stack, sets up
 * lwp_regs, and invokes this routine.
 *
 * All paths which destroy lwps invoke lwp_exit() to rip the lwp
 * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it
 * ends up in thread_free() which invokes freectx(t, 0) before
 * invoking lwp_stk_fini().  When the lwp is recycled from death
 * row, lwp_stk_fini() is invoked, then thread_free(), and thus
 * freectx(t, 0) as before.
 *
 * In the case of exec, the surviving lwp is thoroughly scrubbed
 * clean; exec invokes freectx(t, 1) to destroy associated contexts.
 * On the way back to the new image, it invokes setregs() which
 * in turn invokes this routine.
 */
void
lwp_installctx(klwp_t *lwp)
{
        kthread_t *t = lwptot(lwp);
        int thisthread = t == curthread;
#ifdef _SYSCALL32_IMPL
        void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
            lwp_segregs_restore : lwp_segregs_restore32;
#else
        void (*restop)(klwp_t *) = lwp_segregs_restore;
#endif

        /*
         * Install the basic lwp context handlers on each lwp.
         *
         * On the amd64 kernel, the context handlers are responsible for
         * virtualizing %ds, %es, %fs, and %gs to the lwp.  The register
         * values are only ever changed via sys_rtt when
         * pcb->pcb_rupdate == 1.  Only sys_rtt gets to clear the bit.
         *
         * On the i386 kernel, the context handlers are responsible for
         * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs
         */
        ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
            NULL, NULL, NULL, NULL) == 0);
        if (thisthread)
                kpreempt_disable();
        installctx(t, lwp, lwp_segregs_save, restop,
            NULL, NULL, NULL, NULL);
        if (thisthread) {
                /*
                 * Since we're the right thread, set the values in the GDT
                 */
                restop(lwp);
                kpreempt_enable();
        }

        /*
         * If we have sysenter/sysexit instructions enabled, we need
         * to ensure that the hardware mechanism is kept up-to-date with the
         * lwp's kernel stack pointer across context switches.
         *
         * sep_save zeros the sysenter stack pointer msr; sep_restore sets
         * it to the lwp's kernel stack pointer (kstktop).
         */
        if (x86_feature & X86_SEP) {
#if defined(__amd64)
                caddr_t kstktop = (caddr_t)lwp->lwp_regs;
#elif defined(__i386)
                caddr_t kstktop = ((caddr_t)lwp->lwp_regs - MINFRAME) +
                    SA(sizeof (struct regs) + MINFRAME);
#endif
                ASSERT(removectx(t, kstktop,
                    sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);

                if (thisthread)
                        kpreempt_disable();
                installctx(t, kstktop,
                    sep_save, sep_restore, NULL, NULL, NULL, NULL);
                if (thisthread) {
                        /*
                         * We're the right thread, so set the stack pointer
                         * for the first sysenter instruction to use
                         */
                        sep_restore(kstktop);
                        kpreempt_enable();
                }
        }

        if (PROC_IS_BRANDED(ttoproc(t)))
                lwp_attach_brand_hdlrs(lwp);
}

/*
 * Clear registers on exec(2).
 */
void
setregs(uarg_t *args)
{
        struct regs *rp;
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        pcb_t *pcb = &lwp->lwp_pcb;
        greg_t sp;

        /*
         * Initialize user registers
         */
        (void) save_syscall_args();     /* copy args from registers first */
        rp = lwptoregs(lwp);
        sp = rp->r_sp;
        bzero(rp, sizeof (*rp));

        rp->r_ss = UDS_SEL;
        rp->r_sp = sp;
        rp->r_pc = args->entry;
        rp->r_ps = PSL_USER;

#if defined(__amd64)

        pcb->pcb_fs = pcb->pcb_gs = 0;
        pcb->pcb_fsbase = pcb->pcb_gsbase = 0;

        if (ttoproc(t)->p_model == DATAMODEL_NATIVE) {

                rp->r_cs = UCS_SEL;

                /*
                 * Only allow 64-bit user code descriptor to be present.
                 */
                gdt_ucode_model(DATAMODEL_NATIVE);

                /*
                 * Arrange that the virtualized %fs and %gs GDT descriptors
                 * have a well-defined initial state (present, ring 3
                 * and of type data).
                 */
                pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;

                /*
                 * thrptr is either NULL or a value used by DTrace.
                 * 64-bit processes use %fs as their "thread" register.
                 */
                if (args->thrptr)
                        (void) lwp_setprivate(lwp, _LWP_FSBASE, args->thrptr);

        } else {

                rp->r_cs = U32CS_SEL;
                rp->r_ds = rp->r_es = UDS_SEL;

                /*
                 * Only allow 32-bit user code selector to be present.
                 */
                gdt_ucode_model(DATAMODEL_ILP32);

                pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;

                /*
                 * thrptr is either NULL or a value used by DTrace.
                 * 32-bit processes use %gs as their "thread" register.
                 */
                if (args->thrptr)
                        (void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);

        }

        pcb->pcb_ds = rp->r_ds;
        pcb->pcb_es = rp->r_es;
        pcb->pcb_rupdate = 1;

#elif defined(__i386)

        rp->r_cs = UCS_SEL;
        rp->r_ds = rp->r_es = UDS_SEL;

        /*
         * Arrange that the virtualized %fs and %gs GDT descriptors
         * have a well-defined initial state (present, ring 3
         * and of type data).
         */
        pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;

        /*
         * For %gs we need to reset LWP_GSBASE in pcb and the
         * per-cpu GDT descriptor. thrptr is either NULL
         * or a value used by DTrace.
         */
        if (args->thrptr)
                (void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);
#endif

        lwp->lwp_eosys = JUSTRETURN;
        t->t_post_sys = 1;

        /*
         * Here we initialize minimal fpu state.
         * The rest is done at the first floating
         * point instruction that a process executes.
         */
        pcb->pcb_fpu.fpu_flags = 0;

        /*
         * Add the lwp context handlers that virtualize segment registers,
         * and/or system call stacks etc.
         */
        lwp_installctx(lwp);
}

user_desc_t *
cpu_get_gdt(void)
{
        return (CPU->cpu_gdt);
}


#if !defined(lwp_getdatamodel)

/*
 * Return the datamodel of the given lwp.
 */
/*ARGSUSED*/
model_t
lwp_getdatamodel(klwp_t *lwp)
{
        return (lwp->lwp_procp->p_model);
}

#endif  /* !lwp_getdatamodel */

#if !defined(get_udatamodel)

model_t
get_udatamodel(void)
{
        return (curproc->p_model);
}

#endif  /* !get_udatamodel */