/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
/*	  All Rights Reserved						*/

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/class.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/cred.h>
#include <sys/archsystm.h>
#include <sys/vmparam.h>
#include <sys/prsystm.h>
#include <sys/reboot.h>
#include <sys/uadmin.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/ucontext.h>
#include <sys/dnlc.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/debugreg.h>
#include <sys/thread.h>
#include <sys/vtrace.h>
#include <sys/consdev.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/cpu.h>
#include <sys/stack.h>
#include <sys/swap.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <sys/exec.h>
#include <sys/acct.h>
#include <sys/core.h>
#include <sys/corectl.h>
#include <sys/modctl.h>
#include <sys/tuneable.h>
#include <c2/audit.h>
#include <sys/bootconf.h>
#include <sys/brand.h>
#include <sys/dumphdr.h>
#include <sys/promif.h>
#include <sys/systeminfo.h>
#include <sys/kdi.h>
#include <sys/contract_impl.h>
#include <sys/x86_archext.h>
#include <sys/segments.h>
#include <sys/ontrap.h>
#include <sys/cpu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

/*
 * Compare the version of boot that boot says it is against
 * the version of boot the kernel expects.
 */
int
check_boot_version(int boots_version)
{
	if (boots_version == BO_VERSION)
		return (0);

	prom_printf("Wrong boot interface - kernel needs v%d found v%d\n",
	    BO_VERSION, boots_version);
	prom_panic("halting");
	/*NOTREACHED*/
}

/*
 * Process the physical installed list for boot.
 * Finds:
 * 1) the pfn of the highest installed physical page,
 * 2) the number of pages installed
 * 3) the number of distinct contiguous regions these pages fall into.
 * 4) the number of contiguous memory ranges
 */
void
installed_top_size_ex(
	struct memlist *list,	/* pointer to start of installed list */
	pfn_t *high_pfn,	/* return ptr for top value */
	pgcnt_t *pgcnt,		/* return ptr for sum of installed pages */
	int *ranges)		/* return ptr for the count of contig. ranges */
{
	pfn_t top = 0;
	pgcnt_t sumpages = 0;
	pfn_t highp;		/* high page in a chunk */
	int cnt = 0;

	for (; list; list = list->ml_next) {
		++cnt;
		highp = (list->ml_address + list->ml_size - 1) >> PAGESHIFT;
		if (top < highp)
			top = highp;
		sumpages += btop(list->ml_size);
	}

	*high_pfn = top;
	*pgcnt = sumpages;
	*ranges = cnt;
}

void
installed_top_size(
	struct memlist *list,	/* pointer to start of installed list */
	pfn_t *high_pfn,	/* return ptr for top value */
	pgcnt_t *pgcnt)		/* return ptr for sum of installed pages */
{
	int ranges;

	installed_top_size_ex(list, high_pfn, pgcnt, &ranges);
}

void
phys_install_has_changed(void)
{}

/*
 * Copy in a memory list from boot to kernel, with a filter function
 * to remove pages. The filter function can increase the address and/or
 * decrease the size to filter out pages.  It will also align addresses and
 * sizes to PAGESIZE.
 */
void
copy_memlist_filter(
	struct memlist *src,
	struct memlist **dstp,
	void (*filter)(uint64_t *, uint64_t *))
{
	struct memlist *dst, *prev;
	uint64_t addr;
	uint64_t size;
	uint64_t eaddr;

	dst = *dstp;
	prev = dst;

	/*
	 * Move through the memlist applying a filter against
	 * each range of memory. Note that we may apply the
	 * filter multiple times against each memlist entry.
	 */
	for (; src; src = src->ml_next) {
		addr = P2ROUNDUP(src->ml_address, PAGESIZE);
		eaddr = P2ALIGN(src->ml_address + src->ml_size, PAGESIZE);
		while (addr < eaddr) {
			size = eaddr - addr;
			if (filter != NULL)
				filter(&addr, &size);
			if (size == 0)
				break;
			dst->ml_address = addr;
			dst->ml_size = size;
			dst->ml_next = 0;
			if (prev == dst) {
				dst->ml_prev = 0;
				dst++;
			} else {
				dst->ml_prev = prev;
				prev->ml_next = dst;
				dst++;
				prev++;
			}
			addr += size;
		}
	}

	*dstp = dst;
}

/*
 * Kernel setup code, called from startup().
 */
void
kern_setup1(void)
{
	proc_t *pp;

	pp = &p0;

	proc_sched = pp;

	/*
	 * Initialize process 0 data structures
	 */
	pp->p_stat = SRUN;
	pp->p_flag = SSYS;

	pp->p_pidp = &pid0;
	pp->p_pgidp = &pid0;
	pp->p_sessp = &session0;
	pp->p_tlist = &t0;
	pid0.pid_pglink = pp;
	pid0.pid_pgtail = pp;

	/*
	 * XXX - we assume that the u-area is zeroed out except for
	 * ttolwp(curthread)->lwp_regs.
	 */
	PTOU(curproc)->u_cmask = (mode_t)CMASK;

	thread_init();		/* init thread_free list */
	pid_init();		/* initialize pid (proc) table */
	contract_init();	/* initialize contracts */

	init_pages_pp_maximum();
}

/*
 * Load a procedure into a thread.
 */
void
thread_load(kthread_t *t, void (*start)(), caddr_t arg, size_t len)
{
	caddr_t sp;
	size_t framesz;
	caddr_t argp;
	long *p;
	extern void thread_start();

	/*
	 * Push a "c" call frame onto the stack to represent
	 * the caller of "start".
	 */
	sp = t->t_stk;
	ASSERT(((uintptr_t)t->t_stk & (STACK_ENTRY_ALIGN - 1)) == 0);
	if (len != 0) {
		/*
		 * the object that arg points at is copied into the
		 * caller's frame.
		 */
		framesz = SA(len);
		sp -= framesz;
		ASSERT(sp > t->t_stkbase);
		argp = sp + SA(MINFRAME);
		bcopy(arg, argp, len);
		arg = argp;
	}
	/*
	 * Set up arguments (arg and len) on the caller's stack frame.
	 */
	p = (long *)sp;

	*--p = 0;		/* fake call */
	*--p = 0;		/* null frame pointer terminates stack trace */
	*--p = (long)len;
	*--p = (intptr_t)arg;
	*--p = (intptr_t)start;

	/*
	 * initialize thread to resume at thread_start() which will
	 * turn around and invoke (*start)(arg, len).
	 */
	t->t_pc = (uintptr_t)thread_start;
	t->t_sp = (uintptr_t)p;

	ASSERT((t->t_sp & (STACK_ENTRY_ALIGN - 1)) == 0);
}

/*
 * load user registers into lwp.
 */
/*ARGSUSED2*/
void
lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
{
	struct regs *rp = lwptoregs(lwp);

	setgregs(lwp, grp);
	rp->r_ps = PSL_USER;

	/*
	 * For 64-bit lwps, we allow one magic %fs selector value, and one
	 * magic %gs selector to point anywhere in the address space using
	 * %fsbase and %gsbase behind the scenes.  libc uses %fs to point
	 * at the ulwp_t structure.
	 *
	 * For 32-bit lwps, libc wedges its lwp thread pointer into the
	 * ucontext ESP slot (which is otherwise irrelevant to setting a
	 * ucontext) and LWPGS_SEL value into gregs[REG_GS].  This is so
	 * syslwp_create() can atomically setup %gs.
	 *
	 * See setup_context() in libc.
	 */
#ifdef _SYSCALL32_IMPL
	if (lwp_getdatamodel(lwp) == DATAMODEL_ILP32) {
		if (grp[REG_GS] == LWPGS_SEL)
			(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
	} else {
		/*
		 * See lwp_setprivate in kernel and setup_context in libc.
		 *
		 * Currently libc constructs a ucontext from whole cloth for
		 * every new (not main) lwp created.  For 64 bit processes
		 * %fsbase is directly set to point to the current thread
		 * pointer.  In the past (solaris 10) %fs was also set to
		 * LWPFS_SEL to
		 * indicate %fsbase.  Now we use the null GDT selector for
		 * this purpose.  LWP[FS|GS]_SEL are only intended for 32 bit
		 * processes.  To ease transition we support older libcs in
		 * the newer kernel by forcing %fs or %gs selector to null
		 * by calling lwp_setprivate if LWP[FS|GS]_SEL is passed in
		 * the ucontext.  This should be ripped out at some future
		 * date.  Another fix would be for libc to do a getcontext
		 * and inherit the null %fs/%gs from the current context but
		 * that means an extra system call and could hurt performance.
		 */
		if (grp[REG_FS] == 0x1bb)	/* hard code legacy LWPFS_SEL */
			(void) lwp_setprivate(lwp, _LWP_FSBASE,
			    (uintptr_t)grp[REG_FSBASE]);

		if (grp[REG_GS] == 0x1c3)	/* hard code legacy LWPGS_SEL */
			(void) lwp_setprivate(lwp, _LWP_GSBASE,
			    (uintptr_t)grp[REG_GSBASE]);
	}
#else
	if (grp[GS] == LWPGS_SEL)
		(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
#endif

	lwp->lwp_eosys = JUSTRETURN;
	lwptot(lwp)->t_post_sys = 1;
}

/*
 * set syscall()'s return values for a lwp.
 */
void
lwp_setrval(klwp_t *lwp, int v1, int v2)
{
	lwptoregs(lwp)->r_ps &= ~PS_C;
	lwptoregs(lwp)->r_r0 = v1;
	lwptoregs(lwp)->r_r1 = v2;
}

/*
 * set stack pointer for a lwp.
 */
void
lwp_setsp(klwp_t *lwp, caddr_t sp)
{
	lwptoregs(lwp)->r_sp = (intptr_t)sp;
}

/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
#if defined(__amd64)
	struct pcb *pcb = &clwp->lwp_pcb;
	struct regs *rp = lwptoregs(lwp);

	if (pcb->pcb_rupdate == 0) {
		pcb->pcb_ds = rp->r_ds;
		pcb->pcb_es = rp->r_es;
		pcb->pcb_fs = rp->r_fs;
		pcb->pcb_gs = rp->r_gs;
		pcb->pcb_rupdate = 1;
		lwptot(clwp)->t_post_sys = 1;
	}
	ASSERT(lwptot(clwp)->t_post_sys);
#endif

	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct regs));
}

/*
 * This function is currently unused on x86.
 */
/*ARGSUSED*/
void
lwp_freeregs(klwp_t *lwp, int isexec)
{}

/*
 * This function is currently unused on x86.
 */
void
lwp_pcb_exit(void)
{}

/*
 * Lwp context ops for segment registers.
 */

/*
 * Every time we come into the kernel (syscall, interrupt or trap
 * but not fast-traps) we capture the current values of the user's
 * segment registers into the lwp's reg structure. This includes
 * lcall for i386 generic system call support since it is handled
 * as a segment-not-present trap.
 *
 * Here we save the current values from the lwp regs into the pcb
 * and set pcb->pcb_rupdate to 1 to tell the rest of the kernel
 * that the pcb copy of the segment registers is the current one.
 * This ensures the segment registers are reloaded on the lwp's next
 * trip back to user land via update_sregs.
 * Finally we set t_post_sys to ensure that no system call fast-paths
 * its way out of the kernel via sysret.
 *
 * (This means that we need to have interrupts disabled when we test
 * t->t_post_sys in the syscall handlers; if the test fails, we need
 * to keep interrupts disabled until we return to userland so we can't
 * be switched away.)
 *
 * As a result of all this, we don't really have to do a whole lot if
 * the thread is just mucking about in the kernel, switching on and
 * off the cpu for whatever reason it feels like. And yet we still
 * preserve fast syscalls, cause if we -don't- get descheduled,
 * we never come here either.
 */

#define	VALID_LWP_DESC(udp) ((udp)->usd_type == SDT_MEMRWA && \
	    (udp)->usd_p == 1 && (udp)->usd_dpl == SEL_UPL)

/*ARGSUSED*/
void
lwp_segregs_save(klwp_t *lwp)
{
#if defined(__amd64)
	pcb_t *pcb = &lwp->lwp_pcb;
	struct regs *rp;

	ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
	ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));

	if (pcb->pcb_rupdate == 0) {
		rp = lwptoregs(lwp);

		/*
		 * If there's no update already pending, capture the current
		 * %ds/%es/%fs/%gs values from lwp's regs in case the user
		 * changed them; %fsbase and %gsbase are privileged so the
		 * kernel versions of these registers in pcb_fsbase and
		 * pcb_gsbase are always up-to-date.
		 */
		pcb->pcb_ds = rp->r_ds;
		pcb->pcb_es = rp->r_es;
		pcb->pcb_fs = rp->r_fs;
		pcb->pcb_gs = rp->r_gs;
		pcb->pcb_rupdate = 1;
		lwp->lwp_thread->t_post_sys = 1;
	}
#endif	/* __amd64 */

#if !defined(__xpv)	/* XXPV not sure if we can re-read gdt? */
	ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPFS], &lwp->lwp_pcb.pcb_fsdesc,
	    sizeof (lwp->lwp_pcb.pcb_fsdesc)) == 0);
	ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPGS], &lwp->lwp_pcb.pcb_gsdesc,
	    sizeof (lwp->lwp_pcb.pcb_gsdesc)) == 0);
#endif
}

#if defined(__amd64)

/*
 * Update the segment registers with new values from the pcb.
 *
 * We have to do this carefully, and in the following order,
 * in case any of the selectors points at a bogus descriptor.
 * If they do, we'll catch the trap with on_trap and return 1;
 * returns 0 on success.
 *
 * This is particularly tricky for %gs.
 * This routine must be executed under a cli.
 */
int
update_sregs(struct regs *rp, klwp_t *lwp)
{
	pcb_t *pcb = &lwp->lwp_pcb;
	ulong_t	kgsbase;
	on_trap_data_t otd;
	int rc = 0;

	if (!on_trap(&otd, OT_SEGMENT_ACCESS)) {

#if defined(__xpv)
		/*
		 * On the hypervisor this is easy. The hypercall below will
		 * swapgs and load %gs with the user selector. If the user
		 * selector is bad the hypervisor will catch the fault and
		 * load %gs with the null selector instead. Either way the
		 * kernel's gsbase is not damaged.
		 */
		kgsbase = (ulong_t)CPU;
		if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL,
		    pcb->pcb_gs) != 0) {
			no_trap();
			return (1);
		}

		rp->r_gs = pcb->pcb_gs;
		ASSERT((cpu_t *)kgsbase == CPU);

#else	/* __xpv */

		/*
		 * A little more complicated running native.
		 */
		kgsbase = (ulong_t)CPU;
		__set_gs(pcb->pcb_gs);

		/*
		 * If __set_gs fails it's because the new %gs is a bad %gs,
		 * we'll be taking a trap but with the original %gs and %gsbase
		 * undamaged (i.e. pointing at curcpu).
		 *
		 * We've just mucked up the kernel's gsbase.  Oops.  In
		 * particular we can't take any traps at all.  Make the newly
		 * computed gsbase be the hidden gs via __swapgs, and fix
		 * the kernel's gsbase back again. Later, when we return to
		 * userland we'll swapgs again restoring gsbase just loaded
		 * above.
		 */
		__swapgs();
		rp->r_gs = pcb->pcb_gs;

		/*
		 * restore kernel's gsbase
		 */
		wrmsr(MSR_AMD_GSBASE, kgsbase);

#endif	/* __xpv */

		/*
		 * Only override the descriptor base address if
		 * r_gs == LWPGS_SEL or if r_gs == NULL. A note on
		 * NULL descriptors -- 32-bit programs take faults
		 * if they dereference NULL descriptors; however,
		 * when 64-bit programs load them into %fs or %gs,
		 * they DON'T fault -- only the base address remains
		 * whatever it was from the last load.   Urk.
		 *
		 * XXX - note that lwp_setprivate now sets %fs/%gs to the
		 * null selector for 64 bit processes.  Whereas before
		 * %fs/%gs were set to LWP(FS|GS)_SEL regardless of
		 * the process's data model. For now we check for both
		 * values so that the kernel can also support the older
		 * libc. This should be ripped out at some point in the
		 * future.
		 */
		if (pcb->pcb_gs == LWPGS_SEL || pcb->pcb_gs == 0) {
#if defined(__xpv)
			if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER,
			    pcb->pcb_gsbase)) {
				no_trap();
				return (1);
			}
#else
			wrmsr(MSR_AMD_KGSBASE, pcb->pcb_gsbase);
#endif
		}

		__set_ds(pcb->pcb_ds);
		rp->r_ds = pcb->pcb_ds;

		__set_es(pcb->pcb_es);
		rp->r_es = pcb->pcb_es;

		__set_fs(pcb->pcb_fs);
		rp->r_fs = pcb->pcb_fs;

		/*
		 * Same as for %gs
		 */
		if (pcb->pcb_fs == LWPFS_SEL || pcb->pcb_fs == 0) {
#if defined(__xpv)
			if (HYPERVISOR_set_segment_base(SEGBASE_FS,
			    pcb->pcb_fsbase)) {
				no_trap();
				return (1);
			}
#else
			wrmsr(MSR_AMD_FSBASE, pcb->pcb_fsbase);
#endif
		}

	} else {
		cli();
		rc = 1;
	}
	no_trap();
	return (rc);
}

/*
 * Make sure any stale selectors are cleared from the segment registers
 * by putting KDS_SEL (the kernel's default %ds gdt selector) into them.
 * This is necessary because the kernel itself does not use %es, %fs, nor
 * %ds. (%cs and %ss are necessary, and are set up by the kernel - along with
 * %gs - to point to the current cpu struct.) If we enter kmdb while in the
 * kernel and resume with a stale ldt or brandz selector sitting there in a
 * segment register, kmdb will #gp fault if the stale selector points to,
 * for example, an ldt in the context of another process.
 *
 * WARNING: Intel and AMD chips behave differently when storing
 * the null selector into %fs and %gs while in long mode. On AMD
 * chips fsbase and gsbase are not cleared. But on Intel chips, storing
 * a null selector into %fs or %gs has the side effect of clearing
 * fsbase or gsbase.  For that reason we use KDS_SEL, which has
 * consistent behavior between AMD and Intel.
 *
 * Caller responsible for preventing cpu migration.
 */
void
reset_sregs(void)
{
	ulong_t kgsbase = (ulong_t)CPU;

	ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);

	cli();
	__set_gs(KGS_SEL);

	/*
	 * restore kernel gsbase
	 */
#if defined(__xpv)
	xen_set_segment_base(SEGBASE_GS_KERNEL, kgsbase);
#else
	wrmsr(MSR_AMD_GSBASE, kgsbase);
#endif

	sti();

	__set_ds(KDS_SEL);
	__set_es(0 | SEL_KPL);	/* selector RPL not ring 0 on hypervisor */
	__set_fs(KFS_SEL);
}

#endif	/* __amd64 */

#ifdef _SYSCALL32_IMPL

/*
 * Make it impossible for a process to change its data model.
 * We do this by toggling the present bits for the 32 and
 * 64-bit user code descriptors. That way if a user lwp attempts
 * to change its data model (by using the wrong code descriptor in
 * %cs) it will fault immediately. This also allows us to simplify
 * assertions and checks in the kernel.
 */

static void
gdt_ucode_model(model_t model)
{
	kpreempt_disable();
	if (model == DATAMODEL_NATIVE) {
		gdt_update_usegd(GDT_UCODE, &ucs_on);
		gdt_update_usegd(GDT_U32CODE, &ucs32_off);
	} else {
		gdt_update_usegd(GDT_U32CODE, &ucs32_on);
		gdt_update_usegd(GDT_UCODE, &ucs_off);
	}
	kpreempt_enable();
}

#endif	/* _SYSCALL32_IMPL */

/*
 * Restore lwp private fs and gs segment descriptors
 * on current cpu's GDT.
 */
static void
lwp_segregs_restore(klwp_t *lwp)
{
	pcb_t *pcb = &lwp->lwp_pcb;

	ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
	ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));

#ifdef	_SYSCALL32_IMPL
	gdt_ucode_model(DATAMODEL_NATIVE);
#endif

	gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
	gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);
}

#ifdef _SYSCALL32_IMPL

static void
lwp_segregs_restore32(klwp_t *lwp)
{
	/*LINTED*/
	cpu_t *cpu = CPU;
	pcb_t *pcb = &lwp->lwp_pcb;

	ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_fsdesc));
	ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_gsdesc));

	gdt_ucode_model(DATAMODEL_ILP32);
	gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
	gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);
}

#endif	/* _SYSCALL32_IMPL */

/*
 * If this is a process in a branded zone, then we want it to use the brand
 * syscall entry points instead of the standard Solaris entry points.  This
 * routine must be called when a new lwp is created within a branded zone
 * or when an existing lwp moves into a branded zone via a zone_enter()
 * operation.
 */
void
lwp_attach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));

	ASSERT(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) == 0);
	installctx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL);

	if (t == curthread) {
		kpreempt_disable();
		brand_interpositioning_enable();
		kpreempt_enable();
	}
}

/*
 * If this is a process in a branded zone, then we want it to disable the
 * brand syscall entry points.  This routine must be called when the last
 * lwp in a process is exiting in proc_exit().
 */
void
lwp_detach_brand_hdlrs(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);

	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
	if (t == curthread)
		kpreempt_disable();

	/* Remove the original context handlers */
	VERIFY(removectx(t, NULL, brand_interpositioning_disable,
	    brand_interpositioning_enable, NULL, NULL,
	    brand_interpositioning_disable, NULL) != 0);

	if (t == curthread) {
		/* Cleanup our MSR and IDT entries. */
		brand_interpositioning_disable();
		kpreempt_enable();
	}
}

/*
 * Add any lwp-associated context handlers to the lwp at the beginning
 * of the lwp's useful life.
 *
 * All paths which create lwp's invoke lwp_create(); lwp_create()
 * invokes lwp_stk_init() which initializes the stack, sets up
 * lwp_regs, and invokes this routine.
 *
 * All paths which destroy lwp's invoke lwp_exit() to rip the lwp
 * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it
 * ends up in thread_free() which invokes freectx(t, 0) before
 * invoking lwp_stk_fini().  When the lwp is recycled from death
 * row, lwp_stk_fini() is invoked, then thread_free(), and thus
 * freectx(t, 0) as before.
 *
 * In the case of exec, the surviving lwp is thoroughly scrubbed
 * clean; exec invokes freectx(t, 1) to destroy associated contexts.
 * On the way back to the new image, it invokes setregs() which
 * in turn invokes this routine.
 */
void
lwp_installctx(klwp_t *lwp)
{
	kthread_t *t = lwptot(lwp);
	int thisthread = t == curthread;
#ifdef _SYSCALL32_IMPL
	void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
	    lwp_segregs_restore : lwp_segregs_restore32;
#else
	void (*restop)(klwp_t *) = lwp_segregs_restore;
#endif

	/*
	 * Install the basic lwp context handlers on each lwp.
	 *
	 * On the amd64 kernel, the context handlers are responsible for
	 * virtualizing %ds, %es, %fs, and %gs to the lwp.  The register
	 * values are only ever changed via sys_rtt when the
	 * pcb->pcb_rupdate == 1.  Only sys_rtt gets to clear the bit.
	 *
	 * On the i386 kernel, the context handlers are responsible for
	 * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs.
	 */
	ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL) == 0);
	if (thisthread)
		kpreempt_disable();
	installctx(t, lwp, lwp_segregs_save, restop,
	    NULL, NULL, NULL, NULL);
	if (thisthread) {
		/*
		 * Since we're the right thread, set the values in the GDT
		 */
		restop(lwp);
		kpreempt_enable();
	}

	/*
	 * If we have sysenter/sysexit instructions enabled, we need
	 * to ensure that the hardware mechanism is kept up-to-date with the
	 * lwp's kernel stack pointer across context switches.
	 *
	 * sep_save zeros the sysenter stack pointer msr; sep_restore sets
	 * it to the lwp's kernel stack pointer (kstktop).
	 */
	if (is_x86_feature(x86_featureset, X86FSET_SEP)) {
#if defined(__amd64)
		caddr_t kstktop = (caddr_t)lwp->lwp_regs;
#elif defined(__i386)
		caddr_t kstktop = ((caddr_t)lwp->lwp_regs - MINFRAME) +
		    SA(sizeof (struct regs) + MINFRAME);
#endif
		ASSERT(removectx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);

		if (thisthread)
			kpreempt_disable();
		installctx(t, kstktop,
		    sep_save, sep_restore, NULL, NULL, NULL, NULL);
		if (thisthread) {
			/*
			 * We're the right thread, so set the stack pointer
			 * for the first sysenter instruction to use
			 */
			sep_restore(kstktop);
			kpreempt_enable();
		}
	}

	if (PROC_IS_BRANDED(ttoproc(t)))
		lwp_attach_brand_hdlrs(lwp);
}

/*
 * Clear registers on exec(2).
 */
void
setregs(uarg_t *args)
{
	struct regs *rp;
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	pcb_t *pcb = &lwp->lwp_pcb;
	greg_t sp;

	/*
	 * Initialize user registers
	 */
	(void) save_syscall_args();	/* copy args from registers first */
	rp = lwptoregs(lwp);
	sp = rp->r_sp;
	bzero(rp, sizeof (*rp));

	rp->r_ss = UDS_SEL;
	rp->r_sp = sp;
	rp->r_pc = args->entry;
	rp->r_ps = PSL_USER;

#if defined(__amd64)

	pcb->pcb_fs = pcb->pcb_gs = 0;
	pcb->pcb_fsbase = pcb->pcb_gsbase = 0;

	if (ttoproc(t)->p_model == DATAMODEL_NATIVE) {

		rp->r_cs = UCS_SEL;

		/*
		 * Only allow 64-bit user code descriptor to be present.
		 */
		gdt_ucode_model(DATAMODEL_NATIVE);

		/*
		 * Arrange that the virtualized %fs and %gs GDT descriptors
		 * have a well-defined initial state (present, ring 3
		 * and of type data).
		 */
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;

		/*
		 * thrptr is either NULL or a value used by DTrace.
		 * 64-bit processes use %fs as their "thread" register.
		 */
		if (args->thrptr)
			(void) lwp_setprivate(lwp, _LWP_FSBASE, args->thrptr);

	} else {

		rp->r_cs = U32CS_SEL;
		rp->r_ds = rp->r_es = UDS_SEL;

		/*
		 * only allow 32-bit user code selector to be present.
		 */
		gdt_ucode_model(DATAMODEL_ILP32);

		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;

		/*
		 * thrptr is either NULL or a value used by DTrace.
		 * 32-bit processes use %gs as their "thread" register.
		 */
		if (args->thrptr)
			(void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);

	}

	pcb->pcb_ds = rp->r_ds;
	pcb->pcb_es = rp->r_es;
	pcb->pcb_rupdate = 1;

#elif defined(__i386)

	rp->r_cs = UCS_SEL;
	rp->r_ds = rp->r_es = UDS_SEL;

	/*
	 * Arrange that the virtualized %fs and %gs GDT descriptors
	 * have a well-defined initial state (present, ring 3
	 * and of type data).
	 */
	pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;

	/*
	 * For %gs we need to reset LWP_GSBASE in pcb and the
	 * per-cpu GDT descriptor. thrptr is either NULL
	 * or a value used by DTrace.
	 */
	if (args->thrptr)
		(void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);
#endif

	lwp->lwp_eosys = JUSTRETURN;
	t->t_post_sys = 1;

	/*
	 * Here we initialize minimal fpu state.
	 * The rest is done at the first floating
	 * point instruction that a process executes.
	 */
	pcb->pcb_fpu.fpu_flags = 0;

	/*
	 * Add the lwp context handlers that virtualize segment registers,
	 * and/or system call stacks etc.
	 */
	lwp_installctx(lwp);
}

/*
 * Return a pointer to the current CPU's GDT.
 */
user_desc_t *
cpu_get_gdt(void)
{
	return (CPU->cpu_gdt);
}

#if !defined(lwp_getdatamodel)

/*
 * Return the datamodel of the given lwp.
 */
/*ARGSUSED*/
model_t
lwp_getdatamodel(klwp_t *lwp)
{
	return (lwp->lwp_procp->p_model);
}

#endif	/* !lwp_getdatamodel */

#if !defined(get_udatamodel)

model_t
get_udatamodel(void)
{
	return (curproc->p_model);
}

#endif	/* !get_udatamodel */