/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Inter-Process Communication Shared Memory Facility.
 *
 * See os/ipc.c for a description of common IPC functionality.
 *
 * Resource controls
 * -----------------
 *
 * Control:	zone.max-shm-ids (rc_zone_shmmni)
 * Description:	Maximum number of shared memory ids allowed a zone.
 *
 *   When shmget() is used to allocate a shared memory segment, one id
 *   is allocated.  If the id allocation doesn't succeed, shmget()
 *   fails and errno is set to ENOSPC.  Upon successful shmctl(,
 *   IPC_RMID) the id is deallocated.
 *
 * Control:	project.max-shm-ids (rc_project_shmmni)
 * Description:	Maximum number of shared memory ids allowed a project.
 *
 *   When shmget() is used to allocate a shared memory segment, one id
 *   is allocated.  If the id allocation doesn't succeed, shmget()
 *   fails and errno is set to ENOSPC.  Upon successful shmctl(,
 *   IPC_RMID) the id is deallocated.
 *
 * Control:	zone.max-shm-memory (rc_zone_shmmax)
 * Description:	Total amount of shared memory allowed a zone.
 *
 *   When shmget() is used to allocate a shared memory segment, the
 *   segment's size is allocated against this limit.  If the space
 *   allocation doesn't succeed, shmget() fails and errno is set to
 *   EINVAL.  The size will be deallocated once the last process has
 *   detached the segment and the segment has been successfully
 *   shmctl(, IPC_RMID)ed.
 *
 * Control:	project.max-shm-memory (rc_project_shmmax)
 * Description:	Total amount of shared memory allowed a project.
 *
 *   When shmget() is used to allocate a shared memory segment, the
 *   segment's size is allocated against this limit.  If the space
 *   allocation doesn't succeed, shmget() fails and errno is set to
 *   EINVAL.  The size will be deallocated once the last process has
 *   detached the segment and the segment has been successfully
 *   shmctl(, IPC_RMID)ed.
 */
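
/*
 * Illustrative administration of the resource controls above (a hedged
 * sketch, not part of this file's interface; the project name and value
 * are hypothetical).  The limits are typically raised with prctl(1) or
 * via a project(4) entry, e.g.
 *
 *	prctl -n project.max-shm-memory -v 4gb -r -i project user.dbuser
 *
 * or, persistently, an /etc/project entry such as
 *
 *	user.dbuser:100::dbuser::project.max-shm-memory=(privileged,4294967296,deny)
 *
 * See prctl(1), project(4), and resource_controls(5) for the
 * authoritative syntax.
 */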

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/lwpchan_impl.h>
#include <sys/avl.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/policy.h>
#include <sys/zone.h>

#include <sys/ipc.h>
#include <sys/ipc_impl.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>

#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/seg_vn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <vm/seg_spt.h>

#include <c2/audit.h>

static int shmem_lock(struct anon_map *amp);
static void shmem_unlock(struct anon_map *amp, uint_t lck);
static void sa_add(struct proc *pp, caddr_t addr, size_t len, ulong_t flags,
	kshmid_t *id);
static void shm_rm_amp(struct anon_map *amp, uint_t lckflag);
static void shm_dtor(kipc_perm_t *);
static void shm_rmid(kipc_perm_t *);
static void shm_remove_zone(zoneid_t, void *);

/*
 * Semantics for share_page_table and ism_off:
 *
 * These are hooks in /etc/system - only for internal testing purposes.
 *
 * Setting share_page_table automatically turns on the SHM_SHARE_MMU (ISM) flag
 * in a call to shmat(2). In other words, with share_page_table set, you always
 * get ISM, even if, say, DISM is specified. It should really be called
 * "ism_on".
 *
 * Setting ism_off turns off the SHM_SHARE_MMU flag from the flags passed to
 * shmat(2).
 *
 * If both share_page_table and ism_off are set, share_page_table prevails.
 *
 * Although these tunables should probably be removed, they do have some
 * external exposure; as long as they exist, they should at least work
 * sensibly.
 */

int share_page_table;
int ism_off;

/*
 * The following tunables are obsolete.  Though for compatibility we
 * still read and interpret shminfo_shmmax and shminfo_shmmni (see
 * os/project.c), the preferred mechanism for administering the IPC
 * Shared Memory facility is through the resource controls described at
 * the top of this file.
 */
size_t	shminfo_shmmax = 0x800000;	/* (obsolete) */
int	shminfo_shmmni = 100;		/* (obsolete) */
size_t	shminfo_shmmin = 1;		/* (obsolete) */
int	shminfo_shmseg = 6;		/* (obsolete) */
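
/*
 * Illustrative /etc/system usage for the tunables above (a sketch only;
 * the "shmsys:" module prefix and the legacy shminfo_* syntax should be
 * verified against system(4) and the tuning documentation for the release
 * in use):
 *
 *	set shmsys:ism_off = 1
 *	set shmsys:share_page_table = 1
 *	set shmsys:shminfo_shmmax = 0x2000000
 *
 * The first two are the internal testing hooks described above; the last
 * is the obsolete limit that is still read for compatibility.
 */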

extern rctl_hndl_t rc_zone_shmmax;
extern rctl_hndl_t rc_zone_shmmni;
extern rctl_hndl_t rc_project_shmmax;
extern rctl_hndl_t rc_project_shmmni;
static ipc_service_t *shm_svc;
static zone_key_t shm_zone_key;

/*
 * Module linkage information for the kernel.
 */
static uintptr_t shmsys(int, uintptr_t, uintptr_t, uintptr_t);

static struct sysent ipcshm_sysent = {
	4,
#ifdef	_SYSCALL32_IMPL
	SE_ARGC | SE_NOUNLOAD | SE_64RVAL,
#else	/* _SYSCALL32_IMPL */
	SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
#endif	/* _SYSCALL32_IMPL */
	(int (*)())shmsys
};

#ifdef	_SYSCALL32_IMPL
static struct sysent ipcshm_sysent32 = {
	4,
	SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
	(int (*)())shmsys
};
#endif	/* _SYSCALL32_IMPL */

static struct modlsys modlsys = {
	&mod_syscallops, "System V shared memory", &ipcshm_sysent
};

#ifdef	_SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit System V shared memory", &ipcshm_sysent32
};
#endif	/* _SYSCALL32_IMPL */

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef	_SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};


int
_init(void)
{
	int result;

	shm_svc = ipcs_create("shmids", rc_project_shmmni, rc_zone_shmmni,
	    sizeof (kshmid_t), shm_dtor, shm_rmid, AT_IPC_SHM,
	    offsetof(ipc_rqty_t, ipcq_shmmni));
	zone_key_create(&shm_zone_key, NULL, shm_remove_zone, NULL);

	if ((result = mod_install(&modlinkage)) == 0)
		return (0);

	(void) zone_key_delete(shm_zone_key);
	ipcs_destroy(shm_svc);

	return (result);
}

int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Shmat (attach shared segment) system call.
 */
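
/*
 * Illustrative userland use of this entry point (a hedged sketch, not part
 * of the kernel interface; the key and size are hypothetical):
 *
 *	int id = shmget(0x1234, 64 * 1024 * 1024, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, SHM_SHARE_MMU);	(ISM attach)
 *	... or shmat(id, NULL, SHM_PAGEABLE) for a DISM attach ...
 *	(void) shmdt(p);
 *
 * A given segment is either ISM or DISM; attempting to re-attach an
 * existing ISM segment as DISM (or vice versa) fails with EINVAL, as
 * checked below.
 */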
static int
shmat(int shmid, caddr_t uaddr, int uflags, uintptr_t *rvp)
{
	kshmid_t *sp;			/* shared memory header ptr */
	size_t	size;
	int	error = 0;
	proc_t *pp = curproc;
	struct as *as = pp->p_as;
	struct segvn_crargs crargs;	/* segvn create arguments */
	kmutex_t *lock;
	struct seg *segspt = NULL;
	caddr_t	addr = uaddr;
	int	flags = (uflags & SHMAT_VALID_FLAGS_MASK);
	int	useISM;
	uchar_t	prot = PROT_ALL;
	int	result;

	if ((lock = ipc_lookup(shm_svc, shmid, (kipc_perm_t **)&sp)) == NULL)
		return (EINVAL);
	if (error = ipcperm_access(&sp->shm_perm, SHM_R, CRED()))
		goto errret;
	if ((flags & SHM_RDONLY) == 0 &&
	    (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED())))
		goto errret;
	if (spt_invalid(flags)) {
		error = EINVAL;
		goto errret;
	}
	if (ism_off)
		flags = flags & ~SHM_SHARE_MMU;
	if (share_page_table) {
		flags = flags & ~SHM_PAGEABLE;
		flags = flags | SHM_SHARE_MMU;
	}
	useISM = (spt_locked(flags) || spt_pageable(flags));
	if (useISM && (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED())))
		goto errret;
	if (useISM && isspt(sp)) {
		uint_t newsptflags = flags | spt_flags(sp->shm_sptseg);
		/*
		 * If trying to change an existing {D}ISM segment from ISM
		 * to DISM or vice versa, return error.  Note that this
		 * validation of flags needs to be done after the effect of
		 * tunables such as ism_off and share_page_table, for
		 * semantics that are consistent with the tunables' settings.
		 */
		if (spt_invalid(newsptflags)) {
			error = EINVAL;
			goto errret;
		}
	}
	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	size = sp->shm_amp->size;
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);

	/* somewhere to record spt info for final detach */
	if (sp->shm_sptinfo == NULL)
		sp->shm_sptinfo = kmem_zalloc(sizeof (sptinfo_t), KM_SLEEP);

	as_rangelock(as);

	if (useISM) {
		/*
		 * Handle ISM
		 */
		uint_t	n, share_szc;
		size_t	share_size;
		struct	shm_data ssd;
		uintptr_t align_hint;

		n = page_num_pagesizes();
		if (n < 2) {	/* large pages aren't supported */
			as_rangeunlock(as);
			error = EINVAL;
			goto errret;
		}

		/*
		 * Pick a share pagesize to use, if (!isspt(sp)).
		 * Otherwise use the already chosen page size.
		 *
		 * For the initial shmat (!isspt(sp)), where sptcreate is
		 * called, map_pgsz is called to recommend a [D]ISM pagesize,
		 * important for systems which offer more than one potential
		 * [D]ISM pagesize.
		 * If the shmat is just to attach to an already created
		 * [D]ISM segment, then use the previously selected page size.
		 */
		if (!isspt(sp)) {
			share_size = map_pgsz(MAPPGSZ_ISM,
			    pp, addr, size, NULL);
			if (share_size == 0) {
				as_rangeunlock(as);
				error = EINVAL;
				goto errret;
			}
			share_szc = page_szc(share_size);
		} else {
			share_szc = sp->shm_sptseg->s_szc;
			share_size = page_get_pagesize(share_szc);
		}
		size = P2ROUNDUP(size, share_size);

		align_hint = share_size;
#if defined(__i386) || defined(__amd64)
		/*
		 * For 64 bit amd64, we want to share an entire page table
		 * if possible. We know (ugh) that there are 512 entries in
		 * a page table. The number for 32 bit non-PAE should be
		 * 1024, but I'm not going to special case that. Note that
		 * using 512 won't cause a failure below; it retries with
		 * align_hint set to share_size.
		 */
		while (size >= 512 * (uint64_t)align_hint)
			align_hint *= 512;
#endif /* __i386 || __amd64 */
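
		/*
		 * Worked example of the loop above (illustrative numbers):
		 * with a 2 MB share_size and a 4 GB segment, align_hint
		 * becomes 512 * 2 MB = 1 GB after one iteration, and the
		 * loop then stops since 4 GB < 512 * 1 GB; the attach
		 * address is aligned to a full page table's worth of
		 * mappings.
		 */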

#if defined(__sparcv9)
		if (addr == 0 && curproc->p_model == DATAMODEL_LP64) {
			/*
			 * If no address has been passed in, and this is a
			 * 64-bit process, we'll try to find an address
			 * in the predict-ISM zone.
			 */
			caddr_t predbase = (caddr_t)PREDISM_1T_BASE;
			size_t len = PREDISM_BOUND - PREDISM_1T_BASE;

			as_purge(as);
			if (as_gap(as, size + share_size, &predbase, &len,
			    AH_LO, (caddr_t)NULL) != -1) {
				/*
				 * We found an address which looks like a
				 * candidate.  We want to round it up, and
				 * then check that it's a valid user range.
				 * This assures that we won't fail below.
				 */
				addr = (caddr_t)P2ROUNDUP((uintptr_t)predbase,
				    share_size);

				if (valid_usr_range(addr, size, prot,
				    as, as->a_userlimit) != RANGE_OKAY) {
					addr = 0;
				}
			}
		}
#endif /* __sparcv9 */

		if (addr == 0) {
			for (;;) {
				addr = (caddr_t)align_hint;
				map_addr(&addr, size, 0ll, 1, MAP_ALIGN);
				if (addr != NULL || align_hint == share_size)
					break;
				align_hint = share_size;
			}
			if (addr == NULL) {
				as_rangeunlock(as);
				error = ENOMEM;
				goto errret;
			}
			ASSERT(((uintptr_t)addr & (align_hint - 1)) == 0);
		} else {
			/* Use the user-supplied attach address */
			caddr_t base;
			size_t len;

			/*
			 * Check that the address range
			 * 1) is properly aligned
			 * 2) is correct in unix terms
			 * 3) is within an unmapped address segment
			 */
			base = addr;
			len = size;		/* use spt aligned size */
			/* XXX - in SunOS, is sp->shm_segsz */
			if ((uintptr_t)base & (share_size - 1)) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
			result = valid_usr_range(base, len, prot, as,
			    as->a_userlimit);
			if (result == RANGE_BADPROT) {
				/*
				 * We try to accommodate processors which
				 * may not support execute permissions on
				 * all ISM segments by trying the check
				 * again but without PROT_EXEC.
				 */
				prot &= ~PROT_EXEC;
				result = valid_usr_range(base, len, prot, as,
				    as->a_userlimit);
			}
			as_purge(as);
			if (result != RANGE_OKAY ||
			    as_gap(as, len, &base, &len, AH_LO,
			    (caddr_t)NULL) != 0) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
		}

		if (!isspt(sp)) {
			error = sptcreate(size, &segspt, sp->shm_amp, prot,
			    flags, share_szc);
			if (error) {
				as_rangeunlock(as);
				goto errret;
			}
			sp->shm_sptinfo->sptas = segspt->s_as;
			sp->shm_sptseg = segspt;
			sp->shm_sptprot = prot;
			sp->shm_lkcnt = 0;
		} else if ((prot & sp->shm_sptprot) != sp->shm_sptprot) {
			/*
			 * Ensure we're attaching to an ISM segment with
			 * fewer or equal permissions than what we're
			 * allowed.  Fail if the segment has more
			 * permissions than what we're allowed.
			 */
			error = EACCES;
			as_rangeunlock(as);
			goto errret;
		}

		ssd.shm_sptseg = sp->shm_sptseg;
		ssd.shm_sptas = sp->shm_sptinfo->sptas;
		ssd.shm_amp = sp->shm_amp;
		error = as_map(as, addr, size, segspt_shmattach, &ssd);
		if (error == 0)
			sp->shm_ismattch++;	/* keep count of ISM attaches */
	} else {

		/*
		 * Normal case.
		 */
		if (flags & SHM_RDONLY)
			prot &= ~PROT_WRITE;

		if (addr == 0) {
			/* Let the system pick the attach address */
			map_addr(&addr, size, 0ll, 1, 0);
			if (addr == NULL) {
				as_rangeunlock(as);
				error = ENOMEM;
				goto errret;
			}
		} else {
			/* Use the user-supplied attach address */
			caddr_t base;
			size_t len;

			if (flags & SHM_RND)
				addr = (caddr_t)((uintptr_t)addr &
				    ~(SHMLBA - 1));
			/*
			 * Check that the address range
			 * 1) is properly aligned
			 * 2) is correct in unix terms
			 * 3) is within an unmapped address segment
			 */
			base = addr;
			len = size;		/* use aligned size */
			/* XXX - in SunOS, is sp->shm_segsz */
			if ((uintptr_t)base & PAGEOFFSET) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
			result = valid_usr_range(base, len, prot, as,
			    as->a_userlimit);
			if (result == RANGE_BADPROT) {
				prot &= ~PROT_EXEC;
				result = valid_usr_range(base, len, prot, as,
				    as->a_userlimit);
			}
			as_purge(as);
			if (result != RANGE_OKAY ||
			    as_gap(as, len, &base, &len,
			    AH_LO, (caddr_t)NULL) != 0) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
		}

		/* Initialize the create arguments and map the segment */
		crargs = *(struct segvn_crargs *)zfod_argsp;
		crargs.offset = 0;
		crargs.type = MAP_SHARED;
		crargs.amp = sp->shm_amp;
		crargs.prot = prot;
		crargs.maxprot = crargs.prot;
		crargs.flags = 0;

		error = as_map(as, addr, size, segvn_create, &crargs);
	}

	as_rangeunlock(as);
	if (error)
		goto errret;

	/* record shmem range for the detach */
	sa_add(pp, addr, (size_t)size, useISM ? SHMSA_ISM : 0, sp);
	*rvp = (uintptr_t)addr;

	sp->shm_atime = gethrestime_sec();
	sp->shm_lpid = pp->p_pid;
	ipc_hold(shm_svc, (kipc_perm_t *)sp);
errret:
	mutex_exit(lock);
	return (error);
}

static void
shm_dtor(kipc_perm_t *perm)
{
	kshmid_t *sp = (kshmid_t *)perm;
	uint_t cnt;
	size_t rsize;

	if (sp->shm_sptinfo) {
		if (isspt(sp))
			sptdestroy(sp->shm_sptinfo->sptas, sp->shm_amp);
		kmem_free(sp->shm_sptinfo, sizeof (sptinfo_t));
	}

	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	cnt = --sp->shm_amp->refcnt;
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);
	ASSERT(cnt == 0);
	shm_rm_amp(sp->shm_amp, sp->shm_lkcnt);

	if (sp->shm_perm.ipc_id != IPC_ID_INVAL) {
		rsize = ptob(btopr(sp->shm_segsz));
		ipcs_lock(shm_svc);
		sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax -= rsize;
		sp->shm_perm.ipc_zone->zone_shmmax -= rsize;
		ipcs_unlock(shm_svc);
	}
}

/* ARGSUSED */
static void
shm_rmid(kipc_perm_t *perm)
{
	/* nothing to do */
}

/*
 * Shmctl system call.
 */
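
/*
 * Illustrative userland use (a hedged sketch; not part of this file):
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("%lu attaches\n", (ulong_t)ds.shm_nattch);
 *	(void) shmctl(id, SHM_LOCK, NULL);	needs proc_lock_memory privilege
 *
 * The SHM_LOCK/SHM_UNLOCK privilege requirement corresponds to the
 * secpolicy_lock_memory() checks below.
 */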
/* ARGSUSED */
static int
shmctl(int shmid, int cmd, void *arg)
{
	kshmid_t		*sp;	/* shared memory header ptr */
	STRUCT_DECL(shmid_ds, ds);	/* for SVR4 IPC_SET */
	int			error = 0;
	struct cred		*cr = CRED();
	kmutex_t		*lock;
	model_t			mdl = get_udatamodel();
	struct shmid_ds64	ds64;
	shmatt_t		nattch;

	STRUCT_INIT(ds, mdl);

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
			return (EFAULT);
		break;

	case IPC_SET64:
		if (copyin(arg, &ds64, sizeof (struct shmid_ds64)))
			return (EFAULT);
		break;

	case IPC_RMID:
		return (ipc_rmid(shm_svc, shmid, cr));
	}

	if ((lock = ipc_lookup(shm_svc, shmid, (kipc_perm_t **)&sp)) == NULL)
		return (EINVAL);

	switch (cmd) {
	/* Set ownership and permissions. */
	case IPC_SET:
		if (error = ipcperm_set(shm_svc, cr, &sp->shm_perm,
		    &STRUCT_BUF(ds)->shm_perm, mdl))
			break;
		sp->shm_ctime = gethrestime_sec();
		break;

	case IPC_STAT:
		if (error = ipcperm_access(&sp->shm_perm, SHM_R, cr))
			break;

		nattch = sp->shm_perm.ipc_ref - 1;

		ipcperm_stat(&STRUCT_BUF(ds)->shm_perm, &sp->shm_perm, mdl);
		STRUCT_FSET(ds, shm_segsz, sp->shm_segsz);
		STRUCT_FSETP(ds, shm_amp, NULL);	/* kernel addr */
		STRUCT_FSET(ds, shm_lkcnt, sp->shm_lkcnt);
		STRUCT_FSET(ds, shm_lpid, sp->shm_lpid);
		STRUCT_FSET(ds, shm_cpid, sp->shm_cpid);
		STRUCT_FSET(ds, shm_nattch, nattch);
		STRUCT_FSET(ds, shm_cnattch, sp->shm_ismattch);
		STRUCT_FSET(ds, shm_atime, sp->shm_atime);
		STRUCT_FSET(ds, shm_dtime, sp->shm_dtime);
		STRUCT_FSET(ds, shm_ctime, sp->shm_ctime);

		mutex_exit(lock);
		if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
			return (EFAULT);

		return (0);

	case IPC_SET64:
		if (error = ipcperm_set64(shm_svc, cr,
		    &sp->shm_perm, &ds64.shmx_perm))
			break;
		sp->shm_ctime = gethrestime_sec();
		break;

	case IPC_STAT64:
		nattch = sp->shm_perm.ipc_ref - 1;

		ipcperm_stat64(&ds64.shmx_perm, &sp->shm_perm);
		ds64.shmx_segsz = sp->shm_segsz;
		ds64.shmx_lkcnt = sp->shm_lkcnt;
		ds64.shmx_lpid = sp->shm_lpid;
		ds64.shmx_cpid = sp->shm_cpid;
		ds64.shmx_nattch = nattch;
		ds64.shmx_cnattch = sp->shm_ismattch;
		ds64.shmx_atime = sp->shm_atime;
		ds64.shmx_dtime = sp->shm_dtime;
		ds64.shmx_ctime = sp->shm_ctime;

		mutex_exit(lock);
		if (copyout(&ds64, arg, sizeof (struct shmid_ds64)))
			return (EFAULT);

		return (0);

	/* Lock segment in memory */
	case SHM_LOCK:
		if ((error = secpolicy_lock_memory(cr)) != 0)
			break;

		if (!isspt(sp) && (sp->shm_lkcnt++ == 0)) {
			if (error = shmem_lock(sp->shm_amp)) {
				ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock,
				    RW_WRITER);
				cmn_err(CE_NOTE,
				    "shmctl - couldn't lock %ld pages into memory",
				    sp->shm_amp->size);
				ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);
				error = ENOMEM;
				sp->shm_lkcnt--;
				shmem_unlock(sp->shm_amp, 0);
			}
		}
		break;

	/* Unlock segment */
	case SHM_UNLOCK:
		if ((error = secpolicy_lock_memory(cr)) != 0)
			break;

		if (!isspt(sp)) {
			if (sp->shm_lkcnt && (--sp->shm_lkcnt == 0)) {
				shmem_unlock(sp->shm_amp, 1);
			}
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	mutex_exit(lock);
	return (error);
}

static void
shm_detach(proc_t *pp, segacct_t *sap)
{
	kshmid_t	*sp = sap->sa_id;
	size_t		len = sap->sa_len;
	caddr_t		addr = sap->sa_addr;

	/*
	 * Discard lwpchan mappings.
	 */
	if (pp->p_lcp != NULL)
		lwpchan_delete_mapping(pp, addr, addr + len);
	(void) as_unmap(pp->p_as, addr, len);

	/*
	 * Perform some detach-time accounting.
	 */
	(void) ipc_lock(shm_svc, sp->shm_perm.ipc_id);
	if (sap->sa_flags & SHMSA_ISM)
		sp->shm_ismattch--;
	sp->shm_dtime = gethrestime_sec();
	sp->shm_lpid = pp->p_pid;
	ipc_rele(shm_svc, (kipc_perm_t *)sp);	/* Drops lock */

	kmem_free(sap, sizeof (segacct_t));
}

static int
shmdt(caddr_t addr)
{
	proc_t *pp = curproc;
	segacct_t *sap, template;

	mutex_enter(&pp->p_lock);
	prbarrier(pp);			/* block /proc.  See shmgetid(). */

	template.sa_addr = addr;
	template.sa_len = 0;
	if ((pp->p_segacct == NULL) ||
	    ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)) {
		mutex_exit(&pp->p_lock);
		return (EINVAL);
	}
	if (sap->sa_addr != addr) {
		mutex_exit(&pp->p_lock);
		return (EINVAL);
	}
	avl_remove(pp->p_segacct, sap);
	mutex_exit(&pp->p_lock);

	shm_detach(pp, sap);

	return (0);
}

/*
 * Remove all shared memory segments associated with a given zone.
 * Called by zone_shutdown when the zone is halted.
 */
/*ARGSUSED1*/
static void
shm_remove_zone(zoneid_t zoneid, void *arg)
{
	ipc_remove_zone(shm_svc, zoneid);
}

/*
 * Shmget (create new shmem) system call.
 */
static int
shmget(key_t key, size_t size, int shmflg, uintptr_t *rvp)
{
	proc_t		*pp = curproc;
	kshmid_t	*sp;
	kmutex_t	*lock;
	int		error;

top:
	if (error = ipc_get(shm_svc, key, shmflg, (kipc_perm_t **)&sp, &lock))
		return (error);

	if (!IPC_FREE(&sp->shm_perm)) {
		/*
		 * A segment with the requested key exists.
		 */
		if (size > sp->shm_segsz) {
			mutex_exit(lock);
			return (EINVAL);
		}
	} else {
		/*
		 * A new segment should be created.
		 */
		size_t npages = btopr(size);
		size_t rsize = ptob(npages);

		/*
		 * Check rsize and the per-project and per-zone limit on
		 * shared memory.  Checking rsize handles both the size == 0
		 * case and the size < ULONG_MAX & PAGEMASK case (i.e.
		 * rounding up wraps a size_t).
		 */
		if (rsize == 0 ||
		    (rctl_test(rc_project_shmmax,
		    pp->p_task->tk_proj->kpj_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY) ||
		    (rctl_test(rc_zone_shmmax,
		    pp->p_zone->zone_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY)) {

			mutex_exit(&pp->p_lock);
			mutex_exit(lock);
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (EINVAL);
		}
		mutex_exit(&pp->p_lock);
		mutex_exit(lock);

		if (anon_resv(rsize) == 0) {
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (ENOMEM);
		}

		sp->shm_amp = anonmap_alloc(rsize, rsize);

		/*
		 * Store the original user's requested size, in bytes,
		 * rather than the page-aligned size.  The former is
		 * used for IPC_STAT and shmget() lookups.  The latter
		 * is saved in the anon_map structure and is used for
		 * calls to the vm layer.
		 */
		sp->shm_segsz = size;
		sp->shm_atime = sp->shm_dtime = 0;
		sp->shm_ctime = gethrestime_sec();
		sp->shm_lpid = (pid_t)0;
		sp->shm_cpid = curproc->p_pid;
		sp->shm_ismattch = 0;
		sp->shm_sptinfo = NULL;

		/*
		 * Check limits one last time, push id into global
		 * visibility, and update resource usage counts.
		 */
		if (error = ipc_commit_begin(shm_svc, key, shmflg,
		    (kipc_perm_t *)sp)) {
			if (error == EAGAIN)
				goto top;
			return (error);
		}

		if ((rctl_test(rc_project_shmmax,
		    sp->shm_perm.ipc_proj->kpj_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY) ||
		    (rctl_test(rc_zone_shmmax,
		    sp->shm_perm.ipc_zone->zone_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY)) {
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (EINVAL);
		}
		sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax += rsize;
		sp->shm_perm.ipc_zone->zone_shmmax += rsize;

		lock = ipc_commit_end(shm_svc, &sp->shm_perm);
	}

#ifdef C2_AUDIT
	if (audit_active)
		audit_ipcget(AT_IPC_SHM, (void *)sp);
#endif

	*rvp = (uintptr_t)(sp->shm_perm.ipc_id);

	mutex_exit(lock);
	return (0);
}

/*
 * shmids system call.
 */
static int
shmids(int *buf, uint_t nids, uint_t *pnids)
{
	return (ipc_ids(shm_svc, buf, nids, pnids));
}

/*
 * System entry point for shmat, shmctl, shmdt, and shmget system calls.
 */
static uintptr_t
shmsys(int opcode, uintptr_t a0, uintptr_t a1, uintptr_t a2)
{
	int	error;
	uintptr_t r_val = 0;

	switch (opcode) {
	case SHMAT:
		error = shmat((int)a0, (caddr_t)a1, (int)a2, &r_val);
		break;
	case SHMCTL:
		error = shmctl((int)a0, (int)a1, (void *)a2);
		break;
	case SHMDT:
		error = shmdt((caddr_t)a0);
		break;
	case SHMGET:
		error = shmget((key_t)a0, (size_t)a1, (int)a2, &r_val);
		break;
	case SHMIDS:
		error = shmids((int *)a0, (uint_t)a1, (uint_t *)a2);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return ((uintptr_t)set_errno(error));

	return (r_val);
}

/*
 * segacct_t comparator
 * This works as expected, with one minor change: the first of two real
 * segments with equal addresses is considered to be 'greater than' the
 * second.  We only return equal when searching using a template, in
 * which case we explicitly set the template segment's length to 0
 * (which is invalid for a real segment).
 */
static int
shm_sacompar(const void *x, const void *y)
{
	segacct_t	*sa1 = (segacct_t *)x;
	segacct_t	*sa2 = (segacct_t *)y;

	if (sa1->sa_addr < sa2->sa_addr) {
		return (-1);
	} else if (sa2->sa_len != 0) {
		if (sa1->sa_addr >= sa2->sa_addr + sa2->sa_len) {
			return (1);
		} else if (sa1->sa_len != 0) {
			return (1);
		} else {
			return (0);
		}
	} else if (sa1->sa_addr > sa2->sa_addr) {
		return (1);
	} else {
		return (0);
	}
}
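
/*
 * Worked example of the comparator (illustrative addresses): given a real
 * entry (sa2) with sa_addr == 0x1000 and sa_len == 0x2000, a zero-length
 * template (sa1) at 0x1800 compares equal, since the address falls inside
 * [0x1000, 0x3000); a template at 0x3000 or above compares greater, and
 * one below 0x1000 compares less.
 */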

/*
 * Add this record to the segacct list.
 */
static void
sa_add(struct proc *pp, caddr_t addr, size_t len, ulong_t flags, kshmid_t *id)
{
	segacct_t *nsap;
	avl_tree_t *tree = NULL;
	avl_index_t where;

	nsap = kmem_alloc(sizeof (segacct_t), KM_SLEEP);
	nsap->sa_addr = addr;
	nsap->sa_len = len;
	nsap->sa_flags = flags;
	nsap->sa_id = id;

	if (pp->p_segacct == NULL)
		tree = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);

	mutex_enter(&pp->p_lock);
	prbarrier(pp);			/* block /proc.  See shmgetid(). */

	if (pp->p_segacct == NULL) {
		avl_create(tree, shm_sacompar, sizeof (segacct_t),
		    offsetof(segacct_t, sa_tree));
		pp->p_segacct = tree;
	} else if (tree) {
		kmem_free(tree, sizeof (avl_tree_t));
	}

	/*
	 * We can ignore the result of avl_find, as the comparator will
	 * never return equal for segments with non-zero length.  This
	 * is a necessary hack to get around the fact that we do, in
	 * fact, have duplicate keys.
	 */
	(void) avl_find(pp->p_segacct, nsap, &where);
	avl_insert(pp->p_segacct, nsap, where);

	mutex_exit(&pp->p_lock);
}

/*
 * Duplicate parent's segacct records in child.
 */
void
shmfork(struct proc *ppp, struct proc *cpp)
{
	segacct_t *sap;
	kshmid_t *sp;
	kmutex_t *mp;

	ASSERT(ppp->p_segacct != NULL);

	/*
	 * We are the only lwp running in the parent so nobody can
	 * mess with our p_segacct list.  Thus it is safe to traverse
	 * the list without holding p_lock.  This is essential because
	 * we can't hold p_lock during a KM_SLEEP allocation.
	 */
	for (sap = (segacct_t *)avl_first(ppp->p_segacct); sap != NULL;
	    sap = (segacct_t *)AVL_NEXT(ppp->p_segacct, sap)) {
		sa_add(cpp, sap->sa_addr, sap->sa_len, sap->sa_flags,
		    sap->sa_id);
		sp = sap->sa_id;
		mp = ipc_lock(shm_svc, sp->shm_perm.ipc_id);
		if (sap->sa_flags & SHMSA_ISM)
			sp->shm_ismattch++;
		ipc_hold(shm_svc, (kipc_perm_t *)sp);
		mutex_exit(mp);
	}
}

/*
 * Detach shared memory segments from exiting process.
 */
void
shmexit(struct proc *pp)
{
	segacct_t *sap;
	avl_tree_t *tree;
	void *cookie = NULL;

	ASSERT(pp->p_segacct != NULL);

	mutex_enter(&pp->p_lock);
	prbarrier(pp);
	tree = pp->p_segacct;
	pp->p_segacct = NULL;
	mutex_exit(&pp->p_lock);

	while ((sap = avl_destroy_nodes(tree, &cookie)) != NULL)
		(void) shm_detach(pp, sap);

	avl_destroy(tree);
	kmem_free(tree, sizeof (avl_tree_t));
}

/*
 * At this time pages should be in memory, so just lock them.
 */
static void
lock_again(size_t npages, struct anon_map *amp)
{
	struct anon *ap;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	ulong_t anon_idx;
	anon_sync_obj_t cookie;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	for (anon_idx = 0; npages != 0; anon_idx++, npages--) {

		anon_array_enter(amp, anon_idx, &cookie);
		ap = anon_get_ptr(amp->ahp, anon_idx);
		swap_xlate(ap, &vp, &off);
		anon_array_exit(&cookie);

		pp = page_lookup(vp, (u_offset_t)off, SE_SHARED);
		if (pp == NULL) {
			panic("lock_again: page not in the system");
			/*NOTREACHED*/
		}
		(void) page_pp_lock(pp, 0, 0);
		page_unlock(pp);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
}

/* check if this segment is already locked. */
/*ARGSUSED*/
static int
check_locked(struct as *as, struct segvn_data *svd, size_t npages)
{
	struct vpage *vpp = svd->vpage;
	size_t i;

	if (svd->vpage == NULL)
		return (0);		/* unlocked */

	SEGVN_LOCK_ENTER(as, &svd->lock, RW_READER);
	for (i = 0; i < npages; i++, vpp++) {
		if (VPP_ISPPLOCK(vpp) == 0) {
			SEGVN_LOCK_EXIT(as, &svd->lock);
			return (1);	/* partially locked */
		}
	}
	SEGVN_LOCK_EXIT(as, &svd->lock);
	return (2);			/* locked */
}


/*
 * Attach the shared memory segment to the process
 * address space and lock the pages.
 */
static int
shmem_lock(struct anon_map *amp)
{
	size_t npages = btopr(amp->size);
	struct seg *seg;
	struct as *as;
	struct segvn_crargs crargs;
	struct segvn_data *svd;
	proc_t *p = curproc;
	caddr_t addr;
	uint_t error, ret;
	caddr_t seg_base;
	size_t	seg_sz;

	as = p->p_as;
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	/* check if shared memory is already attached */
	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
		svd = (struct segvn_data *)seg->s_data;
		if ((seg->s_ops == &segvn_ops) && (svd->amp == amp) &&
		    (amp->size == seg->s_size)) {
			switch (ret = check_locked(as, svd, npages)) {
			case 0:			/* unlocked */
			case 1:			/* partially locked */
				seg_base = seg->s_base;
				seg_sz = seg->s_size;

				AS_LOCK_EXIT(as, &as->a_lock);
				if ((error = as_ctl(as, seg_base, seg_sz,
				    MC_LOCK, 0, 0, NULL, 0)) == 0)
					lock_again(npages, amp);
				(void) as_ctl(as, seg_base, seg_sz, MC_UNLOCK,
				    0, 0, NULL, NULL);
				return (error);
			case 2:			/* locked */
				AS_LOCK_EXIT(as, &as->a_lock);
				lock_again(npages, amp);
				return (0);
			default:
				cmn_err(CE_WARN, "shmem_lock: deflt %d", ret);
				break;
			}
		}
	}
	AS_LOCK_EXIT(as, &as->a_lock);

	/* attach shm segment to our address space */
	as_rangelock(as);
	map_addr(&addr, amp->size, 0ll, 1, 0);
	if (addr == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	/* Initialize the create arguments and map the segment */
	crargs = *(struct segvn_crargs *)zfod_argsp;	/* structure copy */
	crargs.offset = (u_offset_t)0;
	crargs.type = MAP_SHARED;
	crargs.amp = amp;
	crargs.prot = PROT_ALL;
	crargs.maxprot = crargs.prot;
	crargs.flags = 0;

	error = as_map(as, addr, amp->size, segvn_create, &crargs);
	as_rangeunlock(as);
	if (!error) {
		if ((error = as_ctl(as, addr, amp->size, MC_LOCK, 0, 0,
		    NULL, 0)) == 0) {
			lock_again(npages, amp);
		}
		(void) as_unmap(as, addr, amp->size);
	}
	return (error);
}


/*
 * Unlock shared memory
 */
static void
shmem_unlock(struct anon_map *amp, uint_t lck)
{
	struct anon *ap;
	pgcnt_t npages = btopr(amp->size);
	struct vnode *vp;
	struct page *pp;
	anoff_t off;
	ulong_t anon_idx;

	for (anon_idx = 0; anon_idx < npages; anon_idx++) {

		if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
			if (lck) {
				panic("shmem_unlock: null app");
				/*NOTREACHED*/
			}
			continue;
		}
		swap_xlate(ap, &vp, &off);
		pp = page_lookup(vp, off, SE_SHARED);
		if (pp == NULL) {
			if (lck) {
				panic("shmem_unlock: page not in the system");
				/*NOTREACHED*/
			}
			continue;
		}
		if (pp->p_lckcnt) {
			page_pp_unlock(pp, 0, 0);
		}
		page_unlock(pp);
	}
}

/*
 * We call this routine when we have removed all references to this
 * amp.  This means all shmdt()s and the IPC_RMID have been done.
 */
static void
shm_rm_amp(struct anon_map *amp, uint_t lckflag)
{
	/*
	 * If we are finally deleting the
	 * shared memory, and if no one did
	 * the SHM_UNLOCK, we must do it now.
	 */
	shmem_unlock(amp, lckflag);

	/*
	 * Free up the anon_map.
	 */
	lgrp_shm_policy_fini(amp, NULL);
	if (amp->a_szc != 0) {
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		anon_shmap_free_pages(amp, 0, amp->size);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else {
		anon_free(amp->ahp, 0, amp->size);
	}
	anon_unresv(amp->swresv);
	anonmap_free(amp);
}

/*
 * Return the shared memory id for the process's virtual address.
 * Return SHMID_NONE if addr is not within a SysV shared memory segment.
 * Return SHMID_FREE if addr's SysV shared memory segment's id has been freed.
 *
 * shmgetid() is called from code in /proc with the process locked but
 * with pp->p_lock not held.  The address space lock is held, so we
 * cannot grab pp->p_lock here due to lock-ordering constraints.
 * Because of all this, modifications to the p_segacct list must only
 * be made after calling prbarrier() to ensure the process is not locked.
 * See shmdt() and sa_add(), above.  shmgetid() may also be called on a
 * thread's own process without the process locked.
 */
int
shmgetid(proc_t *pp, caddr_t addr)
{
	segacct_t *sap, template;

	ASSERT(MUTEX_NOT_HELD(&pp->p_lock));
	ASSERT((pp->p_proc_flag & P_PR_LOCK) || pp == curproc);

	if (pp->p_segacct == NULL)
		return (SHMID_NONE);

	template.sa_addr = addr;
	template.sa_len = 0;
	if ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)
		return (SHMID_NONE);

	if (IPC_FREE(&sap->sa_id->shm_perm))
		return (SHMID_FREE);

	return (sap->sa_id->shm_perm.ipc_id);
}