141490Smckusick /* 241490Smckusick * Copyright (c) 1988 University of Utah. 341490Smckusick * Copyright (c) 1990 The Regents of the University of California. 441490Smckusick * All rights reserved. 541490Smckusick * 641490Smckusick * This code is derived from software contributed to Berkeley by 741490Smckusick * the Systems Programming Group of the University of Utah Computer 841490Smckusick * Science Department. Originally from University of Wisconsin. 941490Smckusick * 1041490Smckusick * %sccs.include.redist.c% 1141490Smckusick * 1241490Smckusick * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$ 1341490Smckusick * 14*45737Smckusick * @(#)sysv_shm.c 7.10 (Berkeley) 12/05/90 1541490Smckusick */ 1641490Smckusick 1741490Smckusick /* 1841490Smckusick * System V shared memory routines. 1943630Skarels * TEMPORARY, until mmap is in place; 2043630Skarels * needed now for HP-UX compatibility and X server (yech!). 2141490Smckusick */ 2241490Smckusick 2341490Smckusick #ifdef SYSVSHM 2441490Smckusick 2541490Smckusick #include "param.h" 2641490Smckusick #include "systm.h" 2744405Skarels #include "user.h" 2841490Smckusick #include "kernel.h" 2941490Smckusick #include "proc.h" 3041490Smckusick #include "shm.h" 3141490Smckusick #include "malloc.h" 32*45737Smckusick #include "mman.h" 33*45737Smckusick #include "../vm/vm_param.h" 34*45737Smckusick #include "../vm/vm_map.h" 35*45737Smckusick #include "../vm/vm_kern.h" 36*45737Smckusick #include "../vm/vm_inherit.h" 37*45737Smckusick #include "../vm/vm_pager.h" 3841490Smckusick 3941490Smckusick #ifdef HPUXCOMPAT 4041490Smckusick #include "../hpux/hpux.h" 4141490Smckusick #endif 4241490Smckusick 4341490Smckusick int shmat(), shmctl(), shmdt(), shmget(); 4441490Smckusick int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget }; 4541490Smckusick int shmtot = 0; 4641490Smckusick 47*45737Smckusick /* 48*45737Smckusick * Per process internal structure for managing segments. 
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct shmdesc {
	vm_offset_t	shmd_uva;	/* user address the segment is attached at */
	int	shmd_id;	/* shmid of the attached segment (0 == slot free) */
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
	vm_offset_t	shmh_kva;	/* kernel address backing the segment */
	caddr_t	shmh_id;	/* pager id passed to vm_mmap (XXX encoded) */
};

vm_map_t shm_map;	/* address space for shared memory segments */

/*
 * Initialize the shared memory subsystem: carve a kernel submap large
 * enough for shminfo.shmall pages and mark every segment slot free.
 */
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	/* kmem_suballoc returns the start/end addresses; we do not need them */
	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		/* mode == 0 marks the slot unallocated */
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls.
 * Dispatches to shmat/shmctl/shmdt/shmget via the shmcalls table;
 * the remaining syscall arguments follow ``which'' (hence &uap[1]).
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;	/* index into shmcalls[] */
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment: look up an existing segment by key,
 * or create a new one when IPC_CREAT is requested (or key is
 * IPC_PRIVATE).  Returns the shmid through *retval.
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		/* IPC_PRIVATE never matches; force the creation path below */
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		/* find a free slot */
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		/* size in clicks, rounded; enforce the system-wide limit */
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		/* back the segment with anonymous (zero-fill) kernel memory */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			/* release the slot we reserved above */
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		/* segment exists; validate access and creation flags */
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	/* the shmid encodes both the slot and its reuse sequence number */
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
19342961Smckusick /* ARGSUSED */ 19442961Smckusick shmctl(p, uap, retval) 19542961Smckusick struct proc *p; 19642961Smckusick register struct args { 19741490Smckusick int shmid; 19841490Smckusick int cmd; 19941490Smckusick caddr_t buf; 20042961Smckusick } *uap; 20142961Smckusick int *retval; 20242961Smckusick { 20341490Smckusick register struct shmid_ds *shp; 20442961Smckusick register struct ucred *cred = u.u_cred; 20541490Smckusick struct shmid_ds sbuf; 20642961Smckusick int error; 20741490Smckusick 20842961Smckusick if (error = shmvalid(uap->shmid)) 20942961Smckusick return (error); 21041490Smckusick shp = &shmsegs[uap->shmid % SHMMMNI]; 21141490Smckusick switch (uap->cmd) { 21241490Smckusick case IPC_STAT: 21343408Shibler if (error = ipcaccess(&shp->shm_perm, IPC_R, cred)) 21442961Smckusick return (error); 21542961Smckusick return (copyout((caddr_t)shp, uap->buf, sizeof(*shp))); 21641490Smckusick 21741490Smckusick case IPC_SET: 21842961Smckusick if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid && 21942961Smckusick cred->cr_uid != shp->shm_perm.cuid) 22042961Smckusick return (EPERM); 22142961Smckusick if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf)) 22242961Smckusick return (error); 22342961Smckusick shp->shm_perm.uid = sbuf.shm_perm.uid; 22442961Smckusick shp->shm_perm.gid = sbuf.shm_perm.gid; 22542961Smckusick shp->shm_perm.mode = (shp->shm_perm.mode & ~0777) 22642961Smckusick | (sbuf.shm_perm.mode & 0777); 22742961Smckusick shp->shm_ctime = time.tv_sec; 22841490Smckusick break; 22941490Smckusick 23041490Smckusick case IPC_RMID: 23142961Smckusick if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid && 23242961Smckusick cred->cr_uid != shp->shm_perm.cuid) 23342961Smckusick return (EPERM); 23441490Smckusick /* set ctime? 
*/ 23541490Smckusick shp->shm_perm.key = IPC_PRIVATE; 23641490Smckusick shp->shm_perm.mode |= SHM_DEST; 23741490Smckusick if (shp->shm_nattch <= 0) 23841490Smckusick shmfree(shp); 23941490Smckusick break; 24041490Smckusick 24141490Smckusick #ifdef HPUXCOMPAT 24241490Smckusick case SHM_LOCK: 24341490Smckusick case SHM_UNLOCK: 24441490Smckusick /* don't really do anything, but make them think we did */ 24542922Smckusick if ((p->p_flag & SHPUX) == 0) 24642961Smckusick return (EINVAL); 24742961Smckusick if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid && 24842961Smckusick cred->cr_uid != shp->shm_perm.cuid) 24942961Smckusick return (EPERM); 25041490Smckusick break; 25141490Smckusick #endif 25241490Smckusick 25341490Smckusick default: 25442961Smckusick return (EINVAL); 25541490Smckusick } 25642961Smckusick return (0); 25741490Smckusick } 25841490Smckusick 25942961Smckusick /* 26042961Smckusick * Attach to shared memory segment. 26142961Smckusick */ 26242961Smckusick shmat(p, uap, retval) 26342961Smckusick struct proc *p; 26442961Smckusick register struct args { 26541490Smckusick int shmid; 26641490Smckusick caddr_t shmaddr; 26741490Smckusick int shmflg; 26842961Smckusick } *uap; 26942961Smckusick int *retval; 27042961Smckusick { 27141490Smckusick register struct shmid_ds *shp; 27241490Smckusick register int size; 27341490Smckusick caddr_t uva; 274*45737Smckusick int error; 275*45737Smckusick int flags; 276*45737Smckusick vm_prot_t prot; 277*45737Smckusick struct shmdesc *shmd; 27841490Smckusick 279*45737Smckusick /* 280*45737Smckusick * Allocate descriptors now (before validity check) 281*45737Smckusick * in case malloc() blocks. 
282*45737Smckusick */ 283*45737Smckusick shmd = (struct shmdesc *)p->p_shm; 284*45737Smckusick size = shminfo.shmseg * sizeof(struct shmdesc); 285*45737Smckusick if (shmd == NULL) { 286*45737Smckusick shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK); 287*45737Smckusick bzero((caddr_t)shmd, size); 288*45737Smckusick p->p_shm = (caddr_t)shmd; 289*45737Smckusick } 29042961Smckusick if (error = shmvalid(uap->shmid)) 29142961Smckusick return (error); 29241490Smckusick shp = &shmsegs[uap->shmid % SHMMMNI]; 29341490Smckusick if (shp->shm_handle == NULL) 29442349Smckusick panic("shmat NULL handle"); 29543408Shibler if (error = ipcaccess(&shp->shm_perm, 29643408Shibler (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred)) 29742961Smckusick return (error); 29841490Smckusick uva = uap->shmaddr; 29941490Smckusick if (uva && ((int)uva & (SHMLBA-1))) { 30041490Smckusick if (uap->shmflg & SHM_RND) 30141490Smckusick uva = (caddr_t) ((int)uva & ~(SHMLBA-1)); 30242961Smckusick else 30342961Smckusick return (EINVAL); 30441490Smckusick } 30541490Smckusick /* 30641490Smckusick * Make sure user doesn't use more than their fair share 30741490Smckusick */ 308*45737Smckusick for (size = 0; size < shminfo.shmseg; size++) { 309*45737Smckusick if (shmd->shmd_uva == 0) 310*45737Smckusick break; 311*45737Smckusick shmd++; 312*45737Smckusick } 31342961Smckusick if (size >= shminfo.shmseg) 31442961Smckusick return (EMFILE); 31541490Smckusick size = ctob(clrnd(btoc(shp->shm_segsz))); 316*45737Smckusick prot = VM_PROT_READ; 317*45737Smckusick if ((uap->shmflg & SHM_RDONLY) == 0) 318*45737Smckusick prot |= VM_PROT_WRITE; 319*45737Smckusick flags = MAP_ANON|MAP_SHARED; 320*45737Smckusick if (uva) 321*45737Smckusick flags |= MAP_FIXED; 322*45737Smckusick else 323*45737Smckusick uva = (caddr_t)0x1000000; /* XXX */ 324*45737Smckusick error = vm_mmap(p->p_map, &uva, (vm_size_t)size, prot, flags, 325*45737Smckusick ((struct shmhandle *)shp->shm_handle)->shmh_id, 0); 32642961Smckusick if (error) 
327*45737Smckusick return(error); 328*45737Smckusick shmd->shmd_uva = (vm_offset_t)uva; 329*45737Smckusick shmd->shmd_id = uap->shmid; 33041490Smckusick /* 33141490Smckusick * Fill in the remaining fields 33241490Smckusick */ 33342922Smckusick shp->shm_lpid = p->p_pid; 33441490Smckusick shp->shm_atime = time.tv_sec; 33541490Smckusick shp->shm_nattch++; 33642961Smckusick *retval = (int) uva; 33743408Shibler return (0); 33841490Smckusick } 33941490Smckusick 34042961Smckusick /* 34142961Smckusick * Detach from shared memory segment. 34242961Smckusick */ 34342961Smckusick /* ARGSUSED */ 34442961Smckusick shmdt(p, uap, retval) 34542961Smckusick struct proc *p; 34642961Smckusick struct args { 34742961Smckusick caddr_t shmaddr; 34842961Smckusick } *uap; 34942961Smckusick int *retval; 35041490Smckusick { 351*45737Smckusick register struct shmdesc *shmd; 352*45737Smckusick register int i; 35341490Smckusick 354*45737Smckusick shmd = (struct shmdesc *)p->p_shm; 355*45737Smckusick for (i = 0; i < shminfo.shmseg; i++, shmd++) 356*45737Smckusick if (shmd->shmd_uva && 357*45737Smckusick shmd->shmd_uva == (vm_offset_t)uap->shmaddr) 35841490Smckusick break; 359*45737Smckusick if (i == shminfo.shmseg) 360*45737Smckusick return(EINVAL); 361*45737Smckusick shmufree(p, shmd); 362*45737Smckusick shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid; 36341490Smckusick } 36441490Smckusick 365*45737Smckusick shmfork(rip, rpp, isvfork) 366*45737Smckusick struct proc *rip, *rpp; 367*45737Smckusick int isvfork; 36841490Smckusick { 369*45737Smckusick register struct shmdesc *shmd; 370*45737Smckusick register int size; 37141490Smckusick 372*45737Smckusick /* 373*45737Smckusick * Copy parents descriptive information 374*45737Smckusick */ 375*45737Smckusick size = shminfo.shmseg * sizeof(struct shmdesc); 376*45737Smckusick shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK); 377*45737Smckusick bcopy((caddr_t)rip->p_shm, (caddr_t)shmd, size); 378*45737Smckusick rpp->p_shm = (caddr_t)shmd; 
379*45737Smckusick /* 380*45737Smckusick * Increment reference counts 381*45737Smckusick */ 382*45737Smckusick for (size = 0; size < shminfo.shmseg; size++, shmd++) 383*45737Smckusick if (shmd->shmd_uva) 384*45737Smckusick shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++; 38541490Smckusick } 38641490Smckusick 387*45737Smckusick shmexit(p) 388*45737Smckusick struct proc *p; 38941490Smckusick { 390*45737Smckusick register struct shmdesc *shmd; 391*45737Smckusick register int i; 39241490Smckusick 393*45737Smckusick shmd = (struct shmdesc *)p->p_shm; 394*45737Smckusick for (i = 0; i < shminfo.shmseg; i++, shmd++) 395*45737Smckusick if (shmd->shmd_uva) 396*45737Smckusick shmufree(p, shmd); 397*45737Smckusick free((caddr_t)p->p_shm, M_SHM); 398*45737Smckusick p->p_shm = NULL; 39941490Smckusick } 40041490Smckusick 40141490Smckusick shmvalid(id) 40241490Smckusick register int id; 40341490Smckusick { 40441490Smckusick register struct shmid_ds *shp; 40541490Smckusick 40641490Smckusick if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni) 40742961Smckusick return(EINVAL); 40841490Smckusick shp = &shmsegs[id % SHMMMNI]; 40941490Smckusick if (shp->shm_perm.seq == (id / SHMMMNI) && 41041490Smckusick (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC) 41142961Smckusick return(0); 41242961Smckusick return(EINVAL); 41341490Smckusick } 41441490Smckusick 41541490Smckusick /* 41641490Smckusick * Free user resources associated with a shared memory segment 41741490Smckusick */ 418*45737Smckusick shmufree(p, shmd) 41942922Smckusick struct proc *p; 420*45737Smckusick struct shmdesc *shmd; 42141490Smckusick { 42241490Smckusick register struct shmid_ds *shp; 42341490Smckusick 424*45737Smckusick shp = &shmsegs[shmd->shmd_id % SHMMMNI]; 425*45737Smckusick (void) vm_deallocate(p->p_map, shmd->shmd_uva, 426*45737Smckusick ctob(clrnd(btoc(shp->shm_segsz)))); 427*45737Smckusick shmd->shmd_id = 0; 428*45737Smckusick shmd->shmd_uva = 0; 42941490Smckusick shp->shm_dtime = time.tv_sec; 43041490Smckusick 
if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST)) 43141490Smckusick shmfree(shp); 43241490Smckusick } 43341490Smckusick 43441490Smckusick /* 43541490Smckusick * Deallocate resources associated with a shared memory segment 43641490Smckusick */ 43741490Smckusick shmfree(shp) 43841490Smckusick register struct shmid_ds *shp; 43941490Smckusick { 44041490Smckusick caddr_t kva; 44141490Smckusick 44241490Smckusick if (shp->shm_handle == NULL) 44341490Smckusick panic("shmfree"); 444*45737Smckusick /* 445*45737Smckusick * Lose our lingering object reference by deallocating space 446*45737Smckusick * in kernel. Pager will also be deallocated as a side-effect. 447*45737Smckusick */ 448*45737Smckusick vm_deallocate(shm_map, 449*45737Smckusick ((struct shmhandle *)shp->shm_handle)->shmh_kva, 450*45737Smckusick clrnd(btoc(shp->shm_segsz))); 451*45737Smckusick free((caddr_t)shp->shm_handle, M_SHM); 45241490Smckusick shp->shm_handle = NULL; 45341490Smckusick shmtot -= clrnd(btoc(shp->shm_segsz)); 45441490Smckusick shp->shm_perm.mode = 0; 45541490Smckusick /* 45641490Smckusick * Increment the sequence number to ensure that outstanding 45741490Smckusick * shmids for this segment will be invalid in the event that 45841490Smckusick * the segment is reallocated. Note that shmids must be 45941490Smckusick * positive as decreed by SVID. 46041490Smckusick */ 46141490Smckusick shp->shm_perm.seq++; 46241490Smckusick if ((int)(shp->shm_perm.seq * SHMMMNI) < 0) 46341490Smckusick shp->shm_perm.seq = 0; 46441490Smckusick } 46541490Smckusick 46641490Smckusick /* 46741490Smckusick * XXX This routine would be common to all sysV style IPC 46841490Smckusick * (if the others were implemented). 
46941490Smckusick */ 47042961Smckusick ipcaccess(ipc, mode, cred) 47141490Smckusick register struct ipc_perm *ipc; 47242961Smckusick int mode; 47342961Smckusick register struct ucred *cred; 47441490Smckusick { 47541490Smckusick register int m; 47641490Smckusick 47742961Smckusick if (cred->cr_uid == 0) 47841490Smckusick return(0); 47941490Smckusick /* 48041490Smckusick * Access check is based on only one of owner, group, public. 48141490Smckusick * If not owner, then check group. 48241490Smckusick * If not a member of the group, then check public access. 48341490Smckusick */ 48441490Smckusick mode &= 0700; 48541490Smckusick m = ipc->mode; 48642961Smckusick if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) { 48741490Smckusick m <<= 3; 48842961Smckusick if (!groupmember(ipc->gid, cred) && 48942961Smckusick !groupmember(ipc->cgid, cred)) 49041490Smckusick m <<= 3; 49141490Smckusick } 49241490Smckusick if ((mode&m) == mode) 49342961Smckusick return (0); 49442961Smckusick return (EACCES); 49541490Smckusick } 49641490Smckusick #endif /* SYSVSHM */ 497