/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.13 (Berkeley) 04/20/91
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp300/hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
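 * One is allocated in shmget() when a segment is created and freed in
 * shmfree(); shmh_kva holds the kernel mapping of the segment and
 * shmh_id is the token passed to vm_mmap() so that later attaches map
 * the same backing object.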
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

/*
 * Initialize the shared memory facility: allocate the kernel submap
 * used to hold segments and mark every segment slot free.
 */
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(p->p_vmspace->vm_map, &uva, (vm_size_t)size, prot,
	    flags, ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
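 * Look up the attach descriptor whose user address matches and
 * release the mapping via shmufree().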
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	shmaddr;
	} *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/* record the last pid before shmufree() clears the descriptor */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

/*
 * Called at fork: give the child a copy of the parent's attach
 * descriptors and count the additional attachments.
 */
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

/*
 * Called at exit: detach any segments still attached and release
 * the process's descriptor array.
 */
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

/*
 * Check that an id names an allocated, non-destroyed segment with
 * the correct sequence number.
 */
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      clrnd(btoc(shp->shm_segsz)));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */