/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.  Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.5 (Berkeley) 06/07/90
 */

/*
 * System V shared memory routines.
 */

#ifdef SYSVSHM

#include "machine/pte.h"

#include "param.h"
#include "systm.h"
#include "syscontext.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
#include "shm.h"
#include "mapmem.h"
#include "malloc.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

int	shmfork(), shmexit();
struct	mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };

shminit()
{
	register int i;

	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		RETURN (EINVAL);
	RETURN ((*shmcalls[uap->which])(p, &uap[1], retval));
}
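/*
 * For illustration: shmsys() dispatches on uap->which and hands the
 * handler &uap[1], i.e. the handler's own argument struct is assumed
 * to sit directly after "which" in the argument block.  The indices
 * follow shmcalls[] above: 0 = shmat, 1 = shmctl, 2 = shmdt,
 * 3 = shmget, so a user-level shmget(key, size, flag) is expected to
 * arrive laid out as { which = 3, key, size, flag }.
 */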
/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	register int i;
	int error, size, rval = 0;
	caddr_t kva;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (kva == NULL) {
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
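/*
 * For illustration, the shmid returned above encodes both the slot
 * index and the slot's sequence number:
 *
 *	id   = seq * SHMMMNI + slot
 *	slot = id % SHMMMNI
 *	seq  = id / SHMMMNI		(checked in shmvalid() below)
 *
 * E.g., if SHMMMNI were 64 (an illustrative value only), slot 5 with
 * sequence number 2 yields id 133; once that segment is freed and the
 * slot's sequence number is bumped to 3, the stale id 133 no longer
 * passes shmvalid() even if slot 5 has been reallocated.
 */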
/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}
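/*
 * For illustration, IPC_RMID above does not free a segment that is
 * still attached: it marks the slot SHM_DEST (which also makes
 * shmvalid() fail for new operations) and the memory is actually
 * released by shmufree() below once shm_nattch drops to zero.
 */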
/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		caddr_t shmaddr;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	struct mapmem *mp;
	caddr_t uva;
	int error, error1, prot, shmmapin();

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg)
		return (EMFILE);
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
#if defined(hp300)
	prot |= MM_CI;
#endif
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
	if (error)
		return (error);
	if (error = mmmapin(p, mp, shmmapin)) {
		if (error1 = mmfree(p, mp))
			return (error1);
		return (error);
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}
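/*
 * For illustration, the SHMLBA rounding in shmat() above: SHMLBA is
 * the machine-dependent attach alignment.  Assuming, purely for the
 * example, SHMLBA == 0x1000, a request with shmaddr == 0x20345 and
 * SHM_RND set attaches at 0x20000, while the same address without
 * SHM_RND fails with EINVAL.  A shmaddr of 0 skips the check
 * entirely, presumably leaving placement up to mmalloc().
 */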
/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t shmaddr;
	} *uap;
	int *retval;
{
	register struct mapmem *mp;

	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
			break;
	if (mp == MMNIL)
		return (EINVAL);
	shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid;
	return (shmufree(p, mp));
}

/*
 * Return the page frame number for the given byte offset into a
 * segment, or -1 if the offset is out of range.
 */
shmmapin(mp, off)
	struct mapmem *mp;
	int off;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return (-1);
	return (((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
}

/*
 * Increment attach count on fork
 */
shmfork(mp, ischild)
	register struct mapmem *mp;
{
	if (!ischild)
		shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
}

/*
 * Detach from shared memory segment on exit (or exec)
 */
shmexit(mp)
	register struct mapmem *mp;
{
	struct proc *p = u.u_procp;		/* XXX */

	return (shmufree(p, mp));
}

/*
 * Check that a shmid refers to a currently valid segment: the slot
 * must be allocated, not marked for destruction, and the sequence
 * number encoded in the id must match the slot's.
 */
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return (EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return (0);
	return (EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, mp)
	struct proc *p;
	struct mapmem *mp;
{
	register struct shmid_ds *shp;
	int error;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	mmmapout(p, mp);
	error = mmfree(p, mp);
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
	return (error);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return (0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
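/*
 * For illustration, ipcaccess() above checks exactly one of the three
 * permission triplets.  With ipc->mode == 0640 (an illustrative value)
 * and a request for IPC_R|IPC_W (read and write in the owner field,
 * 0600):
 *
 *	owner or creator:	(0600 & 0640)      == 0600 -> granted
 *	group member:		(0600 & (0640<<3)) == 0400 -> EACCES
 *	everyone else:		(0600 & (0640<<6)) == 0    -> EACCES
 *
 * The super-user bypasses the check entirely.
 */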
#endif /* SYSVSHM */