xref: /csrg-svn/sys/kern/sysv_shm.c (revision 43630)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.8 (Berkeley) 06/24/90
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "machine/pte.h"

#include "param.h"
#include "systm.h"
#include "syscontext.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
#include "shm.h"
#include "mapmem.h"
#include "malloc.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
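/* running total of shared memory allocated, in clicks (checked against shminfo.shmall) */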
int	shmtot = 0;

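/*
 * Mapped-memory operations vector for shared memory regions:
 * shmfork() bumps the attach count on fork, shmexit() detaches a
 * region on exit or exec (see below).
 */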
int	shmfork(), shmexit();
struct	mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };

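/*
 * Initialize shared memory state: clamp the configured number of
 * segments to the compiled-in SHMMMNI limit and mark each slot free.
 */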
shminit()
{
	register int i;

	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		RETURN (EINVAL);
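	/* the call-specific arguments immediately follow "which" in the argument block */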
	RETURN ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	register int i;
	int error, size, rval = 0;
	caddr_t kva;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmget()s will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (kva == NULL) {
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
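	/*
	 * The returned identifier encodes both the table slot and the
	 * slot's sequence number, so stale ids can be detected by
	 * shmvalid() after the slot is reused.
	 */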
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

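/*
 * Illustrative user-level usage of these calls (a sketch only, not
 * part of the kernel source): a process might create, attach, detach
 * and remove a segment roughly as follows.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT|0600);
 *	char *addr = shmat(id, (char *)0, 0);
 *	addr[0] = 'x';				(use the memory)
 *	shmdt(addr);
 *	shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */
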
/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
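		/*
		 * Mark the segment for destruction; it is freed at once if
		 * nothing is attached, otherwise when the last attachment
		 * is removed (see shmufree()).
		 */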
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	struct mapmem *mp;
	caddr_t uva;
	int error, prot, shmmapin();

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
			(uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred))
		return (error);
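	/*
	 * If the caller supplied an unaligned address, round it down to
	 * an SHMLBA boundary when SHM_RND is set; otherwise reject it.
	 */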
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure the user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg)
		return (EMFILE);
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for the region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
#if defined(hp300)
	prot |= MM_CI;
#endif
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
	if (error)
		return (error);
	if (error = mmmapin(p, mp, shmmapin)) {
		(void) mmfree(p, mp);
		return (error);
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	shmaddr;
	} *uap;
	int *retval;
{
	register struct mapmem *mp;

	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
			break;
	if (mp == MMNIL)
		return (EINVAL);
	shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid;
	return (shmufree(p, mp));
}

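/*
 * Return the page frame number backing the given offset in the
 * segment, or -1 if the offset is beyond the end of the segment.
 * Used as the map-in function for mmmapin().
 */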
shmmapin(mp, off)
	struct mapmem *mp;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return(-1);
	return(((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
}

/*
 * Increment attach count on fork
 */
/* ARGSUSED */
shmfork(mp, ischild)
	register struct mapmem *mp;
{
	if (!ischild)
		shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
}

/*
 * Detach from shared memory segment on exit (or exec)
 */
shmexit(mp)
	struct mapmem *mp;
{
	struct proc *p = u.u_procp;		/* XXX */

	return (shmufree(p, mp));
}

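/*
 * Check that a user-supplied shmid refers to a valid, allocated,
 * non-destroyed segment; returns 0 if so, EINVAL otherwise.
 */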
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, mp)
	struct proc *p;
	struct mapmem *mp;
{
	register struct shmid_ds *shp;
	int error;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	mmmapout(p, mp);
	error = mmfree(p, mp);
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
	return (error);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
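	/*
	 * For example, with a segment mode of 0640, a requester that is
	 * neither the owner nor a member of the group ends up testing
	 * the "other" bits and is denied even read access.
	 */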
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}

#endif /* SYSVSHM */