xref: /csrg-svn/sys/kern/sysv_shm.c (revision 63178)
141490Smckusick /*
241490Smckusick  * Copyright (c) 1988 University of Utah.
3*63178Sbostic  * Copyright (c) 1990, 1993
4*63178Sbostic  *	The Regents of the University of California.  All rights reserved.
541490Smckusick  *
641490Smckusick  * This code is derived from software contributed to Berkeley by
741490Smckusick  * the Systems Programming Group of the University of Utah Computer
841490Smckusick  * Science Department. Originally from University of Wisconsin.
941490Smckusick  *
1041490Smckusick  * %sccs.include.redist.c%
1141490Smckusick  *
1254856Shibler  * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
1341490Smckusick  *
14*63178Sbostic  *	@(#)sysv_shm.c	8.1 (Berkeley) 06/10/93
1541490Smckusick  */
1641490Smckusick 
1741490Smckusick /*
1841490Smckusick  * System V shared memory routines.
1943630Skarels  * TEMPORARY, until mmap is in place;
2043630Skarels  * needed now for HP-UX compatibility and X server (yech!).
2141490Smckusick  */
2241490Smckusick 
2341490Smckusick #ifdef SYSVSHM
2441490Smckusick 
2556517Sbostic #include <sys/param.h>
2656517Sbostic #include <sys/systm.h>
2756517Sbostic #include <sys/kernel.h>
2856517Sbostic #include <sys/proc.h>
2956517Sbostic #include <sys/shm.h>
3056517Sbostic #include <sys/malloc.h>
3156517Sbostic #include <sys/mman.h>
3241490Smckusick 
3356517Sbostic #include <vm/vm.h>
3456517Sbostic #include <vm/vm_kern.h>
3556517Sbostic #include <vm/vm_inherit.h>
3656517Sbostic #include <vm/vm_pager.h>
3756517Sbostic 
3841490Smckusick int	shmat(), shmctl(), shmdt(), shmget();
3941490Smckusick int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
4041490Smckusick int	shmtot = 0;
4141490Smckusick 
4245737Smckusick /*
4345737Smckusick  * Per process internal structure for managing segments.
4445737Smckusick  * Each process using shm will have an array of ``shmseg'' of these.
4545737Smckusick  */
4645737Smckusick struct	shmdesc {
4745737Smckusick 	vm_offset_t	shmd_uva;
4845737Smckusick 	int		shmd_id;
4945737Smckusick };
5041490Smckusick 
5145737Smckusick /*
5245737Smckusick  * Per segment internal structure (shm_handle).
5345737Smckusick  */
5445737Smckusick struct	shmhandle {
5545737Smckusick 	vm_offset_t	shmh_kva;
5645737Smckusick 	caddr_t		shmh_id;
5745737Smckusick };
5845737Smckusick 
5945737Smckusick vm_map_t shm_map;	/* address space for shared memory segments */
6045737Smckusick 
/*
 * shminit --
 *	Boot-time initialization of the SysV shared memory facility.
 *	Carves a kernel submap (shm_map) big enough to hold every
 *	shared memory page (shminfo.shmall * NBPG bytes) and marks
 *	all segment slots free (mode == 0, seq == 0).
 */
6141490Smckusick shminit()
6241490Smckusick {
6341490Smckusick 	register int i;
6445737Smckusick 	vm_offset_t whocares1, whocares2;	/* suballoc out-params; values unused */
6541490Smckusick 
6645737Smckusick 	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
6745737Smckusick 				shminfo.shmall * NBPG, FALSE);
6841490Smckusick 	if (shminfo.shmmni > SHMMMNI)	/* clamp; shmsegs[] presumably has SHMMMNI slots -- confirm */
6941490Smckusick 		shminfo.shmmni = SHMMMNI;
7041490Smckusick 	for (i = 0; i < shminfo.shmmni; i++) {
7141490Smckusick 		shmsegs[i].shm_perm.mode = 0;
7241490Smckusick 		shmsegs[i].shm_perm.seq = 0;
7341490Smckusick 	}
7441490Smckusick }
7541490Smckusick 
7642961Smckusick /*
7742961Smckusick  * Entry point for all SHM calls
7842961Smckusick  */
7954932Storek struct shmsys_args {
8054932Storek 	u_int which;		/* index into shmcalls[]: 0=shmat 1=shmctl 2=shmdt 3=shmget */
8154932Storek };
/*
 * shmsys --
 *	Single syscall entry point that demultiplexes to the individual
 *	shm operations.  The real call's arguments follow ``which'' in the
 *	argument block, hence the &uap[1] passed through.
 *	Returns EINVAL for an out-of-range selector, otherwise whatever
 *	the selected operation returns.
 */
8242961Smckusick shmsys(p, uap, retval)
8342961Smckusick 	struct proc *p;
8454932Storek 	struct shmsys_args *uap;
8542961Smckusick 	int *retval;
8641490Smckusick {
8741490Smckusick 
8842961Smckusick 	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
8944405Skarels 		return (EINVAL);
9044405Skarels 	return ((*shmcalls[uap->which])(p, &uap[1], retval));
9141490Smckusick }
9241490Smckusick 
9342961Smckusick /*
9442961Smckusick  * Get a shared memory segment
9542961Smckusick  */
9654932Storek struct shmget_args {
9754932Storek 	key_t key;		/* IPC key, or IPC_PRIVATE to force creation */
9854932Storek 	int size;		/* requested segment size in bytes */
9954932Storek 	int shmflg;		/* IPC_CREAT/IPC_EXCL plus 0777 permission bits */
10054932Storek };
/*
 * shmget --
 *	Look up (and possibly create) a shared memory segment.
 *	On success *retval holds the segment id, encoded as
 *	seq * SHMMMNI + slot-index so stale ids from a recycled slot
 *	fail shmvalid().
 */
10142961Smckusick shmget(p, uap, retval)
10242961Smckusick 	struct proc *p;
10354932Storek 	register struct shmget_args *uap;
10442961Smckusick 	int *retval;
10542961Smckusick {
10641490Smckusick 	register struct shmid_ds *shp;
10747540Skarels 	register struct ucred *cred = p->p_ucred;
10841490Smckusick 	register int i;
10942961Smckusick 	int error, size, rval = 0;
11045737Smckusick 	register struct shmhandle *shmh;
11141490Smckusick 
11241490Smckusick 	/* look up the specified shm_id */
11341490Smckusick 	if (uap->key != IPC_PRIVATE) {
11441490Smckusick 		for (i = 0; i < shminfo.shmmni; i++)
11541490Smckusick 			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
11641490Smckusick 			    shmsegs[i].shm_perm.key == uap->key) {
11741490Smckusick 				rval = i;
11841490Smckusick 				break;
11941490Smckusick 			}
12041490Smckusick 	} else
12141490Smckusick 		i = shminfo.shmmni;	/* IPC_PRIVATE never matches: fall into creation path */
12241490Smckusick 
12341490Smckusick 	/* create a new shared segment if necessary */
12441490Smckusick 	if (i == shminfo.shmmni) {
12542961Smckusick 		if ((uap->shmflg & IPC_CREAT) == 0)
12642961Smckusick 			return (ENOENT);
12742961Smckusick 		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
12842961Smckusick 			return (EINVAL);
12941490Smckusick 		for (i = 0; i < shminfo.shmmni; i++)
13041490Smckusick 			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
13141490Smckusick 				rval = i;
13241490Smckusick 				break;
13341490Smckusick 			}
13442961Smckusick 		if (i == shminfo.shmmni)
13542961Smckusick 			return (ENOSPC);
13641490Smckusick 		size = clrnd(btoc(uap->size));	/* bytes -> clicks, cluster-rounded */
13742961Smckusick 		if (shmtot + size > shminfo.shmall)	/* system-wide page limit */
13842961Smckusick 			return (ENOMEM);
13941490Smckusick 		shp = &shmsegs[rval];
14041490Smckusick 		/*
14141490Smckusick 		 * We need to do a couple of things to ensure consistency
14241490Smckusick 		 * in case we sleep in malloc().  We mark segment as
14341490Smckusick 		 * allocated so that other shmgets() will not allocate it.
14441490Smckusick 		 * We mark it as "destroyed" to insure that shmvalid() is
14541490Smckusick 		 * false making most operations fail (XXX).  We set the key,
14641490Smckusick 		 * so that other shmget()s will fail.
14741490Smckusick 		 */
14841490Smckusick 		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
14941490Smckusick 		shp->shm_perm.key = uap->key;
15045737Smckusick 		shmh = (struct shmhandle *)
15145737Smckusick 			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
15245737Smckusick 		shmh->shmh_kva = 0;
15345737Smckusick 		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		/* Map the segment into the kernel submap; this creates the
		 * anonymous VM object that attaching processes will share. */
15445737Smckusick 		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
15558595Shibler 				VM_PROT_ALL, VM_PROT_ALL,
15658595Shibler 				MAP_ANON, shmh->shmh_id, 0);
15745737Smckusick 		if (error) {
			/* roll back: release handle and free the slot */
15845737Smckusick 			free((caddr_t)shmh, M_SHM);
15941490Smckusick 			shp->shm_perm.mode = 0;
16045737Smckusick 			return(ENOMEM);	/* vm_mmap error collapsed to ENOMEM */
16141490Smckusick 		}
16245737Smckusick 		shp->shm_handle = (void *) shmh;
16341490Smckusick 		shmtot += size;
16442961Smckusick 		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
16542961Smckusick 		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
16641490Smckusick 		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);	/* clears SHM_DEST: slot now live */
16741490Smckusick 		shp->shm_segsz = uap->size;
16842922Smckusick 		shp->shm_cpid = p->p_pid;
16941490Smckusick 		shp->shm_lpid = shp->shm_nattch = 0;
17041490Smckusick 		shp->shm_atime = shp->shm_dtime = 0;
17141490Smckusick 		shp->shm_ctime = time.tv_sec;
17241490Smckusick 	} else {
17341490Smckusick 		shp = &shmsegs[rval];
17441490Smckusick 		/* XXX: probably not the right thing to do */
17542961Smckusick 		if (shp->shm_perm.mode & SHM_DEST)
17642961Smckusick 			return (EBUSY);
17743408Shibler 		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
17842961Smckusick 			return (error);
17942961Smckusick 		if (uap->size && uap->size > shp->shm_segsz)
18042961Smckusick 			return (EINVAL);
18142961Smckusick 		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
18242961Smckusick 			return (EEXIST);
18341490Smckusick 	}
18442961Smckusick 	*retval = shp->shm_perm.seq * SHMMMNI + rval;	/* id encodes generation + slot */
18543408Shibler 	return (0);
18641490Smckusick }
18741490Smckusick 
18842961Smckusick /*
18942961Smckusick  * Shared memory control
19042961Smckusick  */
19154932Storek struct shmctl_args {
19254932Storek 	int shmid;		/* segment id from shmget() */
19354932Storek 	int cmd;		/* IPC_STAT, IPC_SET, or IPC_RMID */
19454932Storek 	caddr_t buf;		/* user struct shmid_ds (STAT out / SET in) */
19554932Storek };
/*
 * shmctl --
 *	Shared memory control operations:
 *	IPC_STAT copies the segment descriptor out to the user;
 *	IPC_SET lets the owner/creator/superuser change uid, gid and
 *	the low 0777 mode bits; IPC_RMID marks the segment for
 *	destruction, freeing it immediately if nothing is attached.
 */
19642961Smckusick /* ARGSUSED */
19742961Smckusick shmctl(p, uap, retval)
19842961Smckusick 	struct proc *p;
19954932Storek 	register struct shmctl_args *uap;
20042961Smckusick 	int *retval;
20142961Smckusick {
20241490Smckusick 	register struct shmid_ds *shp;
20347540Skarels 	register struct ucred *cred = p->p_ucred;
20441490Smckusick 	struct shmid_ds sbuf;
20542961Smckusick 	int error;
20641490Smckusick 
20742961Smckusick 	if (error = shmvalid(uap->shmid))
20842961Smckusick 		return (error);
20941490Smckusick 	shp = &shmsegs[uap->shmid % SHMMMNI];
21041490Smckusick 	switch (uap->cmd) {
21141490Smckusick 	case IPC_STAT:
21243408Shibler 		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
21342961Smckusick 			return (error);
21442961Smckusick 		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));
21541490Smckusick 
21641490Smckusick 	case IPC_SET:
		/* only superuser (uid 0), owner, or creator may change it */
21742961Smckusick 		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
21842961Smckusick 		    cred->cr_uid != shp->shm_perm.cuid)
21942961Smckusick 			return (EPERM);
22042961Smckusick 		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
22142961Smckusick 			return (error);
22242961Smckusick 		shp->shm_perm.uid = sbuf.shm_perm.uid;
22342961Smckusick 		shp->shm_perm.gid = sbuf.shm_perm.gid;
22442961Smckusick 		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
22542961Smckusick 			| (sbuf.shm_perm.mode & 0777);	/* only perm bits are user-settable */
22642961Smckusick 		shp->shm_ctime = time.tv_sec;
22741490Smckusick 		break;
22841490Smckusick 
22941490Smckusick 	case IPC_RMID:
23042961Smckusick 		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
23142961Smckusick 		    cred->cr_uid != shp->shm_perm.cuid)
23242961Smckusick 			return (EPERM);
23341490Smckusick 		/* set ctime? */
		/* key reset hides the segment from future shmget() lookups;
		 * SHM_DEST defers the actual free until the last detach. */
23441490Smckusick 		shp->shm_perm.key = IPC_PRIVATE;
23541490Smckusick 		shp->shm_perm.mode |= SHM_DEST;
23641490Smckusick 		if (shp->shm_nattch <= 0)
23741490Smckusick 			shmfree(shp);
23841490Smckusick 		break;
23941490Smckusick 
24041490Smckusick 	default:
24142961Smckusick 		return (EINVAL);
24241490Smckusick 	}
24342961Smckusick 	return (0);
24441490Smckusick }
24541490Smckusick 
24642961Smckusick /*
24742961Smckusick  * Attach to shared memory segment.
24842961Smckusick  */
24954932Storek struct shmat_args {
25054932Storek 	int	shmid;		/* segment id from shmget() */
25154932Storek 	caddr_t	shmaddr;	/* requested user address, or 0 for "anywhere" */
25254932Storek 	int	shmflg;		/* SHM_RDONLY and/or SHM_RND */
25354932Storek };
/*
 * shmat --
 *	Map a shared memory segment into the calling process.
 *	On success *retval is the user address of the mapping, a free
 *	slot in the per-process shmdesc array records the attach, and
 *	the segment's lpid/atime/nattch are updated.
 */
25442961Smckusick shmat(p, uap, retval)
25542961Smckusick 	struct proc *p;
25654932Storek 	register struct shmat_args *uap;
25742961Smckusick 	int *retval;
25842961Smckusick {
25941490Smckusick 	register struct shmid_ds *shp;
26041490Smckusick 	register int size;
26141490Smckusick 	caddr_t uva;
26245737Smckusick 	int error;
26345737Smckusick 	int flags;
26445737Smckusick 	vm_prot_t prot;
26545737Smckusick 	struct shmdesc *shmd;
26641490Smckusick 
26745737Smckusick 	/*
26845737Smckusick 	 * Allocate descriptors now (before validity check)
26945737Smckusick 	 * in case malloc() blocks.
27045737Smckusick 	 */
27147540Skarels 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
27245737Smckusick 	size = shminfo.shmseg * sizeof(struct shmdesc);
27345737Smckusick 	if (shmd == NULL) {
27445737Smckusick 		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
27545737Smckusick 		bzero((caddr_t)shmd, size);
27647540Skarels 		p->p_vmspace->vm_shm = (caddr_t)shmd;
27745737Smckusick 	}
27842961Smckusick 	if (error = shmvalid(uap->shmid))
27942961Smckusick 		return (error);
28041490Smckusick 	shp = &shmsegs[uap->shmid % SHMMMNI];
28141490Smckusick 	if (shp->shm_handle == NULL)
28242349Smckusick 		panic("shmat NULL handle");	/* valid segments always carry a handle */
28343408Shibler 	if (error = ipcaccess(&shp->shm_perm,
28447540Skarels 	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
28542961Smckusick 		return (error);
28641490Smckusick 	uva = uap->shmaddr;
	/* a caller-supplied address must be SHMLBA-aligned, or SHM_RND
	 * must be set so we can round it down ourselves */
28741490Smckusick 	if (uva && ((int)uva & (SHMLBA-1))) {
28841490Smckusick 		if (uap->shmflg & SHM_RND)
28941490Smckusick 			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
29042961Smckusick 		else
29142961Smckusick 			return (EINVAL);
29241490Smckusick 	}
29341490Smckusick 	/*
29441490Smckusick 	 * Make sure user doesn't use more than their fair share
29541490Smckusick 	 */
29645737Smckusick 	for (size = 0; size < shminfo.shmseg; size++) {
29745737Smckusick 		if (shmd->shmd_uva == 0)
29845737Smckusick 			break;
29945737Smckusick 		shmd++;
30045737Smckusick 	}
30142961Smckusick 	if (size >= shminfo.shmseg)
30242961Smckusick 		return (EMFILE);
30341490Smckusick 	size = ctob(clrnd(btoc(shp->shm_segsz)));	/* round up to mapping granularity */
30445737Smckusick 	prot = VM_PROT_READ;
30545737Smckusick 	if ((uap->shmflg & SHM_RDONLY) == 0)
30645737Smckusick 		prot |= VM_PROT_WRITE;
30745737Smckusick 	flags = MAP_ANON|MAP_SHARED;
30845737Smckusick 	if (uva)
30945737Smckusick 		flags |= MAP_FIXED;
31045737Smckusick 	else
31145737Smckusick 		uva = (caddr_t)0x1000000;	/* XXX */
	/* shmh_id identifies the shared anonymous object created in
	 * shmget(), so all attachers map the same pages */
31253313Smckusick 	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
31358595Shibler 			(vm_size_t)size, prot, VM_PROT_ALL, flags,
31458595Shibler 			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
31542961Smckusick 	if (error)
31645737Smckusick 		return(error);
31745737Smckusick 	shmd->shmd_uva = (vm_offset_t)uva;
31845737Smckusick 	shmd->shmd_id = uap->shmid;
31941490Smckusick 	/*
32041490Smckusick 	 * Fill in the remaining fields
32141490Smckusick 	 */
32242922Smckusick 	shp->shm_lpid = p->p_pid;
32341490Smckusick 	shp->shm_atime = time.tv_sec;
32441490Smckusick 	shp->shm_nattch++;
32542961Smckusick 	*retval = (int) uva;
32643408Shibler 	return (0);
32741490Smckusick }
32841490Smckusick 
32942961Smckusick /*
33042961Smckusick  * Detach from shared memory segment.
33142961Smckusick  */
33254932Storek struct shmdt_args {
33354932Storek 	caddr_t	shmaddr;	/* user address previously returned by shmat() */
33454932Storek };
/*
 * shmdt --
 *	Detach the shared memory segment mapped at uap->shmaddr from the
 *	calling process.  Returns EINVAL if no attached segment lives at
 *	that address, 0 on success.
 */
33542961Smckusick /* ARGSUSED */
33642961Smckusick shmdt(p, uap, retval)
33742961Smckusick 	struct proc *p;
33854932Storek 	struct shmdt_args *uap;
33942961Smckusick 	int *retval;
34041490Smckusick {
34145737Smckusick 	register struct shmdesc *shmd;
34245737Smckusick 	register int i;
34341490Smckusick 
34447540Skarels 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
34545737Smckusick 	for (i = 0; i < shminfo.shmseg; i++, shmd++)
34645737Smckusick 		if (shmd->shmd_uva &&
34745737Smckusick 		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
34841490Smckusick 			break;
34945737Smckusick 	if (i == shminfo.shmseg)
35045737Smckusick 		return(EINVAL);
	/*
	 * Record the last pid BEFORE shmufree(): shmufree() zeroes
	 * shmd->shmd_id, so the old order always updated shm_lpid of
	 * segment 0 instead of the detached segment (and could write
	 * into a slot shmufree() had just released via shmfree()).
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	/* explicit success return; previously the function fell off the
	 * end and the syscall returned an indeterminate value */
	return (0);
}
35441490Smckusick 
/*
 * shmfork --
 *	Called at fork time: give the child (p2) its own copy of the
 *	parent's (p1) shm descriptor array and bump the attach count of
 *	every segment the parent has mapped.
 *	NOTE(review): bcopy() reads p1->p_vmspace->vm_shm unconditionally;
 *	presumably the caller only invokes this when the parent actually
 *	has descriptors -- confirm at the call site.  The isvfork argument
 *	is unused here.
 */
35547540Skarels shmfork(p1, p2, isvfork)
35647540Skarels 	struct proc *p1, *p2;
35745737Smckusick 	int isvfork;
35841490Smckusick {
35945737Smckusick 	register struct shmdesc *shmd;
36045737Smckusick 	register int size;
36141490Smckusick 
36245737Smckusick 	/*
36345737Smckusick 	 * Copy parents descriptive information
36445737Smckusick 	 */
36545737Smckusick 	size = shminfo.shmseg * sizeof(struct shmdesc);
36645737Smckusick 	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
36747540Skarels 	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
36847540Skarels 	p2->p_vmspace->vm_shm = (caddr_t)shmd;
36945737Smckusick 	/*
37045737Smckusick 	 * Increment reference counts
37145737Smckusick 	 */
37245737Smckusick 	for (size = 0; size < shminfo.shmseg; size++, shmd++)
37345737Smckusick 		if (shmd->shmd_uva)
37445737Smckusick 			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
37541490Smckusick }
37641490Smckusick 
/*
 * shmexit --
 *	Called at process exit: detach every attached segment, then
 *	release the per-process descriptor array.
 *	NOTE(review): frees p->p_vmspace->vm_shm unconditionally;
 *	presumably the caller only invokes this when vm_shm is
 *	non-NULL -- confirm at the call site.
 */
37745737Smckusick shmexit(p)
37845737Smckusick 	struct proc *p;
37941490Smckusick {
38045737Smckusick 	register struct shmdesc *shmd;
38145737Smckusick 	register int i;
38241490Smckusick 
38347540Skarels 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
38445737Smckusick 	for (i = 0; i < shminfo.shmseg; i++, shmd++)
38545737Smckusick 		if (shmd->shmd_uva)
38645737Smckusick 			shmufree(p, shmd);
38747540Skarels 	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
38847540Skarels 	p->p_vmspace->vm_shm = NULL;
38941490Smckusick }
39041490Smckusick 
/*
 * shmvalid --
 *	Validate a segment id: the slot (id % SHMMMNI) must be in range,
 *	its stored generation number must match the id's (id / SHMMMNI),
 *	and the slot must be allocated but not marked for destruction.
 *	Returns 0 if valid, EINVAL otherwise.
 */
39141490Smckusick shmvalid(id)
39241490Smckusick 	register int id;
39341490Smckusick {
39441490Smckusick 	register struct shmid_ds *shp;
39541490Smckusick 
39641490Smckusick 	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
39742961Smckusick 		return(EINVAL);
39841490Smckusick 	shp = &shmsegs[id % SHMMMNI];
39941490Smckusick 	if (shp->shm_perm.seq == (id / SHMMMNI) &&
40041490Smckusick 	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
40142961Smckusick 		return(0);
40242961Smckusick 	return(EINVAL);
40341490Smckusick }
40441490Smckusick 
40541490Smckusick /*
40641490Smckusick  * Free user resources associated with a shared memory segment
40741490Smckusick  */
40845737Smckusick shmufree(p, shmd)
40942922Smckusick 	struct proc *p;
41045737Smckusick 	struct shmdesc *shmd;
41141490Smckusick {
41241490Smckusick 	register struct shmid_ds *shp;
41341490Smckusick 
41445737Smckusick 	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	/* unmap the segment from the process's address space */
41549710Shibler 	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
41645737Smckusick 			     ctob(clrnd(btoc(shp->shm_segsz))));
	/* mark the descriptor slot free for reuse */
41745737Smckusick 	shmd->shmd_id = 0;
41845737Smckusick 	shmd->shmd_uva = 0;
41941490Smckusick 	shp->shm_dtime = time.tv_sec;
	/* last detach of a segment marked for removal destroys it */
42041490Smckusick 	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
42141490Smckusick 		shmfree(shp);
42241490Smckusick }
42341490Smckusick 
42441490Smckusick /*
42541490Smckusick  * Deallocate resources associated with a shared memory segment
42641490Smckusick  */
42741490Smckusick shmfree(shp)
42841490Smckusick 	register struct shmid_ds *shp;
42941490Smckusick {
43041490Smckusick 
43141490Smckusick 	if (shp->shm_handle == NULL)
43241490Smckusick 		panic("shmfree");
43341490Smckusick 	/*
43445737Smckusick 	 * Lose our lingering object reference by deallocating space
43545737Smckusick 	 * in kernel.  Pager will also be deallocated as a side-effect.
43645737Smckusick 	 */
43745737Smckusick 	vm_deallocate(shm_map,
43845737Smckusick 		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
43949668Shibler 		      ctob(clrnd(btoc(shp->shm_segsz))));
44045737Smckusick 	free((caddr_t)shp->shm_handle, M_SHM);
44141490Smckusick 	shp->shm_handle = NULL;
44241490Smckusick 	shmtot -= clrnd(btoc(shp->shm_segsz));	/* return pages to the global quota */
44341490Smckusick 	shp->shm_perm.mode = 0;	/* slot is free for shmget() to reuse */
44441490Smckusick 	/*
44541490Smckusick 	 * Increment the sequence number to ensure that outstanding
44641490Smckusick 	 * shmids for this segment will be invalid in the event that
44741490Smckusick 	 * the segment is reallocated.  Note that shmids must be
44841490Smckusick 	 * positive as decreed by SVID.
44941490Smckusick 	 */
45041490Smckusick 	shp->shm_perm.seq++;
45141490Smckusick 	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
45241490Smckusick 		shp->shm_perm.seq = 0;
45341490Smckusick }
45441490Smckusick 
45541490Smckusick /*
45641490Smckusick  * XXX This routine would be common to all sysV style IPC
45741490Smckusick  *     (if the others were implemented).
45841490Smckusick  */
/*
 * ipcaccess --
 *	Check ``mode'' (an IPC_R/IPC_W style permission mask, owner-bit
 *	positions) against the segment's ipc_perm.  The superuser always
 *	passes.  Returns 0 if access is allowed, EACCES otherwise.
 */
45942961Smckusick ipcaccess(ipc, mode, cred)
46041490Smckusick 	register struct ipc_perm *ipc;
46142961Smckusick 	int mode;
46242961Smckusick 	register struct ucred *cred;
46341490Smckusick {
46441490Smckusick 	register int m;
46541490Smckusick 
46642961Smckusick 	if (cred->cr_uid == 0)
46741490Smckusick 		return(0);
46841490Smckusick 	/*
46941490Smckusick 	 * Access check is based on only one of owner, group, public.
47041490Smckusick 	 * If not owner, then check group.
47141490Smckusick 	 * If not a member of the group, then check public access.
47241490Smckusick 	 */
47341490Smckusick 	mode &= 0700;	/* compare in the owner-bit positions */
47441490Smckusick 	m = ipc->mode;
47542961Smckusick 	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
47641490Smckusick 		m <<= 3;	/* shift group bits into owner position */
47742961Smckusick 		if (!groupmember(ipc->gid, cred) &&
47842961Smckusick 		    !groupmember(ipc->cgid, cred))
47941490Smckusick 			m <<= 3;	/* fall through to ``other'' bits */
48041490Smckusick 	}
48141490Smckusick 	if ((mode&m) == mode)
48242961Smckusick 		return (0);
48342961Smckusick 	return (EACCES);
48441490Smckusick }
48541490Smckusick #endif /* SYSVSHM */
486