/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	7.20 (Berkeley) 10/11/92
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

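/*
 * Dispatch table for the shmsys() multiplexer below.  shmtot tracks the
 * total number of clicks currently allocated to all segments and is
 * bounded by shminfo.shmall in shmget().
 */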
int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

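/*
 * Initialize the shared memory system: carve shm_map out of the kernel
 * map (shminfo.shmall pages of NBPG bytes) and mark every segment slot
 * free by clearing its mode and sequence number.
 */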
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

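/*
 * Usage sketch (illustrative only, not kernel code): user programs reach
 * these routines through the shmsys() multiplexer above, normally via
 * the C library's shmget/shmat/shmdt/shmctl wrappers.  Assuming the
 * standard SVID interface, a minimal client might look like:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, (char *)0, 0);
 *	p[0] = 'x';
 *	(void) shmdt(p);
 *	(void) shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */
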
/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
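	/*
	 * Ids are encoded as seq * SHMMMNI + slot index; shmvalid() uses
	 * the sequence number to reject stale ids after a slot is reused.
	 */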
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
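	/*
	 * Map the segment into the process: read-only if SHM_RDONLY was
	 * requested, otherwise read/write; honor a caller-supplied address
	 * with MAP_FIXED, otherwise fall back to an arbitrary hint.
	 */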
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
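	/*
	 * Record the last pid on the segment before shmufree() zeroes
	 * the descriptor, then release the user mapping.
	 */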
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

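/*
 * Duplicate the parent's attach descriptors into the child at fork time
 * and bump the attach count on each mapped segment.
 */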
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

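/*
 * Detach any remaining segments and release the per-process descriptor
 * array when a process exits.
 */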
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

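/*
 * Check that a user-supplied shm id names a currently allocated,
 * non-destroyed segment with a matching sequence number.
 */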
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */