xref: /csrg-svn/sys/kern/sysv_shm.c (revision 54932)
141490Smckusick /*
241490Smckusick  * Copyright (c) 1988 University of Utah.
341490Smckusick  * Copyright (c) 1990 The Regents of the University of California.
441490Smckusick  * All rights reserved.
541490Smckusick  *
641490Smckusick  * This code is derived from software contributed to Berkeley by
741490Smckusick  * the Systems Programming Group of the University of Utah Computer
841490Smckusick  * Science Department. Originally from University of Wisconsin.
941490Smckusick  *
1041490Smckusick  * %sccs.include.redist.c%
1141490Smckusick  *
1254856Shibler  * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
1341490Smckusick  *
14*54932Storek  *	@(#)sysv_shm.c	7.19 (Berkeley) 07/10/92
1541490Smckusick  */
1641490Smckusick 
1741490Smckusick /*
1841490Smckusick  * System V shared memory routines.
1943630Skarels  * TEMPORARY, until mmap is in place;
2043630Skarels  * needed now for HP-UX compatibility and X server (yech!).
2141490Smckusick  */
2241490Smckusick 
2341490Smckusick #ifdef SYSVSHM
2441490Smckusick 
2541490Smckusick #include "param.h"
2641490Smckusick #include "systm.h"
2741490Smckusick #include "kernel.h"
2841490Smckusick #include "proc.h"
2941490Smckusick #include "shm.h"
3041490Smckusick #include "malloc.h"
3145737Smckusick #include "mman.h"
3248446Skarels #include "vm/vm.h"
3348446Skarels #include "vm/vm_kern.h"
3448446Skarels #include "vm/vm_inherit.h"
3548446Skarels #include "vm/vm_pager.h"
3641490Smckusick 
3741490Smckusick int	shmat(), shmctl(), shmdt(), shmget();
3841490Smckusick int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
3941490Smckusick int	shmtot = 0;
4041490Smckusick 
/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;	/* user VA of attachment; 0 marks a free slot */
	int		shmd_id;	/* shmid of the attached segment */
};
4941490Smckusick 
/*
 * Per segment internal structure (shm_handle).
 * Stored in shmid_ds.shm_handle; allocated in shmget(), freed in shmfree().
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;	/* kernel VA of the segment mapping in shm_map */
	caddr_t		shmh_id;	/* pager id passed to vm_mmap(): 0xc0000000|index (XXX) */
};

vm_map_t shm_map;	/* address space for shared memory segments */
5945737Smckusick 
6041490Smckusick shminit()
6141490Smckusick {
6241490Smckusick 	register int i;
6345737Smckusick 	vm_offset_t whocares1, whocares2;
6441490Smckusick 
6545737Smckusick 	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
6645737Smckusick 				shminfo.shmall * NBPG, FALSE);
6741490Smckusick 	if (shminfo.shmmni > SHMMMNI)
6841490Smckusick 		shminfo.shmmni = SHMMMNI;
6941490Smckusick 	for (i = 0; i < shminfo.shmmni; i++) {
7041490Smckusick 		shmsegs[i].shm_perm.mode = 0;
7141490Smckusick 		shmsegs[i].shm_perm.seq = 0;
7241490Smckusick 	}
7341490Smckusick }
7441490Smckusick 
/*
 * Entry point for all SHM calls: validates the selector and
 * dispatches through shmcalls[].
 */
struct shmsys_args {
	u_int which;		/* index into shmcalls[] */
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	/*
	 * The selected call's own arguments follow ``which'' directly
	 * in the user argument area, hence the &uap[1] pointer.
	 */
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
9141490Smckusick 
/*
 * Get a shared memory segment: look up an existing segment by key,
 * or create a new one if the key is unused (or IPC_PRIVATE) and
 * IPC_CREAT is set.  On success *retval is the user-visible shmid
 * (seq * SHMMMNI + slot index).
 */
struct shmget_args {
	key_t key;
	int size;		/* requested size in bytes */
	int shmflg;		/* IPC_CREAT/IPC_EXCL plus 0777 permission bits */
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;	/* IPC_PRIVATE never matches: force creation */

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		/* find a free slot in the segment table */
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		/* size in pages (clicks), rounded for the system-wide limit */
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to insure that shmvalid() is
		 * false making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		/* anonymous kernel mapping; the handle id names the pager object */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			/* back out the tentative allocation made above */
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		/* found an existing segment: validate access and request */
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
18541490Smckusick 
/*
 * Shared memory control: IPC_STAT copies the shmid_ds out to the
 * user, IPC_SET updates owner/group/mode, IPC_RMID marks the
 * segment for destruction (freed immediately if nothing is attached).
 */
struct shmctl_args {
	int shmid;
	int cmd;		/* IPC_STAT, IPC_SET, or IPC_RMID */
	caddr_t buf;		/* user struct shmid_ds for STAT/SET */
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		/* requires read permission on the segment */
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		/* only root, the owner, or the creator may change it */
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		/* only uid, gid, and the low 0777 mode bits are settable */
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		/* only root, the owner, or the creator may remove it */
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		/* if still attached, the last shmufree() does the actual free */
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}
24341490Smckusick 
/*
 * Attach to shared memory segment: map the segment into the calling
 * process at shmaddr (or a kernel-chosen address), record the
 * attachment in the per-process descriptor array, and return the
 * attach address through *retval.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;	/* requested user address, or 0 for "anywhere" */
	int	shmflg;		/* SHM_RDONLY, SHM_RND */
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		/* first attach in this process: create a zeroed descriptor array */
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		/* unaligned address: round down if SHM_RND, else reject */
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 * (scan for a free descriptor slot; ``size'' doubles as index here).
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX hard-wired hint for "anywhere" */
	/* map the segment's pager object (named by shmh_id) into the process */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}
32641490Smckusick 
32742961Smckusick /*
32842961Smckusick  * Detach from shared memory segment.
32942961Smckusick  */
330*54932Storek struct shmdt_args {
331*54932Storek 	caddr_t	shmaddr;
332*54932Storek };
33342961Smckusick /* ARGSUSED */
33442961Smckusick shmdt(p, uap, retval)
33542961Smckusick 	struct proc *p;
336*54932Storek 	struct shmdt_args *uap;
33742961Smckusick 	int *retval;
33841490Smckusick {
33945737Smckusick 	register struct shmdesc *shmd;
34045737Smckusick 	register int i;
34141490Smckusick 
34247540Skarels 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
34345737Smckusick 	for (i = 0; i < shminfo.shmseg; i++, shmd++)
34445737Smckusick 		if (shmd->shmd_uva &&
34545737Smckusick 		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
34641490Smckusick 			break;
34745737Smckusick 	if (i == shminfo.shmseg)
34845737Smckusick 		return(EINVAL);
34945737Smckusick 	shmufree(p, shmd);
35045737Smckusick 	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
35141490Smckusick }
35241490Smckusick 
/*
 * Duplicate the parent's shm attachments into the child at fork time
 * and bump each attached segment's reference count.
 * NOTE(review): p1->p_vmspace->vm_shm is copied unconditionally —
 * presumably the caller only invokes this when the parent has
 * attachments (vm_shm != NULL); verify against the fork path.
 * ``isvfork'' is accepted but not used here.
 */
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy parents descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 * (``size'' is reused as the loop index here)
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}
37441490Smckusick 
37545737Smckusick shmexit(p)
37645737Smckusick 	struct proc *p;
37741490Smckusick {
37845737Smckusick 	register struct shmdesc *shmd;
37945737Smckusick 	register int i;
38041490Smckusick 
38147540Skarels 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
38245737Smckusick 	for (i = 0; i < shminfo.shmseg; i++, shmd++)
38345737Smckusick 		if (shmd->shmd_uva)
38445737Smckusick 			shmufree(p, shmd);
38547540Skarels 	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
38647540Skarels 	p->p_vmspace->vm_shm = NULL;
38741490Smckusick }
38841490Smckusick 
38941490Smckusick shmvalid(id)
39041490Smckusick 	register int id;
39141490Smckusick {
39241490Smckusick 	register struct shmid_ds *shp;
39341490Smckusick 
39441490Smckusick 	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
39542961Smckusick 		return(EINVAL);
39641490Smckusick 	shp = &shmsegs[id % SHMMMNI];
39741490Smckusick 	if (shp->shm_perm.seq == (id / SHMMMNI) &&
39841490Smckusick 	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
39942961Smckusick 		return(0);
40042961Smckusick 	return(EINVAL);
40141490Smckusick }
40241490Smckusick 
40341490Smckusick /*
40441490Smckusick  * Free user resources associated with a shared memory segment
40541490Smckusick  */
40645737Smckusick shmufree(p, shmd)
40742922Smckusick 	struct proc *p;
40845737Smckusick 	struct shmdesc *shmd;
40941490Smckusick {
41041490Smckusick 	register struct shmid_ds *shp;
41141490Smckusick 
41245737Smckusick 	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
41349710Shibler 	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
41445737Smckusick 			     ctob(clrnd(btoc(shp->shm_segsz))));
41545737Smckusick 	shmd->shmd_id = 0;
41645737Smckusick 	shmd->shmd_uva = 0;
41741490Smckusick 	shp->shm_dtime = time.tv_sec;
41841490Smckusick 	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
41941490Smckusick 		shmfree(shp);
42041490Smckusick }
42141490Smckusick 
/*
 * Deallocate resources associated with a shared memory segment:
 * release the kernel mapping and handle, return the pages to the
 * system-wide total, and retire the slot.
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	/* return the segment's pages to the global allocation count */
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}
45241490Smckusick 
45341490Smckusick /*
45441490Smckusick  * XXX This routine would be common to all sysV style IPC
45541490Smckusick  *     (if the others were implemented).
45641490Smckusick  */
45742961Smckusick ipcaccess(ipc, mode, cred)
45841490Smckusick 	register struct ipc_perm *ipc;
45942961Smckusick 	int mode;
46042961Smckusick 	register struct ucred *cred;
46141490Smckusick {
46241490Smckusick 	register int m;
46341490Smckusick 
46442961Smckusick 	if (cred->cr_uid == 0)
46541490Smckusick 		return(0);
46641490Smckusick 	/*
46741490Smckusick 	 * Access check is based on only one of owner, group, public.
46841490Smckusick 	 * If not owner, then check group.
46941490Smckusick 	 * If not a member of the group, then check public access.
47041490Smckusick 	 */
47141490Smckusick 	mode &= 0700;
47241490Smckusick 	m = ipc->mode;
47342961Smckusick 	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
47441490Smckusick 		m <<= 3;
47542961Smckusick 		if (!groupmember(ipc->gid, cred) &&
47642961Smckusick 		    !groupmember(ipc->cgid, cred))
47741490Smckusick 			m <<= 3;
47841490Smckusick 	}
47941490Smckusick 	if ((mode&m) == mode)
48042961Smckusick 		return (0);
48142961Smckusick 	return (EACCES);
48241490Smckusick }
48341490Smckusick #endif /* SYSVSHM */
484