/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	8.2 (Berkeley) 09/04/93
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;	/* clicks of shm currently allocated, checked against shminfo.shmall */

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

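/*
 * Bootstrap-time initialization: carve a kernel submap big enough for
 * shminfo.shmall pages of shared memory out of kernel_map, clamp the
 * configured number of segment slots to SHMMMNI, and mark every slot
 * free.
 */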
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
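
/*
 * Illustrative sketch (not part of the kernel) of the user-visible API
 * this file implements; names and error handling below are for exposition
 * only.  Each call enters the kernel through shmsys(), where uap->which
 * selects the handler from shmcalls[] above (0 = shmat, 1 = shmctl,
 * 2 = shmdt, 3 = shmget) and the call-specific arguments follow "which"
 * in the argument structure, which is why the handlers are passed &uap[1].
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, (char *)0, 0);
 *	p[0] = 'x';
 *	(void) shmdt(p);
 *	(void) shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */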

/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, VM_PROT_ALL,
				MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & ACCESSPERMS);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg & ACCESSPERMS,
			    cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
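	/*
	 * The returned shmid encodes both the slot index (rval) and the
	 * slot's current sequence number, so stale ids left over from a
	 * destroyed segment are rejected later by shmvalid().
	 */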
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~ACCESSPERMS)
			| (sbuf.shm_perm.mode & ACCESSPERMS);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
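		/*
		 * Mark the segment for destruction; if anyone is still
		 * attached, the actual teardown is deferred until the
		 * last detach (see shmufree()).
		 */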
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
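	/*
	 * Map the segment into the process.  Passing the same shmh_id
	 * "handle" that shmget() used names the segment's anonymous
	 * backing object, so the pages are shared with the kernel's
	 * shm_map mapping and with every other attach of this segment.
	 */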
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
			(vm_size_t)size, prot, VM_PROT_ALL, flags,
			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return (EINVAL);
	/*
	 * Record the detaching pid before shmufree() clears the
	 * descriptor (and possibly frees the segment).
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

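/*
 * Duplicate the parent's shm descriptor array at fork time and bump the
 * attach count on every segment it references.  The mappings themselves
 * are inherited through the VM system; this is assumed to be called only
 * when the parent actually has a descriptor array (vm_shm != NULL).
 */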
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

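/*
 * Called at process exit: detach every remaining segment and release
 * the descriptor array.
 */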
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

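/*
 * Validate a user-supplied shmid: the low bits select the segment slot,
 * the high bits must match that slot's current sequence number, and the
 * slot must be allocated and not marked for destruction.
 */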
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
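	/*
	 * Example: with ipc->mode 0640, a non-owner who is in the group
	 * shifts m left once, putting the group bits in the owner position
	 * (0640 << 3 = 06400), so a request for IPC_R (0400) succeeds
	 * while IPC_W (0200) fails with EACCES.
	 */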
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */