/*	$NetBSD: sysv_shm.c,v 1.40 1997/10/09 08:35:13 drochner Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

struct shmid_ds *shm_find_segment_by_shmid __P((int));
void shmexit __P((struct proc *));

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)			   initialization
 * shmexit(struct proc *)		   cleanup
 * shmfork(struct proc *, struct proc *)   fork handling
 * sys_shm{at,ctl,dt,get}()		   syscall entry points
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */

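/*
 * For orientation only, a minimal userland sequence that exercises these
 * entry points (standard SysV shm API, not defined in this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	   sys_shmat() maps the segment
 *	p[0] = 1;			   visible to all attachers
 *	shmdt(p);			   sys_shmdt() unmaps it
 *	shmctl(id, IPC_RMID, NULL);	   sys_shmctl() marks it removed
 */
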
#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static int shm_find_segment_by_key __P((key_t));
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static int shmget_existing __P((struct proc *, struct sys_shmget_args *,
				int, int, register_t *));
static int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
					int, register_t *));

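/*
 * Linear search of shmsegs[] for an allocated segment with the given key;
 * returns the segment index, or -1 if the key is not in use.
 */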
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

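/*
 * Translate a shmid into its shmid_ds.  The index must be in range and the
 * segment allocated and not removed, and the sequence number must match,
 * so stale ids referring to a recycled slot are rejected with NULL.
 */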
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

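/*
 * Drop the backing VM object and free the handle, update the resource
 * accounting, and return the segment slot to the free pool.
 */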
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

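/*
 * Remove one attachment from the process' address space.  If this was the
 * last attachment of a segment already marked SHMSEG_REMOVED, the segment
 * itself is deallocated.
 */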
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	result = vm_map_remove(&p->p_vmspace->vm_map,
			       shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

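/*
 * shmdt(2): look up the mapping whose attach address matches shmaddr in
 * the per-process table and detach it.
 */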
int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

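/*
 * shmat(2): map a segment into the caller's address space, allocating the
 * per-process attach table on first use.  An explicit shmaddr must be
 * SHMLBA-aligned unless SHM_RND is given to round it down.
 */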
int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vm_offset_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vm_offset_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va =
		    round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}
	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1);
	if (rv != KERN_SUCCESS) {
		/* The map did not consume our reference; drop it. */
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_protect(&p->p_vmspace->vm_map, attach_va, attach_va + size,
		       prot, 0);
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}

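/*
 * shmctl(2): IPC_STAT copies the descriptor out, IPC_SET updates the owner
 * and permission bits, and IPC_RMID marks the segment removed, deallocating
 * it immediately if nothing is attached.  SHM_LOCK and SHM_UNLOCK are not
 * implemented here and fail with EINVAL.
 */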
int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
				sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
			       sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

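/*
 * shmget(2) on a key that already exists.  If the segment is still being
 * created by another process, sleep until that settles and have the caller
 * retry the lookup (EAGAIN); otherwise check permissions, the requested
 * size, and IPC_EXCL.
 */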
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

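/*
 * shmget(2) for a new segment: validate the size, find a free slot, and
 * allocate the backing VM object and pager before filling in the shmid_ds
 * and charging the pages to shm_committed.
 */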
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	vm_pager_t pager;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shm_handle->shm_object = vm_object_allocate(size);
	if (shm_handle->shm_object == NULL) {
		/* XXX cannot happen */
		error = ENOMEM;
		goto out;
	}
	/*
	 * Allocate the pager up front, so it is in place before the
	 * object can ever be paged out.
	 */
	pager = vm_pager_allocate(PG_DFLT, 0, size, VM_PROT_DEFAULT, 0);
	if (pager == NULL) {
		error = ENOMEM;
		goto out;
	}
	vm_object_setpager(shm_handle->shm_object, pager, 0, 0);
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

out:
	if (error) {
		if (shm_handle->shm_object != NULL)
			vm_object_deallocate(shm_handle->shm_object);
		free(shm_handle, M_SHM);
		shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED)
		    | SHMSEG_FREE;
	} else
		*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

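/*
 * shmget(2): private keys always get a fresh segment; otherwise look the
 * key up, retrying while a matching segment is still mid-creation, and
 * create a new one only if IPC_CREAT was given.
 */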
int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

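/*
 * At fork time, give the child its own copy of the parent's attach table
 * and bump each attached segment's reference count; the mappings
 * themselves are inherited through VM_INHERIT_SHARE.
 */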
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (p1->p_vmspace->vm_shm == NULL) {
		p2->p_vmspace->vm_shm = NULL;
		return;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

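/*
 * At process exit, detach every remaining mapping and free the attach
 * table.
 */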
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

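/*
 * Called once at boot: convert shmmax from pages to bytes and mark every
 * segment slot free.
 */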
void
shminit()
{
	int i;

	shminfo.shmmax *= NBPG;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
556