/*	$NetBSD: sysv_shm.c,v 1.100 2007/04/29 20:23:36 msaitoh Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.100 2007/04/29 20:23:36 msaitoh Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)                                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
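/*
 * Illustrative sketch (not part of this file): the userland lifecycle
 * that the handlers below service, with error handling trimmed.  The
 * calls map to the kernel entry points sys_shmget(), sys_shmat(),
 * sys_shmdt() and sys___shmctl13() defined later in this file.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int
 *	demo(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *		char *p = shmat(id, NULL, 0);	// backed by a uvm aobj
 *
 *		strcpy(p, "hello");		// plain memory access
 *		shmdt(p);			// drop this mapping
 *		return shmctl(id, IPC_RMID, NULL); // mark segment removed
 *	}
 */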

int shm_nused;
struct	shmid_ds *shmsegs;

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static kmutex_t	shm_lock;
static int	shm_last_free, shm_committed, shm_use_phys;

static POOL_INIT(shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
    "shmmp", &pool_allocator_nointr, IPL_NONE);

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

static int shm_find_segment_by_key(key_t);
static void shm_deallocate_segment(struct shmid_ds *);
static void shm_delete_mapping(struct vmspace *, struct shmmap_state *,
			       struct shmmap_entry *);
static int shmget_existing(struct lwp *, struct sys_shmget_args *,
			   int, int, register_t *);
static int shmget_allocate_segment(struct lwp *, struct sys_shmget_args *,
				   int, register_t *);
static struct shmmap_state *shmmap_getprivate(struct proc *);
static struct shmmap_entry *shm_find_mapping(struct shmmap_state *, vaddr_t);
static int shmrealloc(int);

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm._key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode &
	    (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct uvm_object *uobj = shmseg->_shm_internal;
	size_t size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

#ifdef SHMDEBUG
	printf("shm freeing key 0x%lx seq 0x%x\n",
	       shmseg->shm_perm._key, shmseg->shm_perm._seq);
#endif

	(*uobj->pgops->pgo_detach)(uobj);
	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}
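/*
 * Illustrative arithmetic (assuming PAGE_SIZE is 4096, so PGOFSET is
 * 0xfff): the idiom (size + PGOFSET) & ~PGOFSET used throughout this
 * file rounds a segment size up to a whole number of pages, e.g.
 *
 *	(5000 + 0xfff) & ~0xfff == 8192		(two pages)
 *	(4096 + 0xfff) & ~0xfff == 4096		(already page-aligned)
 */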

static void
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s,
    struct shmmap_entry *shmmap_se)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
	if (segnum < 0 || segnum >= shminfo.shmmni)
		panic("shm_delete_mapping: vmspace %p state %p entry %p - "
		    "entry segment ID bad (%d)",
		    vm, shmmap_s, shmmap_se, segnum);
#endif
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	pool_put(&shmmap_entry_pool, shmmap_se);
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
}

/*
 * Get a non-shared shm map for that vmspace.  Three cases:
 *   - no shm map present: create a fresh one
 *   - a shm map with refcount == 1, used only by ourselves: fine
 *   - a shared shm map: copy to a fresh one and adjust refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return (oshmmap_s);

	shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
	memset(shmmap_s, 0, sizeof(struct shmmap_state));
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (void *)shmmap_s;

	if (!oshmmap_s)
		return (shmmap_s);

#ifdef SHMDEBUG
	printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	       p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;
	return (shmmap_s);
}

static struct shmmap_entry *
shm_find_mapping(struct shmmap_state *map, vaddr_t va)
{
	struct shmmap_entry *shmmap_se;

	SLIST_FOREACH(shmmap_se, &map->entries, next) {
		if (shmmap_se->va == va)
			return shmmap_se;
	}
	return NULL;
}

int
sys_shmdt(struct lwp *l, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s, *shmmap_s1;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
	if (!shmmap_se)
		return EINVAL;

	shmmap_s1 = shmmap_getprivate(p);
	if (shmmap_s1 != shmmap_s) {
		/* map has been copied, lookup entry in new map */
		shmmap_se = shm_find_mapping(shmmap_s1,
					     (vaddr_t)SCARG(uap, shmaddr));
		KASSERT(shmmap_se != NULL);
	}
#ifdef SHMDEBUG
	printf("shmdt: vm %p: remove %d @%lx\n",
	       p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
	shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
	return 0;
}

int
sys_shmat(struct lwp *l, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, flags = 0;
	struct proc *p = l->l_proc;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct uvm_object *uobj;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	struct shmmap_entry *shmmap_se;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
		return EMFILE;

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to uvm_mmap() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);
	}
	uobj = shmseg->_shm_internal;
	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
	if (error)
		goto out;
	/* Lock the memory */
	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
		/* Wire the map */
		error = uvm_map_pageable(&p->p_vmspace->vm_map, attach_va,
		    attach_va + size, false, 0);
		if (error) {
			if (error == EFAULT)
				error = ENOMEM;
			goto out;
		}
	}

	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
	printf("shmat: vm %p: add %d @%lx\n",
	       p->p_vmspace, shmmap_se->shmid, attach_va);
#endif
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;

	retval[0] = attach_va;
	return 0;
out:
	(*uobj->pgops->pgo_detach)(uobj);
	return error;
}
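/*
 * Illustrative sketch (not part of this file): the attach-address rules
 * implemented above.  SHMLBA is machine-dependent; 0x10000 is only an
 * assumption for the example arithmetic.
 *
 *	void *a = shmat(id, (void *)0x20100, SHM_RND);	// rounds down,
 *							// attaches at 0x20000
 *	void *b = shmat(id, (void *)0x20100, 0);	// unaligned: EINVAL
 *	void *c = shmat(id, NULL, 0);			// kernel picks the va
 */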

int
sys___shmctl13(struct lwp *l, void *v, register_t *retval)
{
	struct sys___shmctl13_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return (error);
	}

	error = shmctl1(l, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return (error);
}

int
shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
{
	kauth_cred_t cred = l->l_cred;
	struct proc *p = l->l_proc;
	struct shmid_ds *shmseg;
	struct shmmap_entry *shmmap_se;
	struct shmmap_state *shmmap_s;
	int error = 0;
	size_t size;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return EINVAL;

	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
		if ((error = kauth_authorize_generic(cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
			return error;
		shmmap_s = shmmap_getprivate(p);
		/* Find our shared memory address by shmid */
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
			if (shmmap_se->shmid != shmid)
				continue;

			size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

			if (cmd == SHM_LOCK &&
			    !(shmseg->shm_perm.mode & SHMSEG_WIRED)) {
				/* Wire the entire object */
				error = uobj_wirepages(shmseg->_shm_internal, 0,
				    round_page(shmseg->shm_segsz));
				if (error)
					return EIO;
				/* Wire the map */
				error = uvm_map_pageable(&p->p_vmspace->vm_map,
				    shmmap_se->va, shmmap_se->va + size, false,
				    0);
				if (error) {
					uobj_unwirepages(shmseg->_shm_internal,
					    0, round_page(shmseg->shm_segsz));
					if (error == EFAULT)
						error = ENOMEM;
					return error;
				}
				/* Tag as wired */
				shmseg->shm_perm.mode |= SHMSEG_WIRED;

			} else if (cmd == SHM_UNLOCK &&
			    (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
				/* Unwire the object */
				uobj_unwirepages(shmseg->_shm_internal, 0,
				    round_page(shmseg->shm_segsz));
				error = uvm_map_pageable(&p->p_vmspace->vm_map,
				    shmmap_se->va, shmmap_se->va + size, true,
				    0);
				if (error) {
					/*
					 * uvm_map_pageable can fail here only
					 * if its arguments are invalid;
					 * otherwise it always returns 0.
					 */
					return EIO;
				}
				/* Tag as unwired */
				shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
			}
		}
		break;
	default:
		return EINVAL;
	}
	return 0;
}
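/*
 * Illustrative sketch (not part of this file): driving the SHM_LOCK /
 * SHM_UNLOCK paths above from userland.  Both require superuser
 * credentials, per the kauth check above.
 *
 *	char *p = shmat(id, NULL, 0);
 *	if (shmctl(id, SHM_LOCK, NULL) == -1)	// wires the object and
 *		perror("SHM_LOCK");		// this process's mapping
 *	// ... touch p[] without taking page faults ...
 *	shmctl(id, SHM_UNLOCK, NULL);		// unwires both again
 */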

static int
shmget_existing(struct lwp *l, struct sys_shmget_args *uap, int mode,
    int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	kauth_cred_t cred = l->l_cred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((void *)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(struct lwp *l, struct sys_shmget_args *uap, int mode,
    register_t *retval)
{
	int i, segnum, shmid, size;
	kauth_cred_t cred = l->l_cred;
	struct shmid_ds *shmseg;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->_shm_internal = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS|SHMSEG_RMLINGER)) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = l->l_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((void *)shmseg);
	}

	/* Lock the memory */
	if (shm_use_phys) {
		/* Wire the entire object */
		error = uobj_wirepages(shmseg->_shm_internal, 0,
		    round_page(shmseg->shm_segsz));
		if (error) {
			shm_deallocate_segment(shmseg);
		} else {
			/* Tag as wired */
			shmseg->shm_perm.mode |= SHMSEG_WIRED;
		}
	}

	return error;
}

int
sys_shmget(struct lwp *l, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

#ifdef SHMDEBUG
	printf("shmget: key 0x%lx size 0x%x shmflg 0x%x mode 0x%x\n",
	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode);
#endif

	if (SCARG(uap, key) != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(l, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(l, uap, mode, retval);
}
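/*
 * Illustrative sketch (not part of this file): the lookup/create
 * semantics implemented above, using a hypothetical key value.
 *
 *	key_t key = 0x1234;			// example key, not special
 *	int id = shmget(key, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id == -1 && errno == EEXIST)	// key already in use
 *		id = shmget(key, 4096, 0600);	// join the existing segment
 */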

void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	vm2->vm_shm = vm1->vm_shm;

	if (vm1->vm_shm == NULL)
		return;

#ifdef SHMDEBUG
	printf("shmfork %p->%p\n", vm1, vm2);
#endif

	shmmap_s = (struct shmmap_state *)vm1->vm_shm;

	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
		shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
	shmmap_s->nrefs++;
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
		printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
		       vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		return;
	}

#ifdef SHMDEBUG
	printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shm_delete_mapping(vm, shmmap_s, shmmap_se);
	}
	KASSERT(shmmap_s->nitems == 0);
	free(shmmap_s, M_SHM);
}

static int
shmrealloc(int newshmni)
{
	int i, sz;
	vaddr_t v;
	struct shmid_ds *newshmsegs;

	/* XXX: Would be good to have an upper limit */
	if (newshmni < 1)
		return EINVAL;

	/* We can't reallocate to fewer slots than are in use */
	if (shm_nused > newshmni)
		return EPERM;

	/* Allocate new memory area */
	sz = newshmni * sizeof(struct shmid_ds);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0, UVM_KMF_WIRED);
	if (v == 0)
		return ENOMEM;

	newshmsegs = (void *)v;

	/* Copy the segment records in use to the new area */
	for (i = 0; i < shm_nused; i++)
		(void)memcpy(&newshmsegs[i], &shmsegs[i],
		    sizeof(newshmsegs[0]));

	/* Mark all new segments as free, if there are any */
	for (; i < newshmni; i++) {
		newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
		newshmsegs[i].shm_perm._seq = 0;
	}

	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	uvm_km_free(kernel_map, (vaddr_t)shmsegs, sz, UVM_KMF_WIRED);
	shmsegs = newshmsegs;

	return 0;
}

void
shminit(void)
{
	int i, sz;
	vaddr_t v;

	mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Allocate wired memory for our structures */
	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0, UVM_KMF_WIRED);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

static int
sysctl_ipc_shmmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = shminfo.shmmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	mutex_enter(&shm_lock);
	error = shmrealloc(newsize);
	if (error == 0)
		shminfo.shmmni = newsize;
	mutex_exit(&shm_lock);

	return error;
}

static int
sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;
	newsize = shminfo.shmall;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* XXX: Would be good to have an upper limit */
	if (newsize < 1)
		return EINVAL;

	shminfo.shmall = newsize;
	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;

	return 0;
}

SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ipc",
		SYSCTL_DESCR("SysV IPC options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READONLY,
		CTLTYPE_INT, "shmmax",
		SYSCTL_DESCR("Max shared memory segment size in bytes"),
		NULL, 0, &shminfo.shmmax, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmmni",
		SYSCTL_DESCR("Max number of shared memory identifiers"),
		sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmseg",
		SYSCTL_DESCR("Max shared memory segments per process"),
		NULL, 0, &shminfo.shmseg, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shmmaxpgs",
		SYSCTL_DESCR("Max amount of shared memory in pages"),
		sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "shm_use_phys",
		SYSCTL_DESCR("Enable/disable locking of shared memory in "
		    "physical memory"), NULL, 0, &shm_use_phys, 0,
		CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
}
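
/*
 * Illustrative sketch (not part of this file): reading one of the knobs
 * registered above from userland.  sysctlbyname(3) resolves the dotted
 * name to the MIB path created here.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int shmmax;
 *	size_t len = sizeof(shmmax);
 *	if (sysctlbyname("kern.ipc.shmmax", &shmmax, &len, NULL, 0) == 0)
 *		printf("shmmax: %d bytes\n", shmmax);
 */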