/*	$OpenBSD: sysv_shm.c,v 1.39 2003/10/12 23:44:39 millert Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int, int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)                                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 * sys_shm{at,ctl,dt,get}(p, v, retval)          syscall entry points
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per-proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */
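/*
 * Illustrative only (not part of this file): from userland, the
 * syscalls implemented below are typically driven like this, here
 * assuming a private 4096-byte segment:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */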

#define	SHMSEG_REMOVED	0x0200		/* can't overlap ACCESSPERMS */

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
			 int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				 int, register_t *);

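/*
 * Look up a segment by its key.  Returns the index into shmsegs,
 * or -1 if the key is not in use.
 */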
int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

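/*
 * Look up a segment by shmid, validating both the array index and the
 * sequence number encoded in the id.  Segments marked SHMSEG_REMOVED
 * are only returned when findremoved is non-zero.
 */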
struct shmid_ds *
shm_find_segment_by_shmid(int shmid, int findremoved)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	if (!findremoved && (shmseg->shm_perm.mode & SHMSEG_REMOVED))
		return (NULL);
	return (shmseg);
}

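/*
 * Release a segment's anonymous backing object and its descriptor
 * and credit the global usage counters.  The caller clears the
 * shmsegs[] slot.
 */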
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= btoc(size);
	shm_nused--;
}

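/*
 * Remove a mapping from the given address space and, if this was the
 * last attachment of a segment already marked SHMSEG_REMOVED,
 * deallocate the segment itself.
 */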
int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

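/*
 * shmdt(2): find the attachment whose address matches shmaddr and
 * detach it.
 */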
int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

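/*
 * shmat(2): attach a segment; segments marked for removal are not
 * eligible here.
 */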
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	return (sys_shmat1(p, v, retval, 0));
}

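/*
 * Common attach code.  Allocates the per-process shmmap_head on first
 * use, checks permissions, picks or validates the attach address, and
 * maps the segment's anonymous object into the process.
 */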
int
sys_shmat1(struct proc *p, void *v, register_t *retval, int findremoved)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid), findremoved);
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = uvm_map_hint(p, prot);
	}
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		/* Drop the object reference taken above before failing. */
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}

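/*
 * shmctl(2): IPC_STAT, IPC_SET and IPC_RMID; SHM_LOCK and SHM_UNLOCK
 * are not supported.
 */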
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid), 1);
	if (shmseg == NULL)
		return (EINVAL);
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
				sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}

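/*
 * Handle shmget(2) against an existing key: check permissions and
 * size, and honor IPC_CREAT|IPC_EXCL.
 */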
int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

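/*
 * Allocate a new segment: charge it against the global limits, find a
 * free slot in shmsegs, create the backing anonymous object, and
 * initialize the descriptor.
 */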
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	key_t key;
	int segnum, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += btoc(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK : 0);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= btoc(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

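/*
 * shmget(2): return the id of an existing segment matching key, or
 * allocate a new segment if IPC_CREAT is set or key is IPC_PRIVATE.
 */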
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

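/*
 * Called at fork time: the child inherits a copy of the parent's
 * attach state, so bump the reference count on each attached segment.
 */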
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, vm2->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

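/*
 * Called when an address space is torn down: detach any remaining
 * mappings and free the per-process attach state.
 */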
void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

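/*
 * Called once at boot: set up the segment pool and the shmsegs and
 * shmseqs arrays, sized according to shminfo.
 */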
void
shminit(void)
{

	pool_init(&shm_pool, sizeof(struct shmid_ds) +
	    sizeof(struct shm_handle), 0, 0, 0, "shmpl",
	    &pool_allocator_nointr);
	shmsegs = malloc(shminfo.shmmni * sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK);
	bzero(shmsegs, shminfo.shmmni * sizeof(struct shmid_ds *));
	shmseqs = malloc(shminfo.shmmni * sizeof(unsigned short),
	    M_SHM, M_WAITOK);
	bzero(shmseqs, shminfo.shmmni * sizeof(unsigned short));

	shminfo.shmmax *= PAGE_SIZE;	/* entered in pages, convert to bytes */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

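/*
 * Convert a shmid_ds to its old form for the benefit of compat code.
 */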
void
shmid_n2o(struct shmid_ds *n, struct oshmid_ds *o)
{

	o->shm_segsz = n->shm_segsz;
	o->shm_lpid = n->shm_lpid;
	o->shm_cpid = n->shm_cpid;
	o->shm_nattch = n->shm_nattch;
	o->shm_atime = n->shm_atime;
	o->shm_dtime = n->shm_dtime;
	o->shm_ctime = n->shm_ctime;
	o->shm_internal = n->shm_internal;
	ipc_n2o(&n->shm_perm, &o->shm_perm);
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
	void *newp, size_t newlen)
{
	int error, val;
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	if (namelen != 2) {
		switch (name[0]) {
		case KERN_SHMINFO_SHMMAX:
		case KERN_SHMINFO_SHMMIN:
		case KERN_SHMINFO_SHMMNI:
		case KERN_SHMINFO_SHMSEG:
		case KERN_SHMINFO_SHMALL:
			break;
		default:
			return (ENOTDIR);	/* overloaded */
		}
	}

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax)) || newp == NULL)
			return (error);

		/* If the new shmmax exceeds shmall, crank shmall to match. */
		if (btoc(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = btoc(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		val = shminfo.shmmin;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmin)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmmin must be >= 1 */
		shminfo.shmmin = val;
		return (0);
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmni)
			return (error);

		/* shmmni can only grow, up to a ceiling of 0xffff. */
		if (val < shminfo.shmmni || val > 0xffff)
			return (EINVAL);

		/* Expand shmsegs and shmseqs arrays */
		newsegs = malloc(val * sizeof(struct shmid_ds *),
		    M_SHM, M_WAITOK);
		bcopy(shmsegs, newsegs,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		bzero(newsegs + shminfo.shmmni,
		    (val - shminfo.shmmni) * sizeof(struct shmid_ds *));
		newseqs = malloc(val * sizeof(unsigned short), M_SHM, M_WAITOK);
		bcopy(shmseqs, newseqs,
		    shminfo.shmmni * sizeof(unsigned short));
		bzero(newseqs + shminfo.shmmni,
		    (val - shminfo.shmmni) * sizeof(unsigned short));
		free(shmsegs, M_SHM);
		free(shmseqs, M_SHM);
		shmsegs = newsegs;
		shmseqs = newseqs;
		shminfo.shmmni = val;
		return (0);
	case KERN_SHMINFO_SHMSEG:
		val = shminfo.shmseg;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmseg)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmseg must be >= 1 */
		shminfo.shmseg = val;
		return (0);
	case KERN_SHMINFO_SHMALL:
		val = shminfo.shmall;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmall)
			return (error);
		if (val < shminfo.shmall)
			return (EINVAL);	/* can't decrease shmall */
		shminfo.shmall = val;
		return (0);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}