/*	$OpenBSD: sysv_shm.c,v 1.37 2003/06/17 21:56:25 millert Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int, int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);		                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 * shmsys(arg1, arg2, arg3, arg4);         shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */
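
/*
 * For orientation, a minimal sketch of how userland reaches the
 * handlers below (assumes the standard <sys/ipc.h>/<sys/shm.h> API;
 * error checks omitted, file name and values illustrative only):
 *
 *	key_t key = ftok("/tmp/somefile", 'x');
 *	int shmid = shmget(key, 4096, IPC_CREAT | 0600);  // sys_shmget()
 *	char *p = shmat(shmid, NULL, 0);                  // sys_shmat()
 *	p[0] = 'A';                  // visible to every other attacher
 *	shmdt(p);                                         // sys_shmdt()
 *	shmctl(shmid, IPC_RMID, NULL);                    // sys_shmctl()
 */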

#define	SHMSEG_REMOVED  	0x0200		/* can't overlap ACCESSPERMS */

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

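/*
 * Per-process attach table hung off the vmspace.  'state' is really a
 * variable-length array of 'shmseg' entries; sys_shmat() allocates the
 * whole structure as
 * sizeof(int) + shminfo.shmseg * sizeof(struct shmmap_state).
 */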
struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
			 int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				 int, register_t *);

int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

struct shmid_ds *
shm_find_segment_by_shmid(int shmid, int findremoved)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	if (!findremoved && (shmseg->shm_perm.mode & SHMSEG_REMOVED))
		return (NULL);
	return (shmseg);
}

void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= btoc(size);
	shm_nused--;
}

int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
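	/*
	 * On the last detach of a segment already marked for removal
	 * via IPC_RMID, free the segment and remember its slot for
	 * reuse by shmget_allocate_segment().
	 */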
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid), 0);
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = uvm_map_hint(p, prot);
	}
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error)
		return (error);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}

int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid), 1);
	if (shmseg == NULL)
		return (EINVAL);
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
				sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}
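
/*
 * A minimal userland sketch of the shmctl() commands handled above
 * (assumes <sys/shm.h>; error checks omitted):
 *
 *	struct shmid_ds ds;
 *	shmctl(shmid, IPC_STAT, &ds);   // copyout of the kernel shmid_ds
 *	ds.shm_perm.mode = 0600;
 *	shmctl(shmid, IPC_SET, &ds);    // copyin; requires IPC_M
 *	shmctl(shmid, IPC_RMID, NULL);  // deferred until shm_nattch == 0
 */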

int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	key_t key;
	int segnum, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += btoc(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK : 0);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= btoc(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
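	/*
	 * Bump the slot's sequence number (it wraps at 0x7fff) so that
	 * stale shmids naming an earlier segment in this slot fail the
	 * seq check in shm_find_segment_by_shmid().
	 */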
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, vm2->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

void
shminit(void)
{

	pool_init(&shm_pool, sizeof(struct shmid_ds) +
	    sizeof(struct shm_handle), 0, 0, 0, "shmpl",
	    &pool_allocator_nointr);
	shmsegs = malloc(shminfo.shmmni * sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK);
	bzero(shmsegs, shminfo.shmmni * sizeof(struct shmid_ds *));
	shmseqs = malloc(shminfo.shmmni * sizeof(unsigned short),
	    M_SHM, M_WAITOK);
	bzero(shmseqs, shminfo.shmmni * sizeof(unsigned short));

	shminfo.shmmax *= PAGE_SIZE;	/* actually in pages */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

void
shmid_n2o(struct shmid_ds *n, struct oshmid_ds *o)
{

	o->shm_segsz = n->shm_segsz;
	o->shm_lpid = n->shm_lpid;
	o->shm_cpid = n->shm_cpid;
	o->shm_nattch = n->shm_nattch;
	o->shm_atime = n->shm_atime;
	o->shm_dtime = n->shm_dtime;
	o->shm_ctime = n->shm_ctime;
	o->shm_internal = n->shm_internal;
	ipc_n2o(&n->shm_perm, &o->shm_perm);
}

/*
 * Userland access to struct shminfo.
 */
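/*
 * A sketch of a matching userland call (assumes the CTL_KERN /
 * KERN_SHMINFO mib names from <sys/sysctl.h>; error checks omitted):
 *
 *	int mib[3] = { CTL_KERN, KERN_SHMINFO, KERN_SHMINFO_SHMMAX };
 *	int shmmax;
 *	size_t len = sizeof(shmmax);
 *	sysctl(mib, 3, &shmmax, &len, NULL, 0);
 */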
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
	void *newp, size_t newlen)
{
	int error, val;
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (btoc(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = btoc(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		val = shminfo.shmmin;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmin)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmmin must be >= 1 */
		shminfo.shmmin = val;
		return (0);
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmni)
			return (error);

		if (val < shminfo.shmmni || val > 0xffff)
			return (EINVAL);

		/* Expand shmsegs and shmseqs arrays */
		newsegs = malloc(val * sizeof(struct shmid_ds *),
		    M_SHM, M_WAITOK);
		bcopy(shmsegs, newsegs,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		bzero(newsegs + shminfo.shmmni,
		    (val - shminfo.shmmni) * sizeof(struct shmid_ds *));
		newseqs = malloc(val * sizeof(unsigned short), M_SHM, M_WAITOK);
		bcopy(shmseqs, newseqs,
		    shminfo.shmmni * sizeof(unsigned short));
		bzero(newseqs + shminfo.shmmni,
		    (val - shminfo.shmmni) * sizeof(unsigned short));
		free(shmsegs, M_SHM);
		free(shmseqs, M_SHM);
		shmsegs = newsegs;
		shmseqs = newseqs;
		shminfo.shmmni = val;
		return (0);
	case KERN_SHMINFO_SHMSEG:
		val = shminfo.shmseg;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmseg)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmseg must be >= 1 */
		shminfo.shmseg = val;
		return (0);
	case KERN_SHMINFO_SHMALL:
		val = shminfo.shmall;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmall)
			return (error);
		if (val < shminfo.shmall)
			return (EINVAL);	/* can't decrease shmall */
		shminfo.shmall = val;
		return (0);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
645