xref: /netbsd-src/sys/kern/sysv_msg.c (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*	$NetBSD: sysv_msg.c,v 1.59 2009/01/11 02:45:53 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Implementation of SVID messages
35  *
36  * Author: Daniel Boulet
37  *
38  * Copyright 1993 Daniel Boulet and RTMX Inc.
39  *
40  * This system call was implemented by Daniel Boulet under contract from RTMX.
41  *
42  * Redistribution and use in source forms, with and without modification,
43  * are permitted provided that this entire comment appears intact.
44  *
45  * Redistribution in binary form may occur without any restrictions.
46  * Obviously, it would be nice if you gave credit where credit is due
47  * but requiring it would be too onerous.
48  *
49  * This software is provided ``AS IS'' without any warranties of any kind.
50  */
51 
52 #include <sys/cdefs.h>
53 __KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.59 2009/01/11 02:45:53 christos Exp $");
54 
55 #define SYSVMSG
56 
57 #include <sys/param.h>
58 #include <sys/kernel.h>
59 #include <sys/msg.h>
60 #include <sys/sysctl.h>
61 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
62 #include <sys/syscallargs.h>
63 #include <sys/kauth.h>
64 
65 #define MSG_DEBUG
66 #undef MSG_DEBUG_OK
67 
68 #ifdef MSG_DEBUG_OK
69 #define MSG_PRINTF(a)	printf a
70 #else
71 #define MSG_PRINTF(a)
72 #endif
73 
74 static int	nfree_msgmaps;		/* # of free map entries */
75 static short	free_msgmaps;	/* head of linked list of free map entries */
76 static struct	__msg *free_msghdrs;	/* list of free msg headers */
77 static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
78 static struct	msgmap *msgmaps;	/* MSGSEG msgmap structures */
79 static struct __msg *msghdrs;		/* MSGTQL msg headers */
80 
81 kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
82 kmutex_t msgmutex;			/* subsystem lock */
83 
84 static u_int	msg_waiters = 0;	/* total number of msgsnd/msgrcv waiters */
85 static bool	msg_realloc_state;
86 static kcondvar_t msg_realloc_cv;
87 
88 static void msg_freehdr(struct __msg *);
89 
90 void
91 msginit(void)
92 {
93 	int i, sz;
94 	vaddr_t v;
95 
96 	/*
97 	 * msginfo.msgssz should be a power of two for efficiency reasons.
98 	 * It is also pretty silly if msginfo.msgssz is less than 8
99 	 * or greater than about 256, so the value is sanity-checked below.
100 	 */
101 
102 	i = 8;
103 	while (i < 1024 && i != msginfo.msgssz)
104 		i <<= 1;
105 	if (i != msginfo.msgssz) {
106 		panic("msginfo.msgssz = %d, not a small power of 2",
107 		    msginfo.msgssz);
108 	}
109 
110 	if (msginfo.msgseg > 32767) {
111 		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
112 	}
113 
114 	/* Allocate the wired memory for our structures */
115 	sz = ALIGN(msginfo.msgmax) +
116 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
117 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
118 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
119 	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
120 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
121 	if (v == 0)
122 		panic("sysv_msg: cannot allocate memory");
123 	msgpool = (void *)v;
124 	msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
125 	msghdrs = (void *)((uintptr_t)msgmaps +
126 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
127 	msqs = (void *)((uintptr_t)msghdrs +
128 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));
129 
130 	for (i = 0; i < (msginfo.msgseg - 1); i++)
131 		msgmaps[i].next = i + 1;
132 	msgmaps[msginfo.msgseg - 1].next = -1;
133 
134 	free_msgmaps = 0;
135 	nfree_msgmaps = msginfo.msgseg;
136 
137 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
138 		msghdrs[i].msg_type = 0;
139 		msghdrs[i].msg_next = &msghdrs[i + 1];
140 	}
141 	i = msginfo.msgtql - 1;
142 	msghdrs[i].msg_type = 0;
143 	msghdrs[i].msg_next = NULL;
144 	free_msghdrs = &msghdrs[0];
145 
146 	for (i = 0; i < msginfo.msgmni; i++) {
147 		cv_init(&msqs[i].msq_cv, "msgwait");
148 		/* Implies entry is available */
149 		msqs[i].msq_u.msg_qbytes = 0;
150 		/* Reset to a known value */
151 		msqs[i].msq_u.msg_perm._seq = 0;
152 	}
153 
154 	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
155 	cv_init(&msg_realloc_cv, "msgrealc");
156 	msg_realloc_state = false;
157 }
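/*
 * msginit() above carves one wired, zeroed allocation into four consecutive
 * regions: the message byte pool (msgmax bytes), the msgmap segment list,
 * the message headers and the per-queue kmsq_t structures.  The userland
 * sketch below only mirrors that layout arithmetic; MY_ALIGN(), carve() and
 * calloc() are illustrative stand-ins for the kernel's ALIGN() and
 * uvm_km_alloc() and are not part of this file.
 *
 *	#include <stdlib.h>
 *
 *	#define MY_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))
 *
 *	struct layout {
 *		char *pool;	// message data, msgmax bytes
 *		char *maps;	// msgseg segment-list entries
 *		char *hdrs;	// msgtql message headers
 *		char *queues;	// msgmni queue structures
 *	};
 *
 *	static int
 *	carve(struct layout *l, size_t poolsz, size_t mapsz, size_t hdrsz,
 *	    size_t qsz)
 *	{
 *		char *base;
 *
 *		base = calloc(1, MY_ALIGN(poolsz) + MY_ALIGN(mapsz) +
 *		    MY_ALIGN(hdrsz) + MY_ALIGN(qsz));
 *		if (base == NULL)
 *			return -1;
 *		l->pool = base;
 *		l->maps = l->pool + MY_ALIGN(poolsz);
 *		l->hdrs = l->maps + MY_ALIGN(mapsz);
 *		l->queues = l->hdrs + MY_ALIGN(hdrsz);
 *		return 0;
 *	}
 */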
158 
159 static int
160 msgrealloc(int newmsgmni, int newmsgseg)
161 {
162 	struct msgmap *new_msgmaps;
163 	struct __msg *new_msghdrs, *new_free_msghdrs;
164 	char *old_msgpool, *new_msgpool;
165 	kmsq_t *new_msqs;
166 	vaddr_t v;
167 	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
168 	short new_free_msgmaps;
169 
170 	if (newmsgmni < 1 || newmsgseg < 1)
171 		return EINVAL;
172 
173 	/* Allocate the wired memory for our structures */
174 	newmsgmax = msginfo.msgssz * newmsgseg;
175 	sz = ALIGN(newmsgmax) +
176 	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
177 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
178 	    ALIGN(newmsgmni * sizeof(kmsq_t));
179 	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
180 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
181 	if (v == 0)
182 		return ENOMEM;
183 
184 	mutex_enter(&msgmutex);
185 	if (msg_realloc_state) {
186 		mutex_exit(&msgmutex);
187 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
188 		return EBUSY;
189 	}
190 	msg_realloc_state = true;
191 	if (msg_waiters) {
192 		/*
193 		 * The reallocation flag is now set; wake up all waiters
194 		 * and wait until they have all exited.
195 		 */
196 		for (i = 0; i < msginfo.msgmni; i++)
197 			cv_broadcast(&msqs[i].msq_cv);
198 		while (msg_waiters)
199 			cv_wait(&msg_realloc_cv, &msgmutex);
200 	}
201 	old_msgpool = msgpool;
202 
203 	/* We cannot reallocate less memory than we use */
204 	i = 0;
205 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
206 		struct msqid_ds *mptr;
207 		kmsq_t *msq;
208 
209 		msq = &msqs[msqid];
210 		mptr = &msq->msq_u;
211 		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
212 			i = msqid;
213 	}
214 	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
215 		mutex_exit(&msgmutex);
216 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
217 		return EBUSY;
218 	}
219 
220 	new_msgpool = (void *)v;
221 	new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
222 	new_msghdrs = (void *)((uintptr_t)new_msgmaps +
223 	    ALIGN(newmsgseg * sizeof(struct msgmap)));
224 	new_msqs = (void *)((uintptr_t)new_msghdrs +
225 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));
226 
227 	/* Initialize the structures */
228 	for (i = 0; i < (newmsgseg - 1); i++)
229 		new_msgmaps[i].next = i + 1;
230 	new_msgmaps[newmsgseg - 1].next = -1;
231 	new_free_msgmaps = 0;
232 	new_nfree_msgmaps = newmsgseg;
233 
234 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
235 		new_msghdrs[i].msg_type = 0;
236 		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
237 	}
238 	i = msginfo.msgtql - 1;
239 	new_msghdrs[i].msg_type = 0;
240 	new_msghdrs[i].msg_next = NULL;
241 	new_free_msghdrs = &new_msghdrs[0];
242 
243 	for (i = 0; i < newmsgmni; i++) {
244 		new_msqs[i].msq_u.msg_qbytes = 0;
245 		new_msqs[i].msq_u.msg_perm._seq = 0;
246 		cv_init(&new_msqs[i].msq_cv, "msgwait");
247 	}
248 
249 	/*
250 	 * Copy all message queue identifiers, message headers and buffer
251 	 * pools to the new memory location.
252 	 */
253 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
254 		struct __msg *nmsghdr, *msghdr, *pmsghdr;
255 		struct msqid_ds *nmptr, *mptr;
256 		kmsq_t *nmsq, *msq;
257 
258 		msq = &msqs[msqid];
259 		mptr = &msq->msq_u;
260 
261 		if (mptr->msg_qbytes == 0 &&
262 		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
263 			continue;
264 
265 		nmsq = &new_msqs[msqid];
266 		nmptr = &nmsq->msq_u;
267 		memcpy(nmptr, mptr, sizeof(struct msqid_ds));
268 
269 		/*
270 		 * Go through the message headers and copy each one into a
271 		 * newly taken header, defragmenting the pool as we go.
272 		 */
273 		nmsghdr = pmsghdr = NULL;
274 		msghdr = mptr->_msg_first;
275 		while (msghdr) {
276 			short nnext = 0, next;
277 			u_short msgsz, segcnt;
278 
279 			/* Take an entry from the new list of free msghdrs */
280 			nmsghdr = new_free_msghdrs;
281 			KASSERT(nmsghdr != NULL);
282 			new_free_msghdrs = nmsghdr->msg_next;
283 
284 			nmsghdr->msg_next = NULL;
285 			if (pmsghdr) {
286 				pmsghdr->msg_next = nmsghdr;
287 			} else {
288 				nmptr->_msg_first = nmsghdr;
289 			}
290 			pmsghdr = nmsghdr;
291 			nmsghdr->msg_ts = msghdr->msg_ts;
292 			nmsghdr->msg_spot = -1;
293 
294 			/* Compute the amount of segments and reserve them */
295 			msgsz = msghdr->msg_ts;
296 			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
297 			if (segcnt == 0) {
298 				/* Zero-length message: nothing to copy */
				msghdr = msghdr->msg_next;
				continue;
			}
299 			while (segcnt--) {
300 				nnext = new_free_msgmaps;
301 				new_free_msgmaps = new_msgmaps[nnext].next;
302 				new_nfree_msgmaps--;
303 				new_msgmaps[nnext].next = nmsghdr->msg_spot;
304 				nmsghdr->msg_spot = nnext;
305 			}
306 
307 			/* Copy all segments */
308 			KASSERT(nnext == nmsghdr->msg_spot);
309 			next = msghdr->msg_spot;
310 			while (msgsz > 0) {
311 				size_t tlen;
312 
313 				if (msgsz >= msginfo.msgssz) {
314 					tlen = msginfo.msgssz;
315 					msgsz -= msginfo.msgssz;
316 				} else {
317 					tlen = msgsz;
318 					msgsz = 0;
319 				}
320 
321 				/* Copy the message buffer */
322 				memcpy(&new_msgpool[nnext * msginfo.msgssz],
323 				    &msgpool[next * msginfo.msgssz], tlen);
324 
325 				/* Next entry of the map */
326 				nnext = new_msgmaps[nnext].next;
327 				next = msgmaps[next].next;
328 			}
329 
330 			/* Next message header */
331 			msghdr = msghdr->msg_next;
332 		}
333 		nmptr->_msg_last = nmsghdr;
334 	}
335 	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
336 	    (newmsgseg - new_nfree_msgmaps));
337 
338 	sz = ALIGN(msginfo.msgmax) +
339 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
340 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
341 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
342 
343 	for (i = 0; i < msginfo.msgmni; i++)
344 		cv_destroy(&msqs[i].msq_cv);
345 
346 	/* Set the pointers and update the new values */
347 	msgpool = new_msgpool;
348 	msgmaps = new_msgmaps;
349 	msghdrs = new_msghdrs;
350 	msqs = new_msqs;
351 
352 	free_msghdrs = new_free_msghdrs;
353 	free_msgmaps = new_free_msgmaps;
354 	nfree_msgmaps = new_nfree_msgmaps;
355 	msginfo.msgmni = newmsgmni;
356 	msginfo.msgseg = newmsgseg;
357 	msginfo.msgmax = newmsgmax;
358 
359 	/* Reallocation completed - notify all waiters, if any */
360 	msg_realloc_state = false;
361 	cv_broadcast(&msg_realloc_cv);
362 	mutex_exit(&msgmutex);
363 
364 	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
365 	return 0;
366 }
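/*
 * msgrealloc() above copies every allocated queue, its headers and its
 * message bytes into a freshly allocated pool (defragmenting the segments
 * as it goes) and only frees the old pool afterwards, so queued messages
 * survive a resize.  It is reached through the kern.ipc.msgmni and
 * kern.ipc.msgseg sysctl handlers at the bottom of this file.  The sketch
 * below is an illustrative, typically root-only userland caller; the value
 * 128 is only an example.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *
 *	static void
 *	grow_msgmni(void)
 *	{
 *		int newmni = 128;
 *
 *		if (sysctlbyname("kern.ipc.msgmni", NULL, NULL,
 *		    &newmni, sizeof(newmni)) == -1)
 *			err(1, "sysctlbyname kern.ipc.msgmni");
 *	}
 */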
367 
368 static void
369 msg_freehdr(struct __msg *msghdr)
370 {
371 
372 	KASSERT(mutex_owned(&msgmutex));
373 
374 	while (msghdr->msg_ts > 0) {
375 		short next;
376 		KASSERT(msghdr->msg_spot >= 0);
377 		KASSERT(msghdr->msg_spot < msginfo.msgseg);
378 
379 		next = msgmaps[msghdr->msg_spot].next;
380 		msgmaps[msghdr->msg_spot].next = free_msgmaps;
381 		free_msgmaps = msghdr->msg_spot;
382 		nfree_msgmaps++;
383 		msghdr->msg_spot = next;
384 		if (msghdr->msg_ts >= msginfo.msgssz)
385 			msghdr->msg_ts -= msginfo.msgssz;
386 		else
387 			msghdr->msg_ts = 0;
388 	}
389 	KASSERT(msghdr->msg_spot == -1);
390 	msghdr->msg_next = free_msghdrs;
391 	free_msghdrs = msghdr;
392 }
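/*
 * Message bodies live in fixed-size segments of msgpool, chained by index
 * through msgmaps[] and terminated with -1; the free list is just another
 * such chain rooted at free_msgmaps.  msg_freehdr() above walks a header's
 * chain and pushes each segment back onto that list.  The standalone sketch
 * below (NSEG, seg_next, seg_free and seg_release are illustrative names,
 * not kernel symbols) shows the push operation for a single segment.
 *
 *	#define NSEG	8
 *
 *	static short seg_next[NSEG];	// per-segment "next" index
 *	static short seg_free = -1;	// head of the free chain, -1 = empty
 *
 *	static void
 *	seg_release(short idx)
 *	{
 *		seg_next[idx] = seg_free;	// old head becomes our next
 *		seg_free = idx;			// released segment is new head
 *	}
 */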
393 
394 int
395 sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
396     register_t *retval)
397 {
398 	/* {
399 		syscallarg(int) msqid;
400 		syscallarg(int) cmd;
401 		syscallarg(struct msqid_ds *) buf;
402 	} */
403 	struct msqid_ds msqbuf;
404 	int cmd, error;
405 
406 	cmd = SCARG(uap, cmd);
407 
408 	if (cmd == IPC_SET) {
409 		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
410 		if (error)
411 			return (error);
412 	}
413 
414 	error = msgctl1(l, SCARG(uap, msqid), cmd,
415 	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);
416 
417 	if (error == 0 && cmd == IPC_STAT)
418 		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));
419 
420 	return (error);
421 }
422 
423 int
424 msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
425 {
426 	kauth_cred_t cred = l->l_cred;
427 	struct msqid_ds *msqptr;
428 	kmsq_t *msq;
429 	int error = 0, ix;
430 
431 	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));
432 
433 	ix = IPCID_TO_IX(msqid);
434 
435 	mutex_enter(&msgmutex);
436 
437 	if (ix < 0 || ix >= msginfo.msgmni) {
438 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
439 		    msginfo.msgmni));
440 		error = EINVAL;
441 		goto unlock;
442 	}
443 
444 	msq = &msqs[ix];
445 	msqptr = &msq->msq_u;
446 
447 	if (msqptr->msg_qbytes == 0) {
448 		MSG_PRINTF(("no such msqid\n"));
449 		error = EINVAL;
450 		goto unlock;
451 	}
452 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
453 		MSG_PRINTF(("wrong sequence number\n"));
454 		error = EINVAL;
455 		goto unlock;
456 	}
457 
458 	switch (cmd) {
459 	case IPC_RMID:
460 	{
461 		struct __msg *msghdr;
462 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
463 			break;
464 		/* Free the message headers */
465 		msghdr = msqptr->_msg_first;
466 		while (msghdr != NULL) {
467 			struct __msg *msghdr_tmp;
468 
469 			/* Free the segments of each message */
470 			msqptr->_msg_cbytes -= msghdr->msg_ts;
471 			msqptr->msg_qnum--;
472 			msghdr_tmp = msghdr;
473 			msghdr = msghdr->msg_next;
474 			msg_freehdr(msghdr_tmp);
475 		}
476 		KASSERT(msqptr->_msg_cbytes == 0);
477 		KASSERT(msqptr->msg_qnum == 0);
478 
479 		/* Mark it as free */
480 		msqptr->msg_qbytes = 0;
481 		cv_broadcast(&msq->msq_cv);
482 	}
483 		break;
484 
485 	case IPC_SET:
486 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
487 			break;
488 		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
489 		    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
490 		    NULL) != 0) {
491 			error = EPERM;
492 			break;
493 		}
494 		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
495 			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
496 			    "(truncating)\n", msginfo.msgmnb));
497 			/* silently restrict qbytes to system limit */
498 			msqbuf->msg_qbytes = msginfo.msgmnb;
499 		}
500 		if (msqbuf->msg_qbytes == 0) {
501 			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
502 			error = EINVAL;		/* XXX non-standard errno! */
503 			break;
504 		}
505 		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
506 		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
507 		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
508 		    (msqbuf->msg_perm.mode & 0777);
509 		msqptr->msg_qbytes = msqbuf->msg_qbytes;
510 		msqptr->msg_ctime = time_second;
511 		break;
512 
513 	case IPC_STAT:
514 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
515 			MSG_PRINTF(("requester doesn't have read access\n"));
516 			break;
517 		}
518 		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
519 		break;
520 
521 	default:
522 		MSG_PRINTF(("invalid command %d\n", cmd));
523 		error = EINVAL;
524 		break;
525 	}
526 
527 unlock:
528 	mutex_exit(&msgmutex);
529 	return (error);
530 }
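/*
 * msgctl1() above is the kernel side of msgctl(2): sys___msgctl50() copies
 * the IPC_SET buffer in and the IPC_STAT buffer out, and the command itself
 * is validated and applied here under msgmutex.  A minimal userland use,
 * assuming "id" is an identifier previously returned by msgget(2):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *	#include <err.h>
 *
 *	static void
 *	stat_and_remove(int id)
 *	{
 *		struct msqid_ds ds;
 *
 *		if (msgctl(id, IPC_STAT, &ds) == -1)
 *			err(1, "msgctl IPC_STAT");
 *		// ds.msg_qnum now holds the number of queued messages
 *		if (msgctl(id, IPC_RMID, NULL) == -1)
 *			err(1, "msgctl IPC_RMID");
 *	}
 */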
531 
532 int
533 sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
534 {
535 	/* {
536 		syscallarg(key_t) key;
537 		syscallarg(int) msgflg;
538 	} */
539 	int msqid, error = 0;
540 	int key = SCARG(uap, key);
541 	int msgflg = SCARG(uap, msgflg);
542 	kauth_cred_t cred = l->l_cred;
543 	struct msqid_ds *msqptr = NULL;
544 	kmsq_t *msq;
545 
546 	mutex_enter(&msgmutex);
547 
548 	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));
549 
550 	if (key != IPC_PRIVATE) {
551 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
552 			msq = &msqs[msqid];
553 			msqptr = &msq->msq_u;
554 			if (msqptr->msg_qbytes != 0 &&
555 			    msqptr->msg_perm._key == key)
556 				break;
557 		}
558 		if (msqid < msginfo.msgmni) {
559 			MSG_PRINTF(("found public key\n"));
560 			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
561 				MSG_PRINTF(("not exclusive\n"));
562 				error = EEXIST;
563 				goto unlock;
564 			}
565 			if ((error = ipcperm(cred, &msqptr->msg_perm,
566 			    msgflg & 0700))) {
567 				MSG_PRINTF(("requester doesn't have 0%o access\n",
568 				    msgflg & 0700));
569 				goto unlock;
570 			}
571 			goto found;
572 		}
573 	}
574 
575 	MSG_PRINTF(("need to allocate the msqid_ds\n"));
576 	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
577 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
578 			/*
579 			 * Look for an unallocated and unlocked msqid_ds.
580 			 * msqid_ds's can be locked by msgsnd or msgrcv while
581 			 * they are copying the message in/out.  We can't
582 			 * re-use the entry until they release it.
583 			 */
584 			msq = &msqs[msqid];
585 			msqptr = &msq->msq_u;
586 			if (msqptr->msg_qbytes == 0 &&
587 			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
588 				break;
589 		}
590 		if (msqid == msginfo.msgmni) {
591 			MSG_PRINTF(("no more msqid_ds's available\n"));
592 			error = ENOSPC;
593 			goto unlock;
594 		}
595 		MSG_PRINTF(("msqid %d is available\n", msqid));
596 		msqptr->msg_perm._key = key;
597 		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
598 		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
599 		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
600 		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
601 		msqptr->msg_perm.mode = (msgflg & 0777);
602 		/* Make sure that the returned msqid is unique */
603 		msqptr->msg_perm._seq++;
604 		msqptr->_msg_first = NULL;
605 		msqptr->_msg_last = NULL;
606 		msqptr->_msg_cbytes = 0;
607 		msqptr->msg_qnum = 0;
608 		msqptr->msg_qbytes = msginfo.msgmnb;
609 		msqptr->msg_lspid = 0;
610 		msqptr->msg_lrpid = 0;
611 		msqptr->msg_stime = 0;
612 		msqptr->msg_rtime = 0;
613 		msqptr->msg_ctime = time_second;
614 	} else {
615 		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
616 		error = ENOENT;
617 		goto unlock;
618 	}
619 
620 found:
621 	/* Construct the unique msqid */
622 	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);
623 
624 unlock:
625 	mutex_exit(&msgmutex);
626 	return (error);
627 }
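/*
 * sys_msgget() above either finds an existing queue by key or claims a free
 * msqid_ds slot, bumping the sequence number so that stale identifiers are
 * later rejected by msgctl/msgsnd/msgrcv.  A typical userland call; the key
 * 0x1234 and mode 0600 are only examples:
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *	#include <err.h>
 *
 *	static int
 *	create_queue(void)
 *	{
 *		int id;
 *
 *		// IPC_EXCL makes the call fail with EEXIST if the key
 *		// is already in use.
 *		id = msgget((key_t)0x1234, IPC_CREAT | IPC_EXCL | 0600);
 *		if (id == -1)
 *			err(1, "msgget");
 *		return id;
 *	}
 */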
628 
629 int
630 sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
631 {
632 	/* {
633 		syscallarg(int) msqid;
634 		syscallarg(const void *) msgp;
635 		syscallarg(size_t) msgsz;
636 		syscallarg(int) msgflg;
637 	} */
638 
639 	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
640 	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
641 }
642 
643 int
644 msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
645     int msgflg, size_t typesz, copyin_t fetch_type)
646 {
647 	int segs_needed, error = 0, msqid;
648 	kauth_cred_t cred = l->l_cred;
649 	struct msqid_ds *msqptr;
650 	struct __msg *msghdr;
651 	kmsq_t *msq;
652 	short next;
653 
654 	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr, user_msgp,
655 	    (long long)msgsz, msgflg));
656 restart:
657 	msqid = IPCID_TO_IX(msqidr);
658 
659 	mutex_enter(&msgmutex);
660 	/* In case of reallocation, we will wait for completion */
661 	while (__predict_false(msg_realloc_state))
662 		cv_wait(&msg_realloc_cv, &msgmutex);
663 
664 	if (msqid < 0 || msqid >= msginfo.msgmni) {
665 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
666 		    msginfo.msgmni));
667 		error = EINVAL;
668 		goto unlock;
669 	}
670 
671 	msq = &msqs[msqid];
672 	msqptr = &msq->msq_u;
673 
674 	if (msqptr->msg_qbytes == 0) {
675 		MSG_PRINTF(("no such message queue id\n"));
676 		error = EINVAL;
677 		goto unlock;
678 	}
679 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
680 		MSG_PRINTF(("wrong sequence number\n"));
681 		error = EINVAL;
682 		goto unlock;
683 	}
684 
685 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
686 		MSG_PRINTF(("requester doesn't have write access\n"));
687 		goto unlock;
688 	}
689 
690 	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
691 	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
692 	    (long long)msgsz, msginfo.msgssz, segs_needed));
693 	for (;;) {
694 		int need_more_resources = 0;
695 
696 		/*
697 		 * check msgsz [cannot be negative since it is unsigned]
698 		 * (inside this loop in case msg_qbytes changes while we sleep)
699 		 */
700 
701 		if (msgsz > msqptr->msg_qbytes) {
702 			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
703 			error = EINVAL;
704 			goto unlock;
705 		}
706 
707 		if (msqptr->msg_perm.mode & MSG_LOCKED) {
708 			MSG_PRINTF(("msqid is locked\n"));
709 			need_more_resources = 1;
710 		}
711 		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
712 			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
713 			need_more_resources = 1;
714 		}
715 		if (segs_needed > nfree_msgmaps) {
716 			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
717 			need_more_resources = 1;
718 		}
719 		if (free_msghdrs == NULL) {
720 			MSG_PRINTF(("no more msghdrs\n"));
721 			need_more_resources = 1;
722 		}
723 
724 		if (need_more_resources) {
725 			int we_own_it;
726 
727 			if ((msgflg & IPC_NOWAIT) != 0) {
728 				MSG_PRINTF(("need more resources but caller "
729 				    "doesn't want to wait\n"));
730 				error = EAGAIN;
731 				goto unlock;
732 			}
733 
734 			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
735 				MSG_PRINTF(("we don't own the msqid_ds\n"));
736 				we_own_it = 0;
737 			} else {
738 				/* Force later arrivals to wait for our
739 				   request */
740 				MSG_PRINTF(("we own the msqid_ds\n"));
741 				msqptr->msg_perm.mode |= MSG_LOCKED;
742 				we_own_it = 1;
743 			}
744 
745 			msg_waiters++;
746 			MSG_PRINTF(("goodnight\n"));
747 			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
748 			MSG_PRINTF(("good morning, error=%d\n", error));
749 			msg_waiters--;
750 
751 			if (we_own_it)
752 				msqptr->msg_perm.mode &= ~MSG_LOCKED;
753 
754 			/*
755 			 * If a reallocation started while we slept, notify
756 			 * the reallocator and restart the call.
757 			 */
758 			if (msg_realloc_state) {
759 				cv_broadcast(&msg_realloc_cv);
760 				mutex_exit(&msgmutex);
761 				goto restart;
762 			}
763 
764 			if (error != 0) {
765 				MSG_PRINTF(("msgsnd: interrupted system "
766 				    "call\n"));
767 				error = EINTR;
768 				goto unlock;
769 			}
770 
771 			/*
772 			 * Make sure that the msq queue still exists
773 			 */
774 
775 			if (msqptr->msg_qbytes == 0) {
776 				MSG_PRINTF(("msqid deleted\n"));
777 				error = EIDRM;
778 				goto unlock;
779 			}
780 		} else {
781 			MSG_PRINTF(("got all the resources that we need\n"));
782 			break;
783 		}
784 	}
785 
786 	/*
787 	 * We have the resources that we need.
788 	 * Make sure!
789 	 */
790 
791 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
792 	KASSERT(segs_needed <= nfree_msgmaps);
793 	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
794 	KASSERT(free_msghdrs != NULL);
795 
796 	/*
797 	 * Re-lock the msqid_ds in case we page-fault when copying in the
798 	 * message
799 	 */
800 
801 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
802 	msqptr->msg_perm.mode |= MSG_LOCKED;
803 
804 	/*
805 	 * Allocate a message header
806 	 */
807 
808 	msghdr = free_msghdrs;
809 	free_msghdrs = msghdr->msg_next;
810 	msghdr->msg_spot = -1;
811 	msghdr->msg_ts = msgsz;
812 
813 	/*
814 	 * Allocate space for the message
815 	 */
816 
817 	while (segs_needed > 0) {
818 		KASSERT(nfree_msgmaps > 0);
819 		KASSERT(free_msgmaps != -1);
820 		KASSERT(free_msgmaps < msginfo.msgseg);
821 
822 		next = free_msgmaps;
823 		MSG_PRINTF(("allocating segment %d to message\n", next));
824 		free_msgmaps = msgmaps[next].next;
825 		nfree_msgmaps--;
826 		msgmaps[next].next = msghdr->msg_spot;
827 		msghdr->msg_spot = next;
828 		segs_needed--;
829 	}
830 
831 	/*
832 	 * Copy in the message type
833 	 */
834 	mutex_exit(&msgmutex);
835 	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
836 	mutex_enter(&msgmutex);
837 	if (error != 0) {
838 		MSG_PRINTF(("error %d copying the message type\n", error));
839 		msg_freehdr(msghdr);
840 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
841 		cv_broadcast(&msq->msq_cv);
842 		goto unlock;
843 	}
844 	user_msgp += typesz;
845 
846 	/*
847 	 * Validate the message type
848 	 */
849 
850 	if (msghdr->msg_type < 1) {
851 		msg_freehdr(msghdr);
852 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
853 		cv_broadcast(&msq->msq_cv);
854 		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
855 		error = EINVAL;
856 		goto unlock;
857 	}
858 
859 	/*
860 	 * Copy in the message body
861 	 */
862 
863 	next = msghdr->msg_spot;
864 	while (msgsz > 0) {
865 		size_t tlen;
866 		KASSERT(next > -1);
867 		KASSERT(next < msginfo.msgseg);
868 
869 		if (msgsz > msginfo.msgssz)
870 			tlen = msginfo.msgssz;
871 		else
872 			tlen = msgsz;
873 		mutex_exit(&msgmutex);
874 		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
875 		mutex_enter(&msgmutex);
876 		if (error != 0) {
877 			MSG_PRINTF(("error %d copying in message segment\n",
878 			    error));
879 			msg_freehdr(msghdr);
880 			msqptr->msg_perm.mode &= ~MSG_LOCKED;
881 			cv_broadcast(&msq->msq_cv);
882 			goto unlock;
883 		}
884 		msgsz -= tlen;
885 		user_msgp += tlen;
886 		next = msgmaps[next].next;
887 	}
888 	KASSERT(next == -1);
889 
890 	/*
891 	 * We've got the message.  Unlock the msqid_ds.
892 	 */
893 
894 	msqptr->msg_perm.mode &= ~MSG_LOCKED;
895 
896 	/*
897 	 * Make sure that the msqid_ds is still allocated.
898 	 */
899 
900 	if (msqptr->msg_qbytes == 0) {
901 		msg_freehdr(msghdr);
902 		cv_broadcast(&msq->msq_cv);
903 		error = EIDRM;
904 		goto unlock;
905 	}
906 
907 	/*
908 	 * Put the message into the queue
909 	 */
910 
911 	if (msqptr->_msg_first == NULL) {
912 		msqptr->_msg_first = msghdr;
913 		msqptr->_msg_last = msghdr;
914 	} else {
915 		msqptr->_msg_last->msg_next = msghdr;
916 		msqptr->_msg_last = msghdr;
917 	}
918 	msqptr->_msg_last->msg_next = NULL;
919 
920 	msqptr->_msg_cbytes += msghdr->msg_ts;
921 	msqptr->msg_qnum++;
922 	msqptr->msg_lspid = l->l_proc->p_pid;
923 	msqptr->msg_stime = time_second;
924 
925 	cv_broadcast(&msq->msq_cv);
926 
927 unlock:
928 	mutex_exit(&msgmutex);
929 	return error;
930 }
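/*
 * msgsnd1() above reserves a message header and enough pool segments, keeps
 * the queue marked MSG_LOCKED while it copies the type and body in from
 * userland (dropping msgmutex around each copy), and finally appends the
 * header to the queue.  The matching userland call; struct mymsg is an
 * illustrative layout, not something defined by this file:
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *	#include <string.h>
 *	#include <err.h>
 *
 *	struct mymsg {
 *		long mtype;		// must be >= 1
 *		char mtext[64];		// example payload size
 *	};
 *
 *	static void
 *	send_hello(int id)
 *	{
 *		struct mymsg m;
 *
 *		m.mtype = 1;
 *		strlcpy(m.mtext, "hello", sizeof(m.mtext));
 *		// msgsz counts only the payload, not the type word
 *		if (msgsnd(id, &m, strlen(m.mtext) + 1, 0) == -1)
 *			err(1, "msgsnd");
 *	}
 */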
931 
932 int
933 sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
934 {
935 	/* {
936 		syscallarg(int) msqid;
937 		syscallarg(void *) msgp;
938 		syscallarg(size_t) msgsz;
939 		syscallarg(long) msgtyp;
940 		syscallarg(int) msgflg;
941 	} */
942 
943 	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
944 	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
945 	    sizeof(long), copyout, retval);
946 }
947 
948 int
949 msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
950     int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
951 {
952 	size_t len;
953 	kauth_cred_t cred = l->l_cred;
954 	struct msqid_ds *msqptr;
955 	struct __msg *msghdr;
956 	int error = 0, msqid;
957 	kmsq_t *msq;
958 	short next;
959 
960 	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
961 	    user_msgp, (long long)msgsz, msgtyp, msgflg));
962 restart:
963 	msqid = IPCID_TO_IX(msqidr);
964 
965 	mutex_enter(&msgmutex);
966 	/* In case of reallocation, we will wait for completion */
967 	while (__predict_false(msg_realloc_state))
968 		cv_wait(&msg_realloc_cv, &msgmutex);
969 
970 	if (msqid < 0 || msqid >= msginfo.msgmni) {
971 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
972 		    msginfo.msgmni));
973 		error = EINVAL;
974 		goto unlock;
975 	}
976 
977 	msq = &msqs[msqid];
978 	msqptr = &msq->msq_u;
979 
980 	if (msqptr->msg_qbytes == 0) {
981 		MSG_PRINTF(("no such message queue id\n"));
982 		error = EINVAL;
983 		goto unlock;
984 	}
985 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
986 		MSG_PRINTF(("wrong sequence number\n"));
987 		error = EINVAL;
988 		goto unlock;
989 	}
990 
991 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
992 		MSG_PRINTF(("requester doesn't have read access\n"));
993 		goto unlock;
994 	}
995 
996 	msghdr = NULL;
997 	while (msghdr == NULL) {
998 		if (msgtyp == 0) {
999 			msghdr = msqptr->_msg_first;
1000 			if (msghdr != NULL) {
1001 				if (msgsz < msghdr->msg_ts &&
1002 				    (msgflg & MSG_NOERROR) == 0) {
1003 					MSG_PRINTF(("first msg on the queue "
1004 					    "is too big (want %lld, got %d)\n",
1005 					    (long long)msgsz, msghdr->msg_ts));
1006 					error = E2BIG;
1007 					goto unlock;
1008 				}
1009 				if (msqptr->_msg_first == msqptr->_msg_last) {
1010 					msqptr->_msg_first = NULL;
1011 					msqptr->_msg_last = NULL;
1012 				} else {
1013 					msqptr->_msg_first = msghdr->msg_next;
1014 					KASSERT(msqptr->_msg_first != NULL);
1015 				}
1016 			}
1017 		} else {
1018 			struct __msg *previous;
1019 			struct __msg **prev;
1020 
1021 			for (previous = NULL, prev = &msqptr->_msg_first;
1022 			     (msghdr = *prev) != NULL;
1023 			     previous = msghdr, prev = &msghdr->msg_next) {
1024 				/*
1025 				 * Is this message's type an exact match or is
1026 				 * this message's type less than or equal to
1027 				 * the absolute value of a negative msgtyp?
1028 				 * Note that the second half of this test can
1029 				 * NEVER be true if msgtyp is positive since
1030 				 * msg_type is always positive!
1031 				 */
1032 
1033 				if (msgtyp != msghdr->msg_type &&
1034 				    msghdr->msg_type > -msgtyp)
1035 					continue;
1036 
1037 				MSG_PRINTF(("found message type %ld, requested %ld\n",
1038 				    msghdr->msg_type, msgtyp));
1039 				if (msgsz < msghdr->msg_ts &&
1040 				     (msgflg & MSG_NOERROR) == 0) {
1041 					MSG_PRINTF(("requested message on the queue "
1042 					    "is too big (want %lld, got %d)\n",
1043 					    (long long)msgsz, msghdr->msg_ts));
1044 					error = E2BIG;
1045 					goto unlock;
1046 				}
1047 				*prev = msghdr->msg_next;
1048 				if (msghdr != msqptr->_msg_last)
1049 					break;
1050 				if (previous == NULL) {
1051 					KASSERT(prev == &msqptr->_msg_first);
1052 					msqptr->_msg_first = NULL;
1053 					msqptr->_msg_last = NULL;
1054 				} else {
1055 					KASSERT(prev != &msqptr->_msg_first);
1056 					msqptr->_msg_last = previous;
1057 				}
1058 				break;
1059 			}
1060 		}
1061 
1062 		/*
1063 		 * We've either extracted the msghdr for the appropriate
1064 		 * message or there isn't one.
1065 		 * If there is one then bail out of this loop.
1066 		 */
1067 		if (msghdr != NULL)
1068 			break;
1069 
1070 		/*
1071 		 * Hmph!  No message found.  Does the user want to wait?
1072 		 */
1073 
1074 		if ((msgflg & IPC_NOWAIT) != 0) {
1075 			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
1076 			    msgtyp));
1077 			error = ENOMSG;
1078 			goto unlock;
1079 		}
1080 
1081 		/*
1082 		 * Wait for something to happen
1083 		 */
1084 
1085 		msg_waiters++;
1086 		MSG_PRINTF(("msgrcv:  goodnight\n"));
1087 		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
1088 		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
1089 		msg_waiters--;
1090 
1091 		/*
1092 		 * If a reallocation started while we slept, notify
1093 		 * the reallocator and restart the call.
1094 		 */
1095 		if (msg_realloc_state) {
1096 			cv_broadcast(&msg_realloc_cv);
1097 			mutex_exit(&msgmutex);
1098 			goto restart;
1099 		}
1100 
1101 		if (error != 0) {
1102 			MSG_PRINTF(("msgrcv: interrupted system call\n"));
1103 			error = EINTR;
1104 			goto unlock;
1105 		}
1106 
1107 		/*
1108 		 * Make sure that the msq queue still exists
1109 		 */
1110 
1111 		if (msqptr->msg_qbytes == 0 ||
1112 		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
1113 			MSG_PRINTF(("msqid deleted\n"));
1114 			error = EIDRM;
1115 			goto unlock;
1116 		}
1117 	}
1118 
1119 	/*
1120 	 * Return the message to the user.
1121 	 *
1122 	 * First, do the bookkeeping (before we risk being interrupted).
1123 	 */
1124 
1125 	msqptr->_msg_cbytes -= msghdr->msg_ts;
1126 	msqptr->msg_qnum--;
1127 	msqptr->msg_lrpid = l->l_proc->p_pid;
1128 	msqptr->msg_rtime = time_second;
1129 
1130 	/*
1131 	 * Make msgsz the actual amount that we'll be returning.
1132 	 * Note that this effectively truncates the message if it is too long
1133 	 * (since msgsz is never increased).
1134 	 */
1135 
1136 	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
1137 	    (long long)msgsz, msghdr->msg_ts));
1138 	if (msgsz > msghdr->msg_ts)
1139 		msgsz = msghdr->msg_ts;
1140 
1141 	/*
1142 	 * Return the type to the user.
1143 	 */
1144 	mutex_exit(&msgmutex);
1145 	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
1146 	mutex_enter(&msgmutex);
1147 	if (error != 0) {
1148 		MSG_PRINTF(("error (%d) copying out message type\n", error));
1149 		msg_freehdr(msghdr);
1150 		cv_broadcast(&msq->msq_cv);
1151 		goto unlock;
1152 	}
1153 	user_msgp += typesz;
1154 
1155 	/*
1156 	 * Return the segments to the user
1157 	 */
1158 
1159 	next = msghdr->msg_spot;
1160 	for (len = 0; len < msgsz; len += msginfo.msgssz) {
1161 		size_t tlen;
1162 		KASSERT(next > -1);
1163 		KASSERT(next < msginfo.msgseg);
1164 
1165 		if (msgsz - len > msginfo.msgssz)
1166 			tlen = msginfo.msgssz;
1167 		else
1168 			tlen = msgsz - len;
1169 		mutex_exit(&msgmutex);
1170 		error = (*put_type)(&msgpool[next * msginfo.msgssz],
1171 		    user_msgp, tlen);
1172 		mutex_enter(&msgmutex);
1173 		if (error != 0) {
1174 			MSG_PRINTF(("error (%d) copying out message segment\n",
1175 			    error));
1176 			msg_freehdr(msghdr);
1177 			cv_broadcast(&msq->msq_cv);
1178 			goto unlock;
1179 		}
1180 		user_msgp += tlen;
1181 		next = msgmaps[next].next;
1182 	}
1183 
1184 	/*
1185 	 * Done, return the actual number of bytes copied out.
1186 	 */
1187 
1188 	msg_freehdr(msghdr);
1189 	cv_broadcast(&msq->msq_cv);
1190 	*retval = msgsz;
1191 
1192 unlock:
1193 	mutex_exit(&msgmutex);
1194 	return error;
1195 }
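/*
 * msgrcv1() above selects a message by type: msgtyp == 0 takes the first
 * message on the queue, a positive msgtyp takes the first message of
 * exactly that type, and a negative msgtyp takes the first message whose
 * type is <= the absolute value.  A userland sketch matching the msgsnd
 * example above (struct mymsg is the same illustrative layout):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *	#include <err.h>
 *
 *	static void
 *	recv_one(int id)
 *	{
 *		struct mymsg {
 *			long mtype;
 *			char mtext[64];
 *		} m;
 *		ssize_t n;
 *
 *		// msgtyp 0: first message of any type.  MSG_NOERROR would
 *		// instead silently truncate an oversized message.
 *		n = msgrcv(id, &m, sizeof(m.mtext), 0, 0);
 *		if (n == -1)
 *			err(1, "msgrcv");
 *		// n is the number of payload bytes actually returned
 *	}
 */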
1196 
1197 /*
1198  * Sysctl initialization and nodes.
1199  */
1200 
1201 static int
1202 sysctl_ipc_msgmni(SYSCTLFN_ARGS)
1203 {
1204 	int newsize, error;
1205 	struct sysctlnode node;
1206 	node = *rnode;
1207 	node.sysctl_data = &newsize;
1208 
1209 	newsize = msginfo.msgmni;
1210 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1211 	if (error || newp == NULL)
1212 		return error;
1213 
1214 	sysctl_unlock();
1215 	error = msgrealloc(newsize, msginfo.msgseg);
1216 	sysctl_relock();
1217 	return error;
1218 }
1219 
1220 static int
1221 sysctl_ipc_msgseg(SYSCTLFN_ARGS)
1222 {
1223 	int newsize, error;
1224 	struct sysctlnode node;
1225 	node = *rnode;
1226 	node.sysctl_data = &newsize;
1227 
1228 	newsize = msginfo.msgseg;
1229 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1230 	if (error || newp == NULL)
1231 		return error;
1232 
1233 	sysctl_unlock();
1234 	error = msgrealloc(msginfo.msgmni, newsize);
1235 	sysctl_relock();
1236 	return error;
1237 }
1238 
1239 SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
1240 {
1241 	const struct sysctlnode *node = NULL;
1242 
1243 	sysctl_createv(clog, 0, NULL, NULL,
1244 		CTLFLAG_PERMANENT,
1245 		CTLTYPE_NODE, "kern", NULL,
1246 		NULL, 0, NULL, 0,
1247 		CTL_KERN, CTL_EOL);
1248 	sysctl_createv(clog, 0, NULL, &node,
1249 		CTLFLAG_PERMANENT,
1250 		CTLTYPE_NODE, "ipc",
1251 		SYSCTL_DESCR("SysV IPC options"),
1252 		NULL, 0, NULL, 0,
1253 		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1254 
1255 	if (node == NULL)
1256 		return;
1257 
1258 	sysctl_createv(clog, 0, &node, NULL,
1259 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1260 		CTLTYPE_INT, "msgmni",
1261 		SYSCTL_DESCR("Max number of message queue identifiers"),
1262 		sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
1263 		CTL_CREATE, CTL_EOL);
1264 	sysctl_createv(clog, 0, &node, NULL,
1265 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1266 		CTLTYPE_INT, "msgseg",
1267 		SYSCTL_DESCR("Max number of message segments"),
1268 		sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
1269 		CTL_CREATE, CTL_EOL);
1270 }
1271
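/*
 * The nodes created above appear as kern.ipc.msgmni and kern.ipc.msgseg:
 * writing either one resizes the subsystem through msgrealloc(), while a
 * read simply reports the current msginfo value.  An illustrative userland
 * query (show_limits is not a kernel symbol):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <err.h>
 *
 *	static void
 *	show_limits(void)
 *	{
 *		int mni, seg;
 *		size_t len = sizeof(mni);
 *
 *		if (sysctlbyname("kern.ipc.msgmni", &mni, &len, NULL, 0) == -1)
 *			err(1, "kern.ipc.msgmni");
 *		len = sizeof(seg);
 *		if (sysctlbyname("kern.ipc.msgseg", &seg, &len, NULL, 0) == -1)
 *			err(1, "kern.ipc.msgseg");
 *		printf("msgmni=%d msgseg=%d\n", mni, seg);
 *	}
 */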