xref: /netbsd-src/sys/kern/sysv_msg.c (revision cd22f25e6f6d1cc1f197fe8c5468a80f51d1c4e1)
1 /*	$NetBSD: sysv_msg.c,v 1.56 2008/04/28 20:24:05 martin Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Implementation of SVID messages
35  *
36  * Author: Daniel Boulet
37  *
38  * Copyright 1993 Daniel Boulet and RTMX Inc.
39  *
40  * This system call was implemented by Daniel Boulet under contract from RTMX.
41  *
42  * Redistribution and use in source forms, with and without modification,
43  * are permitted provided that this entire comment appears intact.
44  *
45  * Redistribution in binary form may occur without any restrictions.
46  * Obviously, it would be nice if you gave credit where credit is due
47  * but requiring it would be too onerous.
48  *
49  * This software is provided ``AS IS'' without any warranties of any kind.
50  */
51 
52 #include <sys/cdefs.h>
53 __KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.56 2008/04/28 20:24:05 martin Exp $");
54 
55 #define SYSVMSG
56 
57 #include <sys/param.h>
58 #include <sys/kernel.h>
59 #include <sys/msg.h>
60 #include <sys/sysctl.h>
61 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
62 #include <sys/syscallargs.h>
63 #include <sys/kauth.h>
64 
65 #define MSG_DEBUG
66 #undef MSG_DEBUG_OK
67 
68 #ifdef MSG_DEBUG_OK
69 #define MSG_PRINTF(a)	printf a
70 #else
71 #define MSG_PRINTF(a)
72 #endif
73 
74 static int	nfree_msgmaps;		/* # of free map entries */
75 static short	free_msgmaps;	/* head of linked list of free map entries */
76 static struct	__msg *free_msghdrs;	/* list of free msg headers */
77 static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
78 static struct	msgmap *msgmaps;	/* MSGSEG msgmap structures */
79 static struct __msg *msghdrs;		/* MSGTQL msg headers */
80 
81 kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
82 kmutex_t msgmutex;			/* subsystem lock */
83 
84 static u_int	msg_waiters = 0;	/* total number of msgrcv waiters */
85 static bool	msg_realloc_state;
86 static kcondvar_t msg_realloc_cv;
87 
88 static void msg_freehdr(struct __msg *);
89 
90 void
91 msginit(void)
92 {
93 	int i, sz;
94 	vaddr_t v;
95 
96 	/*
97 	 * msginfo.msgssz should be a power of two for efficiency reasons.
98 	 * It is also pretty silly if msginfo.msgssz is less than 8 or
99 	 * greater than 1024, so the loop below enforces that range.
100 	 */
101 
102 	i = 8;
103 	while (i < 1024 && i != msginfo.msgssz)
104 		i <<= 1;
105 	if (i != msginfo.msgssz) {
106 		panic("msginfo.msgssz = %d, not a small power of 2",
107 		    msginfo.msgssz);
108 	}
109 
110 	if (msginfo.msgseg > 32767) {
111 		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
112 	}
113 
114 	/* Allocate the wired memory for our structures */
115 	sz = ALIGN(msginfo.msgmax) +
116 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
117 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
118 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
119 	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
120 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
121 	if (v == 0)
122 		panic("sysv_msg: cannot allocate memory");
123 	msgpool = (void *)v;
124 	msgmaps = (void *)(ALIGN(msgpool) + msginfo.msgmax);
125 	msghdrs = (void *)(ALIGN(msgmaps) +
126 	    msginfo.msgseg * sizeof(struct msgmap));
127 	msqs = (void *)(ALIGN(msghdrs) +
128 	    msginfo.msgtql * sizeof(struct __msg));
129 
130 	for (i = 0; i < (msginfo.msgseg - 1); i++)
131 		msgmaps[i].next = i + 1;
132 	msgmaps[msginfo.msgseg - 1].next = -1;
133 
134 	free_msgmaps = 0;
135 	nfree_msgmaps = msginfo.msgseg;
136 
137 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
138 		msghdrs[i].msg_type = 0;
139 		msghdrs[i].msg_next = &msghdrs[i + 1];
140 	}
141 	i = msginfo.msgtql - 1;
142 	msghdrs[i].msg_type = 0;
143 	msghdrs[i].msg_next = NULL;
144 	free_msghdrs = &msghdrs[0];
145 
146 	for (i = 0; i < msginfo.msgmni; i++) {
147 		cv_init(&msqs[i].msq_cv, "msgwait");
148 		/* Implies entry is available */
149 		msqs[i].msq_u.msg_qbytes = 0;
150 		/* Reset to a known value */
151 		msqs[i].msq_u.msg_perm._seq = 0;
152 	}
153 
154 	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
155 	cv_init(&msg_realloc_cv, "msgrealc");
156 	msg_realloc_state = false;
157 }
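
/*
 * The segment pool set up above is kept on an index-linked free list:
 * free_msgmaps is the list head and msgmaps[i].next chains the remaining
 * free entries, with -1 marking the end.  A minimal stand-alone sketch of
 * the same idea (NSEG and the helper names are illustrative only, and no
 * locking is shown); seg_next[] plays the role of msgmaps[i].next and
 * seg_free that of free_msgmaps:
 *
 *	#define NSEG	8
 *
 *	static short	seg_next[NSEG];
 *	static short	seg_free;
 *
 *	static void
 *	seg_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < NSEG - 1; i++)
 *			seg_next[i] = i + 1;
 *		seg_next[NSEG - 1] = -1;
 *		seg_free = 0;
 *	}
 *
 *	static short
 *	seg_alloc(void)
 *	{
 *		short s = seg_free;
 *
 *		if (s != -1)
 *			seg_free = seg_next[s];
 *		return s;
 *	}
 */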
158 
159 static int
160 msgrealloc(int newmsgmni, int newmsgseg)
161 {
162 	struct msgmap *new_msgmaps;
163 	struct __msg *new_msghdrs, *new_free_msghdrs;
164 	char *old_msgpool, *new_msgpool;
165 	kmsq_t *new_msqs;
166 	vaddr_t v;
167 	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
168 	short new_free_msgmaps;
169 
170 	if (newmsgmni < 1 || newmsgseg < 1)
171 		return EINVAL;
172 
173 	/* Allocate the wired memory for our structures */
174 	newmsgmax = msginfo.msgssz * newmsgseg;
175 	sz = ALIGN(newmsgmax) +
176 	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
177 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
178 	    ALIGN(newmsgmni * sizeof(kmsq_t));
179 	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
180 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
181 	if (v == 0)
182 		return ENOMEM;
183 
184 	mutex_enter(&msgmutex);
185 	if (msg_realloc_state) {
186 		mutex_exit(&msgmutex);
187 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
188 		return EBUSY;
189 	}
190 	msg_realloc_state = true;
191 	if (msg_waiters) {
192 		/*
193 		 * Mark the reallocation state, wake up all waiters,
194 		 * and wait until they have all exited.
195 		 */
196 		for (i = 0; i < msginfo.msgmni; i++)
197 			cv_broadcast(&msqs[i].msq_cv);
198 		while (msg_waiters)
199 			cv_wait(&msg_realloc_cv, &msgmutex);
200 	}
201 	old_msgpool = msgpool;
202 
203 	/* We cannot reallocate less memory than we use */
204 	i = 0;
205 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
206 		struct msqid_ds *mptr;
207 		kmsq_t *msq;
208 
209 		msq = &msqs[msqid];
210 		mptr = &msq->msq_u;
211 		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
212 			i = msqid;
213 	}
214 	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
215 		mutex_exit(&msgmutex);
216 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
217 		return EBUSY;
218 	}
219 
220 	new_msgpool = (void *)v;
221 	new_msgmaps = (void *)(ALIGN(new_msgpool) + newmsgmax);
222 	new_msghdrs = (void *)(ALIGN(new_msgmaps) +
223 	    newmsgseg * sizeof(struct msgmap));
224 	new_msqs = (void *)(ALIGN(new_msghdrs) +
225 	    msginfo.msgtql * sizeof(struct __msg));
226 
227 	/* Initialize the structures */
228 	for (i = 0; i < (newmsgseg - 1); i++)
229 		new_msgmaps[i].next = i + 1;
230 	new_msgmaps[newmsgseg - 1].next = -1;
231 	new_free_msgmaps = 0;
232 	new_nfree_msgmaps = newmsgseg;
233 
234 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
235 		new_msghdrs[i].msg_type = 0;
236 		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
237 	}
238 	i = msginfo.msgtql - 1;
239 	new_msghdrs[i].msg_type = 0;
240 	new_msghdrs[i].msg_next = NULL;
241 	new_free_msghdrs = &new_msghdrs[0];
242 
243 	for (i = 0; i < newmsgmni; i++) {
244 		new_msqs[i].msq_u.msg_qbytes = 0;
245 		new_msqs[i].msq_u.msg_perm._seq = 0;
246 		cv_init(&new_msqs[i].msq_cv, "msgwait");
247 	}
248 
249 	/*
250 	 * Copy all message queue identifiers, message headers and buffer
251 	 * pools to the new memory location.
252 	 */
253 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
254 		struct __msg *nmsghdr, *msghdr, *pmsghdr;
255 		struct msqid_ds *nmptr, *mptr;
256 		kmsq_t *nmsq, *msq;
257 
258 		msq = &msqs[msqid];
259 		mptr = &msq->msq_u;
260 
261 		if (mptr->msg_qbytes == 0 &&
262 		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
263 			continue;
264 
265 		nmsq = &new_msqs[msqid];
266 		nmptr = &nmsq->msq_u;
267 		memcpy(nmptr, mptr, sizeof(struct msqid_ds));
268 
269 		/*
270 		 * Go through the message headers and copy each one over
271 		 * to a freshly taken new header, defragmenting as we go.
272 		 */
273 		nmsghdr = pmsghdr = NULL;
274 		msghdr = mptr->_msg_first;
275 		while (msghdr) {
276 			short nnext = 0, next;
277 			u_short msgsz, segcnt;
278 
279 			/* Take an entry from the new list of free msghdrs */
280 			nmsghdr = new_free_msghdrs;
281 			KASSERT(nmsghdr != NULL);
282 			new_free_msghdrs = nmsghdr->msg_next;
283 
284 			nmsghdr->msg_next = NULL;
285 			if (pmsghdr) {
286 				pmsghdr->msg_next = nmsghdr;
287 			} else {
288 				nmptr->_msg_first = nmsghdr;
289 			}
290 			pmsghdr = nmsghdr;
291 			nmsghdr->msg_ts = msghdr->msg_ts;
292 			nmsghdr->msg_spot = -1;
293 
294 			/* Compute the number of segments and reserve them */
295 			msgsz = msghdr->msg_ts;
296 			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
297 			if (segcnt == 0) {
298 				msghdr = msghdr->msg_next;
				continue;
			}
299 			while (segcnt--) {
300 				nnext = new_free_msgmaps;
301 				new_free_msgmaps = new_msgmaps[nnext].next;
302 				new_nfree_msgmaps--;
303 				new_msgmaps[nnext].next = nmsghdr->msg_spot;
304 				nmsghdr->msg_spot = nnext;
305 			}
306 
307 			/* Copy all segments */
308 			KASSERT(nnext == nmsghdr->msg_spot);
309 			next = msghdr->msg_spot;
310 			while (msgsz > 0) {
311 				size_t tlen;
312 
313 				if (msgsz >= msginfo.msgssz) {
314 					tlen = msginfo.msgssz;
315 					msgsz -= msginfo.msgssz;
316 				} else {
317 					tlen = msgsz;
318 					msgsz = 0;
319 				}
320 
321 				/* Copy the message buffer */
322 				memcpy(&new_msgpool[nnext * msginfo.msgssz],
323 				    &msgpool[next * msginfo.msgssz], tlen);
324 
325 				/* Next entry of the map */
326 				nnext = new_msgmaps[nnext].next;
327 				next = msgmaps[next].next;
328 			}
329 
330 			/* Next message header */
331 			msghdr = msghdr->msg_next;
332 		}
333 		nmptr->_msg_last = nmsghdr;
334 	}
335 	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
336 	    (newmsgseg - new_nfree_msgmaps));
337 
338 	sz = ALIGN(msginfo.msgmax) +
339 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
340 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
341 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
342 
343 	for (i = 0; i < msginfo.msgmni; i++)
344 		cv_destroy(&msqs[i].msq_cv);
345 
346 	/* Set the pointers and update the new values */
347 	msgpool = new_msgpool;
348 	msgmaps = new_msgmaps;
349 	msghdrs = new_msghdrs;
350 	msqs = new_msqs;
351 
352 	free_msghdrs = new_free_msghdrs;
353 	free_msgmaps = new_free_msgmaps;
354 	nfree_msgmaps = new_nfree_msgmaps;
355 	msginfo.msgmni = newmsgmni;
356 	msginfo.msgseg = newmsgseg;
357 	msginfo.msgmax = newmsgmax;
358 
359 	/* Reallocation completed - notify all waiters, if any */
360 	msg_realloc_state = false;
361 	cv_broadcast(&msg_realloc_cv);
362 	mutex_exit(&msgmutex);
363 
364 	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
365 	return 0;
366 }
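
/*
 * msgrealloc() is only reached through the writable kern.ipc.msgmni and
 * kern.ipc.msgseg sysctl nodes created at the bottom of this file.  A
 * minimal user-space sketch of growing msgmni with sysctlbyname(3); the
 * helper name is illustrative and the caller needs appropriate privileges:
 *
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	grow_msgmni(int newmni)
 *	{
 *
 *		return sysctlbyname("kern.ipc.msgmni", NULL, NULL,
 *		    &newmni, sizeof(newmni));
 *	}
 */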
367 
368 static void
369 msg_freehdr(struct __msg *msghdr)
370 {
371 
372 	KASSERT(mutex_owned(&msgmutex));
373 
374 	while (msghdr->msg_ts > 0) {
375 		short next;
376 		KASSERT(msghdr->msg_spot >= 0);
377 		KASSERT(msghdr->msg_spot < msginfo.msgseg);
378 
379 		next = msgmaps[msghdr->msg_spot].next;
380 		msgmaps[msghdr->msg_spot].next = free_msgmaps;
381 		free_msgmaps = msghdr->msg_spot;
382 		nfree_msgmaps++;
383 		msghdr->msg_spot = next;
384 		if (msghdr->msg_ts >= msginfo.msgssz)
385 			msghdr->msg_ts -= msginfo.msgssz;
386 		else
387 			msghdr->msg_ts = 0;
388 	}
389 	KASSERT(msghdr->msg_spot == -1);
390 	msghdr->msg_next = free_msghdrs;
391 	free_msghdrs = msghdr;
392 }
393 
394 int
395 sys___msgctl13(struct lwp *l, const struct sys___msgctl13_args *uap, register_t *retval)
396 {
397 	/* {
398 		syscallarg(int) msqid;
399 		syscallarg(int) cmd;
400 		syscallarg(struct msqid_ds *) buf;
401 	} */
402 	struct msqid_ds msqbuf;
403 	int cmd, error;
404 
405 	cmd = SCARG(uap, cmd);
406 
407 	if (cmd == IPC_SET) {
408 		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
409 		if (error)
410 			return (error);
411 	}
412 
413 	error = msgctl1(l, SCARG(uap, msqid), cmd,
414 	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);
415 
416 	if (error == 0 && cmd == IPC_STAT)
417 		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));
418 
419 	return (error);
420 }
421 
422 int
423 msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
424 {
425 	kauth_cred_t cred = l->l_cred;
426 	struct msqid_ds *msqptr;
427 	kmsq_t *msq;
428 	int error = 0, ix;
429 
430 	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));
431 
432 	ix = IPCID_TO_IX(msqid);
433 
434 	mutex_enter(&msgmutex);
435 
436 	if (ix < 0 || ix >= msginfo.msgmni) {
437 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
438 		    msginfo.msgmni));
439 		error = EINVAL;
440 		goto unlock;
441 	}
442 
443 	msq = &msqs[ix];
444 	msqptr = &msq->msq_u;
445 
446 	if (msqptr->msg_qbytes == 0) {
447 		MSG_PRINTF(("no such msqid\n"));
448 		error = EINVAL;
449 		goto unlock;
450 	}
451 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
452 		MSG_PRINTF(("wrong sequence number\n"));
453 		error = EINVAL;
454 		goto unlock;
455 	}
456 
457 	switch (cmd) {
458 	case IPC_RMID:
459 	{
460 		struct __msg *msghdr;
461 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
462 			break;
463 		/* Free the message headers */
464 		msghdr = msqptr->_msg_first;
465 		while (msghdr != NULL) {
466 			struct __msg *msghdr_tmp;
467 
468 			/* Free the segments of each message */
469 			msqptr->_msg_cbytes -= msghdr->msg_ts;
470 			msqptr->msg_qnum--;
471 			msghdr_tmp = msghdr;
472 			msghdr = msghdr->msg_next;
473 			msg_freehdr(msghdr_tmp);
474 		}
475 		KASSERT(msqptr->_msg_cbytes == 0);
476 		KASSERT(msqptr->msg_qnum == 0);
477 
478 		/* Mark it as free */
479 		msqptr->msg_qbytes = 0;
480 		cv_broadcast(&msq->msq_cv);
481 	}
482 		break;
483 
484 	case IPC_SET:
485 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
486 			break;
487 		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
488 		    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
489 		    NULL) != 0) {
490 			error = EPERM;
491 			break;
492 		}
493 		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
494 			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
495 			    "(truncating)\n", msginfo.msgmnb));
496 			/* silently restrict qbytes to system limit */
497 			msqbuf->msg_qbytes = msginfo.msgmnb;
498 		}
499 		if (msqbuf->msg_qbytes == 0) {
500 			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
501 			error = EINVAL;		/* XXX non-standard errno! */
502 			break;
503 		}
504 		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
505 		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
506 		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
507 		    (msqbuf->msg_perm.mode & 0777);
508 		msqptr->msg_qbytes = msqbuf->msg_qbytes;
509 		msqptr->msg_ctime = time_second;
510 		break;
511 
512 	case IPC_STAT:
513 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
514 			MSG_PRINTF(("requester doesn't have read access\n"));
515 			break;
516 		}
517 		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
518 		break;
519 
520 	default:
521 		MSG_PRINTF(("invalid command %d\n", cmd));
522 		error = EINVAL;
523 		break;
524 	}
525 
526 unlock:
527 	mutex_exit(&msgmutex);
528 	return (error);
529 }
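
/*
 * The command dispatch above backs the msgctl(2) interface.  A minimal
 * user-space sketch (the helper name is illustrative) that stats a queue
 * and then removes it:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *	#include <stdio.h>
 *
 *	static int
 *	stat_and_remove(int msqid)
 *	{
 *		struct msqid_ds ds;
 *
 *		if (msgctl(msqid, IPC_STAT, &ds) == -1)
 *			return -1;
 *		printf("%lu message(s), %lu byte limit\n",
 *		    (unsigned long)ds.msg_qnum,
 *		    (unsigned long)ds.msg_qbytes);
 *		return msgctl(msqid, IPC_RMID, NULL);
 *	}
 */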
530 
531 int
532 sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
533 {
534 	/* {
535 		syscallarg(key_t) key;
536 		syscallarg(int) msgflg;
537 	} */
538 	int msqid, error = 0;
539 	int key = SCARG(uap, key);
540 	int msgflg = SCARG(uap, msgflg);
541 	kauth_cred_t cred = l->l_cred;
542 	struct msqid_ds *msqptr = NULL;
543 	kmsq_t *msq;
544 
545 	mutex_enter(&msgmutex);
546 
547 	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));
548 
549 	if (key != IPC_PRIVATE) {
550 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
551 			msq = &msqs[msqid];
552 			msqptr = &msq->msq_u;
553 			if (msqptr->msg_qbytes != 0 &&
554 			    msqptr->msg_perm._key == key)
555 				break;
556 		}
557 		if (msqid < msginfo.msgmni) {
558 			MSG_PRINTF(("found public key\n"));
559 			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
560 				MSG_PRINTF(("not exclusive\n"));
561 				error = EEXIST;
562 				goto unlock;
563 			}
564 			if ((error = ipcperm(cred, &msqptr->msg_perm,
565 			    msgflg & 0700 ))) {
566 				MSG_PRINTF(("requester doesn't have 0%o access\n",
567 				    msgflg & 0700));
568 				goto unlock;
569 			}
570 			goto found;
571 		}
572 	}
573 
574 	MSG_PRINTF(("need to allocate the msqid_ds\n"));
575 	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
576 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
577 			/*
578 			 * Look for an unallocated and unlocked msqid_ds.
579 			 * msqid_ds's can be locked by msgsnd or msgrcv while
580 			 * they are copying the message in/out.  We can't
581 			 * re-use the entry until they release it.
582 			 */
583 			msq = &msqs[msqid];
584 			msqptr = &msq->msq_u;
585 			if (msqptr->msg_qbytes == 0 &&
586 			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
587 				break;
588 		}
589 		if (msqid == msginfo.msgmni) {
590 			MSG_PRINTF(("no more msqid_ds's available\n"));
591 			error = ENOSPC;
592 			goto unlock;
593 		}
594 		MSG_PRINTF(("msqid %d is available\n", msqid));
595 		msqptr->msg_perm._key = key;
596 		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
597 		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
598 		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
599 		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
600 		msqptr->msg_perm.mode = (msgflg & 0777);
601 		/* Make sure that the returned msqid is unique */
602 		msqptr->msg_perm._seq++;
603 		msqptr->_msg_first = NULL;
604 		msqptr->_msg_last = NULL;
605 		msqptr->_msg_cbytes = 0;
606 		msqptr->msg_qnum = 0;
607 		msqptr->msg_qbytes = msginfo.msgmnb;
608 		msqptr->msg_lspid = 0;
609 		msqptr->msg_lrpid = 0;
610 		msqptr->msg_stime = 0;
611 		msqptr->msg_rtime = 0;
612 		msqptr->msg_ctime = time_second;
613 	} else {
614 		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
615 		error = ENOENT;
616 		goto unlock;
617 	}
618 
619 found:
620 	/* Construct the unique msqid */
621 	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);
622 
623 unlock:
624 	mutex_exit(&msgmutex);
625 	return (error);
626 }
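
/*
 * Typical user-space use of msgget(2): create the queue for a well-known
 * key if it does not exist yet.  The key value and helper name below are
 * illustrative only:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	#define MYKEY	((key_t)0x1234)
 *
 *	static int
 *	get_queue(void)
 *	{
 *
 *		return msgget(MYKEY, IPC_CREAT | 0600);
 *	}
 */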
627 
628 int
629 sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
630 {
631 	/* {
632 		syscallarg(int) msqid;
633 		syscallarg(const void *) msgp;
634 		syscallarg(size_t) msgsz;
635 		syscallarg(int) msgflg;
636 	} */
637 
638 	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
639 	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
640 }
641 
642 int
643 msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
644     int msgflg, size_t typesz, copyin_t fetch_type)
645 {
646 	int segs_needed, error = 0, msqid;
647 	kauth_cred_t cred = l->l_cred;
648 	struct msqid_ds *msqptr;
649 	struct __msg *msghdr;
650 	kmsq_t *msq;
651 	short next;
652 
653 	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr, user_msgp,
654 	    (long long)msgsz, msgflg));
655 restart:
656 	msqid = IPCID_TO_IX(msqidr);
657 
658 	mutex_enter(&msgmutex);
659 	/* In case of reallocation, we will wait for completion */
660 	while (__predict_false(msg_realloc_state))
661 		cv_wait(&msg_realloc_cv, &msgmutex);
662 
663 	if (msqid < 0 || msqid >= msginfo.msgmni) {
664 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
665 		    msginfo.msgmni));
666 		error = EINVAL;
667 		goto unlock;
668 	}
669 
670 	msq = &msqs[msqid];
671 	msqptr = &msq->msq_u;
672 
673 	if (msqptr->msg_qbytes == 0) {
674 		MSG_PRINTF(("no such message queue id\n"));
675 		error = EINVAL;
676 		goto unlock;
677 	}
678 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
679 		MSG_PRINTF(("wrong sequence number\n"));
680 		error = EINVAL;
681 		goto unlock;
682 	}
683 
684 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
685 		MSG_PRINTF(("requester doesn't have write access\n"));
686 		goto unlock;
687 	}
688 
689 	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
690 	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
691 	    (long long)msgsz, msginfo.msgssz, segs_needed));
692 	for (;;) {
693 		int need_more_resources = 0;
694 
695 		/*
696 		 * check msgsz [cannot be negative since it is unsigned]
697 		 * (inside this loop in case msg_qbytes changes while we sleep)
698 		 */
699 
700 		if (msgsz > msqptr->msg_qbytes) {
701 			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
702 			error = EINVAL;
703 			goto unlock;
704 		}
705 
706 		if (msqptr->msg_perm.mode & MSG_LOCKED) {
707 			MSG_PRINTF(("msqid is locked\n"));
708 			need_more_resources = 1;
709 		}
710 		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
711 			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
712 			need_more_resources = 1;
713 		}
714 		if (segs_needed > nfree_msgmaps) {
715 			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
716 			need_more_resources = 1;
717 		}
718 		if (free_msghdrs == NULL) {
719 			MSG_PRINTF(("no more msghdrs\n"));
720 			need_more_resources = 1;
721 		}
722 
723 		if (need_more_resources) {
724 			int we_own_it;
725 
726 			if ((msgflg & IPC_NOWAIT) != 0) {
727 				MSG_PRINTF(("need more resources but caller "
728 				    "doesn't want to wait\n"));
729 				error = EAGAIN;
730 				goto unlock;
731 			}
732 
733 			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
734 				MSG_PRINTF(("we don't own the msqid_ds\n"));
735 				we_own_it = 0;
736 			} else {
737 				/* Force later arrivals to wait for our
738 				   request */
739 				MSG_PRINTF(("we own the msqid_ds\n"));
740 				msqptr->msg_perm.mode |= MSG_LOCKED;
741 				we_own_it = 1;
742 			}
743 
744 			msg_waiters++;
745 			MSG_PRINTF(("goodnight\n"));
746 			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
747 			MSG_PRINTF(("good morning, error=%d\n", error));
748 			msg_waiters--;
749 
750 			if (we_own_it)
751 				msqptr->msg_perm.mode &= ~MSG_LOCKED;
752 
753 			/*
754 			 * If reallocation is in progress, notify the
755 			 * reallocator and restart the call.
756 			 */
757 			if (msg_realloc_state) {
758 				cv_broadcast(&msg_realloc_cv);
759 				mutex_exit(&msgmutex);
760 				goto restart;
761 			}
762 
763 			if (error != 0) {
764 				MSG_PRINTF(("msgsnd: interrupted system "
765 				    "call\n"));
766 				error = EINTR;
767 				goto unlock;
768 			}
769 
770 			/*
771 			 * Make sure that the msq queue still exists
772 			 */
773 
774 			if (msqptr->msg_qbytes == 0) {
775 				MSG_PRINTF(("msqid deleted\n"));
776 				error = EIDRM;
777 				goto unlock;
778 			}
779 		} else {
780 			MSG_PRINTF(("got all the resources that we need\n"));
781 			break;
782 		}
783 	}
784 
785 	/*
786 	 * We have the resources that we need.
787 	 * Make sure!
788 	 */
789 
790 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
791 	KASSERT(segs_needed <= nfree_msgmaps);
792 	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
793 	KASSERT(free_msghdrs != NULL);
794 
795 	/*
796 	 * Re-lock the msqid_ds in case we page-fault when copying in the
797 	 * message
798 	 */
799 
800 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
801 	msqptr->msg_perm.mode |= MSG_LOCKED;
802 
803 	/*
804 	 * Allocate a message header
805 	 */
806 
807 	msghdr = free_msghdrs;
808 	free_msghdrs = msghdr->msg_next;
809 	msghdr->msg_spot = -1;
810 	msghdr->msg_ts = msgsz;
811 
812 	/*
813 	 * Allocate space for the message
814 	 */
815 
816 	while (segs_needed > 0) {
817 		KASSERT(nfree_msgmaps > 0);
818 		KASSERT(free_msgmaps != -1);
819 		KASSERT(free_msgmaps < msginfo.msgseg);
820 
821 		next = free_msgmaps;
822 		MSG_PRINTF(("allocating segment %d to message\n", next));
823 		free_msgmaps = msgmaps[next].next;
824 		nfree_msgmaps--;
825 		msgmaps[next].next = msghdr->msg_spot;
826 		msghdr->msg_spot = next;
827 		segs_needed--;
828 	}
829 
830 	/*
831 	 * Copy in the message type
832 	 */
833 	mutex_exit(&msgmutex);
834 	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
835 	mutex_enter(&msgmutex);
836 	if (error != 0) {
837 		MSG_PRINTF(("error %d copying the message type\n", error));
838 		msg_freehdr(msghdr);
839 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
840 		cv_broadcast(&msq->msq_cv);
841 		goto unlock;
842 	}
843 	user_msgp += typesz;
844 
845 	/*
846 	 * Validate the message type
847 	 */
848 
849 	if (msghdr->msg_type < 1) {
850 		msg_freehdr(msghdr);
851 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
852 		cv_broadcast(&msq->msq_cv);
853 		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
		error = EINVAL;
854 		goto unlock;
855 	}
856 
857 	/*
858 	 * Copy in the message body
859 	 */
860 
861 	next = msghdr->msg_spot;
862 	while (msgsz > 0) {
863 		size_t tlen;
864 		KASSERT(next > -1);
865 		KASSERT(next < msginfo.msgseg);
866 
867 		if (msgsz > msginfo.msgssz)
868 			tlen = msginfo.msgssz;
869 		else
870 			tlen = msgsz;
871 		mutex_exit(&msgmutex);
872 		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
873 		mutex_enter(&msgmutex);
874 		if (error != 0) {
875 			MSG_PRINTF(("error %d copying in message segment\n",
876 			    error));
877 			msg_freehdr(msghdr);
878 			msqptr->msg_perm.mode &= ~MSG_LOCKED;
879 			cv_broadcast(&msq->msq_cv);
880 			goto unlock;
881 		}
882 		msgsz -= tlen;
883 		user_msgp += tlen;
884 		next = msgmaps[next].next;
885 	}
886 	KASSERT(next == -1);
887 
888 	/*
889 	 * We've got the message.  Unlock the msqid_ds.
890 	 */
891 
892 	msqptr->msg_perm.mode &= ~MSG_LOCKED;
893 
894 	/*
895 	 * Make sure that the msqid_ds is still allocated.
896 	 */
897 
898 	if (msqptr->msg_qbytes == 0) {
899 		msg_freehdr(msghdr);
900 		cv_broadcast(&msq->msq_cv);
901 		error = EIDRM;
902 		goto unlock;
903 	}
904 
905 	/*
906 	 * Put the message into the queue
907 	 */
908 
909 	if (msqptr->_msg_first == NULL) {
910 		msqptr->_msg_first = msghdr;
911 		msqptr->_msg_last = msghdr;
912 	} else {
913 		msqptr->_msg_last->msg_next = msghdr;
914 		msqptr->_msg_last = msghdr;
915 	}
916 	msqptr->_msg_last->msg_next = NULL;
917 
918 	msqptr->_msg_cbytes += msghdr->msg_ts;
919 	msqptr->msg_qnum++;
920 	msqptr->msg_lspid = l->l_proc->p_pid;
921 	msqptr->msg_stime = time_second;
922 
923 	cv_broadcast(&msq->msq_cv);
924 
925 unlock:
926 	mutex_exit(&msgmutex);
927 	return error;
928 }
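
/*
 * User-space counterpart of the path above: msgsnd(2) takes a buffer whose
 * first member is the message type (which must be >= 1, per the check
 * above) followed by the payload, and msgsz counts only the payload bytes.
 * The struct and helper names below are illustrative only:
 *
 *	#include <sys/msg.h>
 *	#include <string.h>
 *
 *	struct mymsg {
 *		long	mtype;
 *		char	mtext[64];
 *	};
 *
 *	static int
 *	send_hello(int msqid)
 *	{
 *		struct mymsg m;
 *
 *		m.mtype = 1;
 *		strlcpy(m.mtext, "hello", sizeof(m.mtext));
 *		return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
 *	}
 */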
929 
930 int
931 sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
932 {
933 	/* {
934 		syscallarg(int) msqid;
935 		syscallarg(void *) msgp;
936 		syscallarg(size_t) msgsz;
937 		syscallarg(long) msgtyp;
938 		syscallarg(int) msgflg;
939 	} */
940 
941 	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
942 	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
943 	    sizeof(long), copyout, retval);
944 }
945 
946 int
947 msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
948     int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
949 {
950 	size_t len;
951 	kauth_cred_t cred = l->l_cred;
952 	struct msqid_ds *msqptr;
953 	struct __msg *msghdr;
954 	int error = 0, msqid;
955 	kmsq_t *msq;
956 	short next;
957 
958 	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
959 	    user_msgp, (long long)msgsz, msgtyp, msgflg));
960 restart:
961 	msqid = IPCID_TO_IX(msqidr);
962 
963 	mutex_enter(&msgmutex);
964 	/* In case of reallocation, we will wait for completion */
965 	while (__predict_false(msg_realloc_state))
966 		cv_wait(&msg_realloc_cv, &msgmutex);
967 
968 	if (msqid < 0 || msqid >= msginfo.msgmni) {
969 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
970 		    msginfo.msgmni));
971 		error = EINVAL;
972 		goto unlock;
973 	}
974 
975 	msq = &msqs[msqid];
976 	msqptr = &msq->msq_u;
977 
978 	if (msqptr->msg_qbytes == 0) {
979 		MSG_PRINTF(("no such message queue id\n"));
980 		error = EINVAL;
981 		goto unlock;
982 	}
983 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
984 		MSG_PRINTF(("wrong sequence number\n"));
985 		error = EINVAL;
986 		goto unlock;
987 	}
988 
989 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
990 		MSG_PRINTF(("requester doesn't have read access\n"));
991 		goto unlock;
992 	}
993 
994 	msghdr = NULL;
995 	while (msghdr == NULL) {
996 		if (msgtyp == 0) {
997 			msghdr = msqptr->_msg_first;
998 			if (msghdr != NULL) {
999 				if (msgsz < msghdr->msg_ts &&
1000 				    (msgflg & MSG_NOERROR) == 0) {
1001 					MSG_PRINTF(("first msg on the queue "
1002 					    "is too big (want %lld, got %d)\n",
1003 					    (long long)msgsz, msghdr->msg_ts));
1004 					error = E2BIG;
1005 					goto unlock;
1006 				}
1007 				if (msqptr->_msg_first == msqptr->_msg_last) {
1008 					msqptr->_msg_first = NULL;
1009 					msqptr->_msg_last = NULL;
1010 				} else {
1011 					msqptr->_msg_first = msghdr->msg_next;
1012 					KASSERT(msqptr->_msg_first != NULL);
1013 				}
1014 			}
1015 		} else {
1016 			struct __msg *previous;
1017 			struct __msg **prev;
1018 
1019 			for (previous = NULL, prev = &msqptr->_msg_first;
1020 			     (msghdr = *prev) != NULL;
1021 			     previous = msghdr, prev = &msghdr->msg_next) {
1022 				/*
1023 				 * Is this message's type an exact match or is
1024 				 * this message's type less than or equal to
1025 				 * the absolute value of a negative msgtyp?
1026 				 * Note that the second half of this test can
1027 				 * NEVER be true if msgtyp is positive since
1028 				 * msg_type is always positive!
1029 				 */
1030 
1031 				if (msgtyp != msghdr->msg_type &&
1032 				    msghdr->msg_type > -msgtyp)
1033 					continue;
1034 
1035 				MSG_PRINTF(("found message type %ld, requested %ld\n",
1036 				    msghdr->msg_type, msgtyp));
1037 				if (msgsz < msghdr->msg_ts &&
1038 				     (msgflg & MSG_NOERROR) == 0) {
1039 					MSG_PRINTF(("requested message on the queue "
1040 					    "is too big (want %lld, got %d)\n",
1041 					    (long long)msgsz, msghdr->msg_ts));
1042 					error = E2BIG;
1043 					goto unlock;
1044 				}
1045 				*prev = msghdr->msg_next;
1046 				if (msghdr != msqptr->_msg_last)
1047 					break;
1048 				if (previous == NULL) {
1049 					KASSERT(prev == &msqptr->_msg_first);
1050 					msqptr->_msg_first = NULL;
1051 					msqptr->_msg_last = NULL;
1052 				} else {
1053 					KASSERT(prev != &msqptr->_msg_first);
1054 					msqptr->_msg_last = previous;
1055 				}
1056 				break;
1057 			}
1058 		}
1059 
1060 		/*
1061 		 * We've either extracted the msghdr for the appropriate
1062 		 * message or there isn't one.
1063 		 * If there is one then bail out of this loop.
1064 		 */
1065 		if (msghdr != NULL)
1066 			break;
1067 
1068 		/*
1069 		 * Hmph!  No message found.  Does the user want to wait?
1070 		 */
1071 
1072 		if ((msgflg & IPC_NOWAIT) != 0) {
1073 			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
1074 			    msgtyp));
1075 			error = ENOMSG;
1076 			goto unlock;
1077 		}
1078 
1079 		/*
1080 		 * Wait for something to happen
1081 		 */
1082 
1083 		msg_waiters++;
1084 		MSG_PRINTF(("msgrcv:  goodnight\n"));
1085 		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
1086 		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
1087 		msg_waiters--;
1088 
1089 		/*
1090 		 * If reallocation is in progress, notify the reallocator
1091 		 * and restart the call.
1092 		 */
1093 		if (msg_realloc_state) {
1094 			cv_broadcast(&msg_realloc_cv);
1095 			mutex_exit(&msgmutex);
1096 			goto restart;
1097 		}
1098 
1099 		if (error != 0) {
1100 			MSG_PRINTF(("msgrcv: interrupted system call\n"));
1101 			error = EINTR;
1102 			goto unlock;
1103 		}
1104 
1105 		/*
1106 		 * Make sure that the msq queue still exists
1107 		 */
1108 
1109 		if (msqptr->msg_qbytes == 0 ||
1110 		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
1111 			MSG_PRINTF(("msqid deleted\n"));
1112 			error = EIDRM;
1113 			goto unlock;
1114 		}
1115 	}
1116 
1117 	/*
1118 	 * Return the message to the user.
1119 	 *
1120 	 * First, do the bookkeeping (before we risk being interrupted).
1121 	 */
1122 
1123 	msqptr->_msg_cbytes -= msghdr->msg_ts;
1124 	msqptr->msg_qnum--;
1125 	msqptr->msg_lrpid = l->l_proc->p_pid;
1126 	msqptr->msg_rtime = time_second;
1127 
1128 	/*
1129 	 * Make msgsz the actual amount that we'll be returning.
1130 	 * Note that this effectively truncates the message if it is too long
1131 	 * (since msgsz is never increased).
1132 	 */
1133 
1134 	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
1135 	    (long long)msgsz, msghdr->msg_ts));
1136 	if (msgsz > msghdr->msg_ts)
1137 		msgsz = msghdr->msg_ts;
1138 
1139 	/*
1140 	 * Return the type to the user.
1141 	 */
1142 	mutex_exit(&msgmutex);
1143 	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
1144 	mutex_enter(&msgmutex);
1145 	if (error != 0) {
1146 		MSG_PRINTF(("error (%d) copying out message type\n", error));
1147 		msg_freehdr(msghdr);
1148 		cv_broadcast(&msq->msq_cv);
1149 		goto unlock;
1150 	}
1151 	user_msgp += typesz;
1152 
1153 	/*
1154 	 * Return the segments to the user
1155 	 */
1156 
1157 	next = msghdr->msg_spot;
1158 	for (len = 0; len < msgsz; len += msginfo.msgssz) {
1159 		size_t tlen;
1160 		KASSERT(next > -1);
1161 		KASSERT(next < msginfo.msgseg);
1162 
1163 		if (msgsz - len > msginfo.msgssz)
1164 			tlen = msginfo.msgssz;
1165 		else
1166 			tlen = msgsz - len;
1167 		mutex_exit(&msgmutex);
1168 		error = (*put_type)(&msgpool[next * msginfo.msgssz],
1169 		    user_msgp, tlen);
1170 		mutex_enter(&msgmutex);
1171 		if (error != 0) {
1172 			MSG_PRINTF(("error (%d) copying out message segment\n",
1173 			    error));
1174 			msg_freehdr(msghdr);
1175 			cv_broadcast(&msq->msq_cv);
1176 			goto unlock;
1177 		}
1178 		user_msgp += tlen;
1179 		next = msgmaps[next].next;
1180 	}
1181 
1182 	/*
1183 	 * Done, return the actual number of bytes copied out.
1184 	 */
1185 
1186 	msg_freehdr(msghdr);
1187 	cv_broadcast(&msq->msq_cv);
1188 	*retval = msgsz;
1189 
1190 unlock:
1191 	mutex_exit(&msgmutex);
1192 	return error;
1193 }
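
/*
 * User-space counterpart of the receive path: msgtyp selects the message.
 * A value of 0 takes the first message on the queue, a positive value takes
 * the first message of exactly that type, and a negative value takes the
 * first queued message whose type is less than or equal to the absolute
 * value of msgtyp (as the scan above implements it).  The struct and helper
 * names below are illustrative only:
 *
 *	#include <sys/msg.h>
 *
 *	struct mymsg {
 *		long	mtype;
 *		char	mtext[64];
 *	};
 *
 *	static ssize_t
 *	recv_any(int msqid, struct mymsg *m)
 *	{
 *
 *		return msgrcv(msqid, m, sizeof(m->mtext), 0, MSG_NOERROR);
 *	}
 */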
1194 
1195 /*
1196  * Sysctl initialization and nodes.
1197  */
1198 
1199 static int
1200 sysctl_ipc_msgmni(SYSCTLFN_ARGS)
1201 {
1202 	int newsize, error;
1203 	struct sysctlnode node;
1204 	node = *rnode;
1205 	node.sysctl_data = &newsize;
1206 
1207 	newsize = msginfo.msgmni;
1208 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1209 	if (error || newp == NULL)
1210 		return error;
1211 
1212 	sysctl_unlock();
1213 	error = msgrealloc(newsize, msginfo.msgseg);
1214 	sysctl_relock();
1215 	return error;
1216 }
1217 
1218 static int
1219 sysctl_ipc_msgseg(SYSCTLFN_ARGS)
1220 {
1221 	int newsize, error;
1222 	struct sysctlnode node;
1223 	node = *rnode;
1224 	node.sysctl_data = &newsize;
1225 
1226 	newsize = msginfo.msgseg;
1227 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1228 	if (error || newp == NULL)
1229 		return error;
1230 
1231 	sysctl_unlock();
1232 	error = msgrealloc(msginfo.msgmni, newsize);
1233 	sysctl_relock();
1234 	return error;
1235 }
1236 
1237 SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
1238 {
1239 	const struct sysctlnode *node = NULL;
1240 
1241 	sysctl_createv(clog, 0, NULL, NULL,
1242 		CTLFLAG_PERMANENT,
1243 		CTLTYPE_NODE, "kern", NULL,
1244 		NULL, 0, NULL, 0,
1245 		CTL_KERN, CTL_EOL);
1246 	sysctl_createv(clog, 0, NULL, &node,
1247 		CTLFLAG_PERMANENT,
1248 		CTLTYPE_NODE, "ipc",
1249 		SYSCTL_DESCR("SysV IPC options"),
1250 		NULL, 0, NULL, 0,
1251 		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1252 
1253 	if (node == NULL)
1254 		return;
1255 
1256 	sysctl_createv(clog, 0, &node, NULL,
1257 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1258 		CTLTYPE_INT, "msgmni",
1259 		SYSCTL_DESCR("Max number of message queue identifiers"),
1260 		sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
1261 		CTL_CREATE, CTL_EOL);
1262 	sysctl_createv(clog, 0, &node, NULL,
1263 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1264 		CTLTYPE_INT, "msgseg",
1265 		SYSCTL_DESCR("Max number of message segments"),
1266 		sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
1267 		CTL_CREATE, CTL_EOL);
1268 }
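
/*
 * The nodes created above can also be read from user space, e.g. with
 * sysctlbyname(3); the helper name below is illustrative only:
 *
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	current_msgmni(void)
 *	{
 *		int val;
 *		size_t len = sizeof(val);
 *
 *		if (sysctlbyname("kern.ipc.msgmni", &val, &len, NULL, 0) == -1)
 *			return -1;
 *		return val;
 *	}
 */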
1269