xref: /netbsd-src/sys/kern/sysv_msg.c (revision 9aa0541bdf64142d9a27c2cf274394d60182818f)
1 /*	$NetBSD: sysv_msg.c,v 1.62 2011/07/30 06:19:02 uebayasi Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Implementation of SVID messages
35  *
36  * Author: Daniel Boulet
37  *
38  * Copyright 1993 Daniel Boulet and RTMX Inc.
39  *
40  * This system call was implemented by Daniel Boulet under contract from RTMX.
41  *
42  * Redistribution and use in source forms, with and without modification,
43  * are permitted provided that this entire comment appears intact.
44  *
45  * Redistribution in binary form may occur without any restrictions.
46  * Obviously, it would be nice if you gave credit where credit is due
47  * but requiring it would be too onerous.
48  *
49  * This software is provided ``AS IS'' without any warranties of any kind.
50  */
51 
52 #include <sys/cdefs.h>
53 __KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.62 2011/07/30 06:19:02 uebayasi Exp $");
54 
55 #define SYSVMSG
56 
57 #include <sys/param.h>
58 #include <sys/kernel.h>
59 #include <sys/msg.h>
60 #include <sys/sysctl.h>
61 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
62 #include <sys/syscallargs.h>
63 #include <sys/kauth.h>
64 
65 #define MSG_DEBUG
66 #undef MSG_DEBUG_OK
67 
68 #ifdef MSG_DEBUG_OK
69 #define MSG_PRINTF(a)	printf a
70 #else
71 #define MSG_PRINTF(a)
72 #endif
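/*
 * Debugging note (editorial): MSG_PRINTF() only produces output when
 * MSG_DEBUG_OK is defined.  As shipped, MSG_DEBUG is defined but
 * MSG_DEBUG_OK is explicitly undefined, so the debug printfs below
 * compile away to nothing unless MSG_DEBUG_OK is re-enabled here.
 */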
73 
74 static int	nfree_msgmaps;		/* # of free map entries */
75 static short	free_msgmaps;	/* head of linked list of free map entries */
76 static struct	__msg *free_msghdrs;	/* list of free msg headers */
77 static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
78 static struct	msgmap *msgmaps;	/* MSGSEG msgmap structures */
79 static struct __msg *msghdrs;		/* MSGTQL msg headers */
80 
81 kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
82 kmutex_t msgmutex;			/* subsystem lock */
83 
84 static u_int	msg_waiters = 0;	/* total number of msgrcv waiters */
85 static bool	msg_realloc_state;
86 static kcondvar_t msg_realloc_cv;
87 
88 static void msg_freehdr(struct __msg *);
89 
90 void
91 msginit(void)
92 {
93 	int i, sz;
94 	vaddr_t v;
95 
96 	/*
97 	 * msginfo.msgssz should be a power of two for efficiency reasons.
98 	 * It is also pretty silly if msginfo.msgssz is less than 8
99 	 * or greater than about 256 so ...
100 	 */
101 
102 	i = 8;
103 	while (i < 1024 && i != msginfo.msgssz)
104 		i <<= 1;
105 	if (i != msginfo.msgssz) {
106 		panic("msginfo.msgssz = %d, not a small power of 2",
107 		    msginfo.msgssz);
108 	}
109 
110 	if (msginfo.msgseg > 32767) {
111 		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
112 	}
113 
114 	/* Allocate the wired memory for our structures */
115 	sz = ALIGN(msginfo.msgmax) +
116 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
117 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
118 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
119 	sz = round_page(sz);
120 	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
121 	if (v == 0)
122 		panic("sysv_msg: cannot allocate memory");
123 	msgpool = (void *)v;
124 	msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
125 	msghdrs = (void *)((uintptr_t)msgmaps +
126 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
127 	msqs = (void *)((uintptr_t)msghdrs +
128 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));
129 
130 	for (i = 0; i < (msginfo.msgseg - 1); i++)
131 		msgmaps[i].next = i + 1;
132 	msgmaps[msginfo.msgseg - 1].next = -1;
133 
134 	free_msgmaps = 0;
135 	nfree_msgmaps = msginfo.msgseg;
136 
137 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
138 		msghdrs[i].msg_type = 0;
139 		msghdrs[i].msg_next = &msghdrs[i + 1];
140 	}
141 	i = msginfo.msgtql - 1;
142 	msghdrs[i].msg_type = 0;
143 	msghdrs[i].msg_next = NULL;
144 	free_msghdrs = &msghdrs[0];
145 
146 	for (i = 0; i < msginfo.msgmni; i++) {
147 		cv_init(&msqs[i].msq_cv, "msgwait");
148 		/* Implies entry is available */
149 		msqs[i].msq_u.msg_qbytes = 0;
150 		/* Reset to a known value */
151 		msqs[i].msq_u.msg_perm._seq = 0;
152 	}
153 
154 	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
155 	cv_init(&msg_realloc_cv, "msgrealc");
156 	msg_realloc_state = false;
157 }
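
/*
 * Layout note (editorial): msginit() carves one wired allocation into
 * four consecutive regions: the message buffer pool (msgmax bytes),
 * the msgmaps array (one entry per msgssz-byte segment), the msghdrs
 * array (msgtql message headers) and the msqs array (msgmni queues).
 * Segment i of the pool lives at &msgpool[i * msginfo.msgssz], and
 * msgmaps[i].next chains the free segments into a single list headed
 * by free_msgmaps.  For example, with msgssz = 8 and msgseg = 2048
 * the pool is 8 * 2048 = 16384 bytes and segment 5 starts at byte 40.
 */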
158 
159 static int
160 msgrealloc(int newmsgmni, int newmsgseg)
161 {
162 	struct msgmap *new_msgmaps;
163 	struct __msg *new_msghdrs, *new_free_msghdrs;
164 	char *old_msgpool, *new_msgpool;
165 	kmsq_t *new_msqs;
166 	vaddr_t v;
167 	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
168 	short new_free_msgmaps;
169 
170 	if (newmsgmni < 1 || newmsgseg < 1)
171 		return EINVAL;
172 
173 	/* Allocate the wired memory for our structures */
174 	newmsgmax = msginfo.msgssz * newmsgseg;
175 	sz = ALIGN(newmsgmax) +
176 	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
177 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
178 	    ALIGN(newmsgmni * sizeof(kmsq_t));
179 	sz = round_page(sz);
180 	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
181 	if (v == 0)
182 		return ENOMEM;
183 
184 	mutex_enter(&msgmutex);
185 	if (msg_realloc_state) {
186 		mutex_exit(&msgmutex);
187 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
188 		return EBUSY;
189 	}
190 	msg_realloc_state = true;
191 	if (msg_waiters) {
192 		/*
193 		 * Reallocation state is now marked: wake up all waiters
194 		 * and wait until they have all left.
195 		 */
196 		for (i = 0; i < msginfo.msgmni; i++)
197 			cv_broadcast(&msqs[i].msq_cv);
198 		while (msg_waiters)
199 			cv_wait(&msg_realloc_cv, &msgmutex);
200 	}
201 	old_msgpool = msgpool;
202 
203 	/* We cannot reallocate less memory than we use */
204 	i = 0;
205 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
206 		struct msqid_ds *mptr;
207 		kmsq_t *msq;
208 
209 		msq = &msqs[msqid];
210 		mptr = &msq->msq_u;
211 		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
212 			i = msqid;
213 	}
214 	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
215 		mutex_exit(&msgmutex);
216 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
217 		return EBUSY;
218 	}
219 
220 	new_msgpool = (void *)v;
221 	new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
222 	new_msghdrs = (void *)((uintptr_t)new_msgmaps +
223 	    ALIGN(newmsgseg * sizeof(struct msgmap)));
224 	new_msqs = (void *)((uintptr_t)new_msghdrs +
225 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));
226 
227 	/* Initialize the structures */
228 	for (i = 0; i < (newmsgseg - 1); i++)
229 		new_msgmaps[i].next = i + 1;
230 	new_msgmaps[newmsgseg - 1].next = -1;
231 	new_free_msgmaps = 0;
232 	new_nfree_msgmaps = newmsgseg;
233 
234 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
235 		new_msghdrs[i].msg_type = 0;
236 		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
237 	}
238 	i = msginfo.msgtql - 1;
239 	new_msghdrs[i].msg_type = 0;
240 	new_msghdrs[i].msg_next = NULL;
241 	new_free_msghdrs = &new_msghdrs[0];
242 
243 	for (i = 0; i < newmsgmni; i++) {
244 		new_msqs[i].msq_u.msg_qbytes = 0;
245 		new_msqs[i].msq_u.msg_perm._seq = 0;
246 		cv_init(&new_msqs[i].msq_cv, "msgwait");
247 	}
248 
249 	/*
250 	 * Copy all message queue identifiers, message headers and buffer
251 	 * pools to the new memory location.
252 	 */
253 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
254 		struct __msg *nmsghdr, *msghdr, *pmsghdr;
255 		struct msqid_ds *nmptr, *mptr;
256 		kmsq_t *nmsq, *msq;
257 
258 		msq = &msqs[msqid];
259 		mptr = &msq->msq_u;
260 
261 		if (mptr->msg_qbytes == 0 &&
262 		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
263 			continue;
264 
265 		nmsq = &new_msqs[msqid];
266 		nmptr = &nmsq->msq_u;
267 		memcpy(nmptr, mptr, sizeof(struct msqid_ds));
268 
269 		/*
270 		 * Walk the message headers, copying each message into a
271 		 * header taken from the new free list and thus defragmenting.
272 		 */
273 		nmsghdr = pmsghdr = NULL;
274 		msghdr = mptr->_msg_first;
275 		while (msghdr) {
276 			short nnext = 0, next;
277 			u_short msgsz, segcnt;
278 
279 			/* Take an entry from the new list of free msghdrs */
280 			nmsghdr = new_free_msghdrs;
281 			KASSERT(nmsghdr != NULL);
282 			new_free_msghdrs = nmsghdr->msg_next;
283 
284 			nmsghdr->msg_next = NULL;
285 			if (pmsghdr) {
286 				pmsghdr->msg_next = nmsghdr;
287 			} else {
288 				nmptr->_msg_first = nmsghdr;
289 			}
290 			pmsghdr = nmsghdr;
291 			nmsghdr->msg_ts = msghdr->msg_ts;
292 			nmsghdr->msg_spot = -1;
293 
294 			/* Compute the amount of segments and reserve them */
295 			msgsz = msghdr->msg_ts;
296 			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
297 			if (segcnt == 0)
298 				continue;
299 			while (segcnt--) {
300 				nnext = new_free_msgmaps;
301 				new_free_msgmaps = new_msgmaps[nnext].next;
302 				new_nfree_msgmaps--;
303 				new_msgmaps[nnext].next = nmsghdr->msg_spot;
304 				nmsghdr->msg_spot = nnext;
305 			}
306 
307 			/* Copy all segments */
308 			KASSERT(nnext == nmsghdr->msg_spot);
309 			next = msghdr->msg_spot;
310 			while (msgsz > 0) {
311 				size_t tlen;
312 
313 				if (msgsz >= msginfo.msgssz) {
314 					tlen = msginfo.msgssz;
315 					msgsz -= msginfo.msgssz;
316 				} else {
317 					tlen = msgsz;
318 					msgsz = 0;
319 				}
320 
321 				/* Copy the message buffer */
322 				memcpy(&new_msgpool[nnext * msginfo.msgssz],
323 				    &msgpool[next * msginfo.msgssz], tlen);
324 
325 				/* Next entry of the map */
326 				nnext = msgmaps[nnext].next;
327 				next = msgmaps[next].next;
328 			}
329 
330 			/* Next message header */
331 			msghdr = msghdr->msg_next;
332 		}
333 		nmptr->_msg_last = nmsghdr;
334 	}
335 	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
336 	    (newmsgseg - new_nfree_msgmaps));
337 
338 	sz = ALIGN(msginfo.msgmax) +
339 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
340 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
341 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
342 	sz = round_page(sz);
343 
344 	for (i = 0; i < msginfo.msgmni; i++)
345 		cv_destroy(&msqs[i].msq_cv);
346 
347 	/* Set the pointers and update the new values */
348 	msgpool = new_msgpool;
349 	msgmaps = new_msgmaps;
350 	msghdrs = new_msghdrs;
351 	msqs = new_msqs;
352 
353 	free_msghdrs = new_free_msghdrs;
354 	free_msgmaps = new_free_msgmaps;
355 	nfree_msgmaps = new_nfree_msgmaps;
356 	msginfo.msgmni = newmsgmni;
357 	msginfo.msgseg = newmsgseg;
358 	msginfo.msgmax = newmsgmax;
359 
360 	/* Reallocation completed - notify all waiters, if any */
361 	msg_realloc_state = false;
362 	cv_broadcast(&msg_realloc_cv);
363 	mutex_exit(&msgmutex);
364 
365 	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
366 	return 0;
367 }
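
/*
 * Reallocation note (editorial): msgrealloc() is invoked from the
 * kern.ipc.msgmni and kern.ipc.msgseg sysctl handlers below.  It
 * builds a complete second copy of the pool, maps, headers and queue
 * array, copies every live message into freshly allocated segments
 * (which also defragments the pool), swaps the global pointers and
 * only then frees the old allocation.  msg_realloc_state together
 * with msg_realloc_cv keeps msgsnd()/msgrcv() callers out of the old
 * structures while the copy is in progress.
 */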
368 
369 static void
370 msg_freehdr(struct __msg *msghdr)
371 {
372 
373 	KASSERT(mutex_owned(&msgmutex));
374 
375 	while (msghdr->msg_ts > 0) {
376 		short next;
377 		KASSERT(msghdr->msg_spot >= 0);
378 		KASSERT(msghdr->msg_spot < msginfo.msgseg);
379 
380 		next = msgmaps[msghdr->msg_spot].next;
381 		msgmaps[msghdr->msg_spot].next = free_msgmaps;
382 		free_msgmaps = msghdr->msg_spot;
383 		nfree_msgmaps++;
384 		msghdr->msg_spot = next;
385 		if (msghdr->msg_ts >= msginfo.msgssz)
386 			msghdr->msg_ts -= msginfo.msgssz;
387 		else
388 			msghdr->msg_ts = 0;
389 	}
390 	KASSERT(msghdr->msg_spot == -1);
391 	msghdr->msg_next = free_msghdrs;
392 	free_msghdrs = msghdr;
393 }
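
/*
 * msg_freehdr() note (editorial): each segment referenced by the
 * header is pushed back onto the head of the free_msgmaps list and
 * the header itself is returned to free_msghdrs, so freeing costs one
 * list operation per segment the message occupied.
 */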
394 
395 int
396 sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
397     register_t *retval)
398 {
399 	/* {
400 		syscallarg(int) msqid;
401 		syscallarg(int) cmd;
402 		syscallarg(struct msqid_ds *) buf;
403 	} */
404 	struct msqid_ds msqbuf;
405 	int cmd, error;
406 
407 	cmd = SCARG(uap, cmd);
408 
409 	if (cmd == IPC_SET) {
410 		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
411 		if (error)
412 			return (error);
413 	}
414 
415 	error = msgctl1(l, SCARG(uap, msqid), cmd,
416 	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);
417 
418 	if (error == 0 && cmd == IPC_STAT)
419 		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));
420 
421 	return (error);
422 }
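
/*
 * Illustrative userland usage (editorial, not part of this file): the
 * IPC_STAT/IPC_SET round trip that exercises msgctl1() typically
 * looks like this, assuming msqid was returned by msgget(2):
 *
 *	struct msqid_ds ds;
 *
 *	if (msgctl(msqid, IPC_STAT, &ds) == -1)
 *		err(1, "msgctl IPC_STAT");
 *	ds.msg_qbytes = 4096;		// silently capped at msgmnb
 *	if (msgctl(msqid, IPC_SET, &ds) == -1)
 *		err(1, "msgctl IPC_SET");
 *
 *	(void)msgctl(msqid, IPC_RMID, NULL);	// frees headers and segments
 */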
423 
424 int
425 msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
426 {
427 	kauth_cred_t cred = l->l_cred;
428 	struct msqid_ds *msqptr;
429 	kmsq_t *msq;
430 	int error = 0, ix;
431 
432 	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));
433 
434 	ix = IPCID_TO_IX(msqid);
435 
436 	mutex_enter(&msgmutex);
437 
438 	if (ix < 0 || ix >= msginfo.msgmni) {
439 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
440 		    msginfo.msgmni));
441 		error = EINVAL;
442 		goto unlock;
443 	}
444 
445 	msq = &msqs[ix];
446 	msqptr = &msq->msq_u;
447 
448 	if (msqptr->msg_qbytes == 0) {
449 		MSG_PRINTF(("no such msqid\n"));
450 		error = EINVAL;
451 		goto unlock;
452 	}
453 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
454 		MSG_PRINTF(("wrong sequence number\n"));
455 		error = EINVAL;
456 		goto unlock;
457 	}
458 
459 	switch (cmd) {
460 	case IPC_RMID:
461 	{
462 		struct __msg *msghdr;
463 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
464 			break;
465 		/* Free the message headers */
466 		msghdr = msqptr->_msg_first;
467 		while (msghdr != NULL) {
468 			struct __msg *msghdr_tmp;
469 
470 			/* Free the segments of each message */
471 			msqptr->_msg_cbytes -= msghdr->msg_ts;
472 			msqptr->msg_qnum--;
473 			msghdr_tmp = msghdr;
474 			msghdr = msghdr->msg_next;
475 			msg_freehdr(msghdr_tmp);
476 		}
477 		KASSERT(msqptr->_msg_cbytes == 0);
478 		KASSERT(msqptr->msg_qnum == 0);
479 
480 		/* Mark it as free */
481 		msqptr->msg_qbytes = 0;
482 		cv_broadcast(&msq->msq_cv);
483 	}
484 		break;
485 
486 	case IPC_SET:
487 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
488 			break;
489 		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
490 		    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
491 		    NULL) != 0) {
492 			error = EPERM;
493 			break;
494 		}
495 		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
496 			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
497 			    "(truncating)\n", msginfo.msgmnb));
498 			/* silently restrict qbytes to system limit */
499 			msqbuf->msg_qbytes = msginfo.msgmnb;
500 		}
501 		if (msqbuf->msg_qbytes == 0) {
502 			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
503 			error = EINVAL;		/* XXX non-standard errno! */
504 			break;
505 		}
506 		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
507 		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
508 		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
509 		    (msqbuf->msg_perm.mode & 0777);
510 		msqptr->msg_qbytes = msqbuf->msg_qbytes;
511 		msqptr->msg_ctime = time_second;
512 		break;
513 
514 	case IPC_STAT:
515 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
516 			MSG_PRINTF(("requester doesn't have read access\n"));
517 			break;
518 		}
519 		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
520 		break;
521 
522 	default:
523 		MSG_PRINTF(("invalid command %d\n", cmd));
524 		error = EINVAL;
525 		break;
526 	}
527 
528 unlock:
529 	mutex_exit(&msgmutex);
530 	return (error);
531 }
532 
533 int
534 sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
535 {
536 	/* {
537 		syscallarg(key_t) key;
538 		syscallarg(int) msgflg;
539 	} */
540 	int msqid, error = 0;
541 	int key = SCARG(uap, key);
542 	int msgflg = SCARG(uap, msgflg);
543 	kauth_cred_t cred = l->l_cred;
544 	struct msqid_ds *msqptr = NULL;
545 	kmsq_t *msq;
546 
547 	mutex_enter(&msgmutex);
548 
549 	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));
550 
551 	if (key != IPC_PRIVATE) {
552 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
553 			msq = &msqs[msqid];
554 			msqptr = &msq->msq_u;
555 			if (msqptr->msg_qbytes != 0 &&
556 			    msqptr->msg_perm._key == key)
557 				break;
558 		}
559 		if (msqid < msginfo.msgmni) {
560 			MSG_PRINTF(("found public key\n"));
561 			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
562 				MSG_PRINTF(("not exclusive\n"));
563 				error = EEXIST;
564 				goto unlock;
565 			}
566 			if ((error = ipcperm(cred, &msqptr->msg_perm,
567 			    msgflg & 0700))) {
568 				MSG_PRINTF(("requester doesn't have 0%o access\n",
569 				    msgflg & 0700));
570 				goto unlock;
571 			}
572 			goto found;
573 		}
574 	}
575 
576 	MSG_PRINTF(("need to allocate the msqid_ds\n"));
577 	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
578 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
579 			/*
580 			 * Look for an unallocated and unlocked msqid_ds.
581 			 * msqid_ds's can be locked by msgsnd or msgrcv while
582 			 * they are copying the message in/out.  We can't
583 			 * re-use the entry until they release it.
584 			 */
585 			msq = &msqs[msqid];
586 			msqptr = &msq->msq_u;
587 			if (msqptr->msg_qbytes == 0 &&
588 			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
589 				break;
590 		}
591 		if (msqid == msginfo.msgmni) {
592 			MSG_PRINTF(("no more msqid_ds's available\n"));
593 			error = ENOSPC;
594 			goto unlock;
595 		}
596 		MSG_PRINTF(("msqid %d is available\n", msqid));
597 		msqptr->msg_perm._key = key;
598 		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
599 		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
600 		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
601 		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
602 		msqptr->msg_perm.mode = (msgflg & 0777);
603 		/* Make sure that the returned msqid is unique */
604 		msqptr->msg_perm._seq++;
605 		msqptr->_msg_first = NULL;
606 		msqptr->_msg_last = NULL;
607 		msqptr->_msg_cbytes = 0;
608 		msqptr->msg_qnum = 0;
609 		msqptr->msg_qbytes = msginfo.msgmnb;
610 		msqptr->msg_lspid = 0;
611 		msqptr->msg_lrpid = 0;
612 		msqptr->msg_stime = 0;
613 		msqptr->msg_rtime = 0;
614 		msqptr->msg_ctime = time_second;
615 	} else {
616 		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
617 		error = ENOENT;
618 		goto unlock;
619 	}
620 
621 found:
622 	/* Construct the unique msqid */
623 	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);
624 
625 unlock:
626 	mutex_exit(&msgmutex);
627 	return (error);
628 }
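
/*
 * Illustrative userland usage (editorial, not part of this file): a
 * queue is created or looked up by key, and the returned identifier
 * encodes both the slot index and the _seq value set above:
 *
 *	key_t key = ftok("/some/path", 'q');	// hypothetical path
 *	int msqid = msgget(key, IPC_CREAT | 0600);
 *	if (msqid == -1)
 *		err(1, "msgget");
 *
 * Passing IPC_CREAT | IPC_EXCL instead fails with EEXIST when the key
 * is already in use, matching the check in the lookup loop above.
 */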
629 
630 int
631 sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
632 {
633 	/* {
634 		syscallarg(int) msqid;
635 		syscallarg(const void *) msgp;
636 		syscallarg(size_t) msgsz;
637 		syscallarg(int) msgflg;
638 	} */
639 
640 	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
641 	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
642 }
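
/*
 * Illustrative userland usage (editorial, not part of this file): the
 * format expected by msgsnd1() is a long message type followed by the
 * payload bytes, e.g.:
 *
 *	struct mymsg {
 *		long	mtype;		// must be >= 1
 *		char	mtext[64];
 *	} m = { .mtype = 1 };
 *
 *	strlcpy(m.mtext, "hello", sizeof(m.mtext));
 *	if (msgsnd(msqid, &m, strlen(m.mtext) + 1, 0) == -1)
 *		err(1, "msgsnd");
 *
 * The typesz/fetch_type parameters let callers with a different
 * user-level long size (e.g. compat code) reuse the same routine.
 */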
643 
644 int
645 msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
646     int msgflg, size_t typesz, copyin_t fetch_type)
647 {
648 	int segs_needed, error = 0, msqid;
649 	kauth_cred_t cred = l->l_cred;
650 	struct msqid_ds *msqptr;
651 	struct __msg *msghdr;
652 	kmsq_t *msq;
653 	short next;
654 
655 	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr, user_msgp,
656 	    (long long)msgsz, msgflg));
657 
658 	if ((ssize_t)msgsz < 0)
659 		return EINVAL;
660 
661 restart:
662 	msqid = IPCID_TO_IX(msqidr);
663 
664 	mutex_enter(&msgmutex);
665 	/* In case of reallocation, we will wait for completion */
666 	while (__predict_false(msg_realloc_state))
667 		cv_wait(&msg_realloc_cv, &msgmutex);
668 
669 	if (msqid < 0 || msqid >= msginfo.msgmni) {
670 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
671 		    msginfo.msgmni));
672 		error = EINVAL;
673 		goto unlock;
674 	}
675 
676 	msq = &msqs[msqid];
677 	msqptr = &msq->msq_u;
678 
679 	if (msqptr->msg_qbytes == 0) {
680 		MSG_PRINTF(("no such message queue id\n"));
681 		error = EINVAL;
682 		goto unlock;
683 	}
684 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
685 		MSG_PRINTF(("wrong sequence number\n"));
686 		error = EINVAL;
687 		goto unlock;
688 	}
689 
690 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
691 		MSG_PRINTF(("requester doesn't have write access\n"));
692 		goto unlock;
693 	}
694 
695 	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
696 	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
697 	    (long long)msgsz, msginfo.msgssz, segs_needed));
698 	for (;;) {
699 		int need_more_resources = 0;
700 
701 		/*
702 		 * check msgsz [cannot be negative since it is unsigned]
703 		 * (inside this loop in case msg_qbytes changes while we sleep)
704 		 */
705 
706 		if (msgsz > msqptr->msg_qbytes) {
707 			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
708 			error = EINVAL;
709 			goto unlock;
710 		}
711 
712 		if (msqptr->msg_perm.mode & MSG_LOCKED) {
713 			MSG_PRINTF(("msqid is locked\n"));
714 			need_more_resources = 1;
715 		}
716 		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
717 			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
718 			need_more_resources = 1;
719 		}
720 		if (segs_needed > nfree_msgmaps) {
721 			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
722 			need_more_resources = 1;
723 		}
724 		if (free_msghdrs == NULL) {
725 			MSG_PRINTF(("no more msghdrs\n"));
726 			need_more_resources = 1;
727 		}
728 
729 		if (need_more_resources) {
730 			int we_own_it;
731 
732 			if ((msgflg & IPC_NOWAIT) != 0) {
733 				MSG_PRINTF(("need more resources but caller "
734 				    "doesn't want to wait\n"));
735 				error = EAGAIN;
736 				goto unlock;
737 			}
738 
739 			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
740 				MSG_PRINTF(("we don't own the msqid_ds\n"));
741 				we_own_it = 0;
742 			} else {
743 				/* Force later arrivals to wait for our
744 				   request */
745 				MSG_PRINTF(("we own the msqid_ds\n"));
746 				msqptr->msg_perm.mode |= MSG_LOCKED;
747 				we_own_it = 1;
748 			}
749 
750 			msg_waiters++;
751 			MSG_PRINTF(("goodnight\n"));
752 			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
753 			MSG_PRINTF(("good morning, error=%d\n", error));
754 			msg_waiters--;
755 
756 			if (we_own_it)
757 				msqptr->msg_perm.mode &= ~MSG_LOCKED;
758 
759 			/*
760 			 * If reallocation started while we slept, notify the
761 			 * reallocator and restart the call.
762 			 */
763 			if (msg_realloc_state) {
764 				cv_broadcast(&msg_realloc_cv);
765 				mutex_exit(&msgmutex);
766 				goto restart;
767 			}
768 
769 			if (error != 0) {
770 				MSG_PRINTF(("msgsnd: interrupted system "
771 				    "call\n"));
772 				error = EINTR;
773 				goto unlock;
774 			}
775 
776 			/*
777 			 * Make sure that the msq queue still exists
778 			 */
779 
780 			if (msqptr->msg_qbytes == 0) {
781 				MSG_PRINTF(("msqid deleted\n"));
782 				error = EIDRM;
783 				goto unlock;
784 			}
785 		} else {
786 			MSG_PRINTF(("got all the resources that we need\n"));
787 			break;
788 		}
789 	}
790 
791 	/*
792 	 * We have the resources that we need.
793 	 * Make sure!
794 	 */
795 
796 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
797 	KASSERT(segs_needed <= nfree_msgmaps);
798 	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
799 	KASSERT(free_msghdrs != NULL);
800 
801 	/*
802 	 * Re-lock the msqid_ds in case we page-fault when copying in the
803 	 * message
804 	 */
805 
806 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
807 	msqptr->msg_perm.mode |= MSG_LOCKED;
808 
809 	/*
810 	 * Allocate a message header
811 	 */
812 
813 	msghdr = free_msghdrs;
814 	free_msghdrs = msghdr->msg_next;
815 	msghdr->msg_spot = -1;
816 	msghdr->msg_ts = msgsz;
817 
818 	/*
819 	 * Allocate space for the message
820 	 */
821 
822 	while (segs_needed > 0) {
823 		KASSERT(nfree_msgmaps > 0);
824 		KASSERT(free_msgmaps != -1);
825 		KASSERT(free_msgmaps < msginfo.msgseg);
826 
827 		next = free_msgmaps;
828 		MSG_PRINTF(("allocating segment %d to message\n", next));
829 		free_msgmaps = msgmaps[next].next;
830 		nfree_msgmaps--;
831 		msgmaps[next].next = msghdr->msg_spot;
832 		msghdr->msg_spot = next;
833 		segs_needed--;
834 	}
835 
836 	/*
837 	 * Copy in the message type
838 	 */
839 	mutex_exit(&msgmutex);
840 	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
841 	mutex_enter(&msgmutex);
842 	if (error != 0) {
843 		MSG_PRINTF(("error %d copying the message type\n", error));
844 		msg_freehdr(msghdr);
845 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
846 		cv_broadcast(&msq->msq_cv);
847 		goto unlock;
848 	}
849 	user_msgp += typesz;
850 
851 	/*
852 	 * Validate the message type
853 	 */
854 
855 	if (msghdr->msg_type < 1) {
856 		msg_freehdr(msghdr);
857 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
858 		cv_broadcast(&msq->msq_cv);
859 		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
860 		error = EINVAL;
861 		goto unlock;
862 	}
863 
864 	/*
865 	 * Copy in the message body
866 	 */
867 
868 	next = msghdr->msg_spot;
869 	while (msgsz > 0) {
870 		size_t tlen;
871 		KASSERT(next > -1);
872 		KASSERT(next < msginfo.msgseg);
873 
874 		if (msgsz > msginfo.msgssz)
875 			tlen = msginfo.msgssz;
876 		else
877 			tlen = msgsz;
878 		mutex_exit(&msgmutex);
879 		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
880 		mutex_enter(&msgmutex);
881 		if (error != 0) {
882 			MSG_PRINTF(("error %d copying in message segment\n",
883 			    error));
884 			msg_freehdr(msghdr);
885 			msqptr->msg_perm.mode &= ~MSG_LOCKED;
886 			cv_broadcast(&msq->msq_cv);
887 			goto unlock;
888 		}
889 		msgsz -= tlen;
890 		user_msgp += tlen;
891 		next = msgmaps[next].next;
892 	}
893 	KASSERT(next == -1);
894 
895 	/*
896 	 * We've got the message.  Unlock the msqid_ds.
897 	 */
898 
899 	msqptr->msg_perm.mode &= ~MSG_LOCKED;
900 
901 	/*
902 	 * Make sure that the msqid_ds is still allocated.
903 	 */
904 
905 	if (msqptr->msg_qbytes == 0) {
906 		msg_freehdr(msghdr);
907 		cv_broadcast(&msq->msq_cv);
908 		error = EIDRM;
909 		goto unlock;
910 	}
911 
912 	/*
913 	 * Put the message into the queue
914 	 */
915 
916 	if (msqptr->_msg_first == NULL) {
917 		msqptr->_msg_first = msghdr;
918 		msqptr->_msg_last = msghdr;
919 	} else {
920 		msqptr->_msg_last->msg_next = msghdr;
921 		msqptr->_msg_last = msghdr;
922 	}
923 	msqptr->_msg_last->msg_next = NULL;
924 
925 	msqptr->_msg_cbytes += msghdr->msg_ts;
926 	msqptr->msg_qnum++;
927 	msqptr->msg_lspid = l->l_proc->p_pid;
928 	msqptr->msg_stime = time_second;
929 
930 	cv_broadcast(&msq->msq_cv);
931 
932 unlock:
933 	mutex_exit(&msgmutex);
934 	return error;
935 }
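
/*
 * Locking note (editorial): msgsnd1() reserves the header and the
 * segments first, then sets MSG_LOCKED and drops msgmutex around the
 * copyin() calls so a page fault is never taken with the subsystem
 * lock held.  MSG_LOCKED keeps msgget() from recycling the msqid_ds
 * and makes competing senders wait; it is cleared again before the
 * message is linked onto the queue and the receivers are woken.
 */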
936 
937 int
938 sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
939 {
940 	/* {
941 		syscallarg(int) msqid;
942 		syscallarg(void *) msgp;
943 		syscallarg(size_t) msgsz;
944 		syscallarg(long) msgtyp;
945 		syscallarg(int) msgflg;
946 	} */
947 
948 	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
949 	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
950 	    sizeof(long), copyout, retval);
951 }
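
/*
 * Illustrative userland usage (editorial, not part of this file):
 * msgtyp selects which message msgrcv1() dequeues:
 *
 *	struct mymsg m;
 *	ssize_t n;
 *
 *	// msgtyp == 0: first message on the queue, whatever its type
 *	n = msgrcv(msqid, &m, sizeof(m.mtext), 0, 0);
 *	// msgtyp > 0: first message with exactly that type
 *	n = msgrcv(msqid, &m, sizeof(m.mtext), 2, IPC_NOWAIT);
 *	// msgtyp < 0: first message (in queue order) whose type <= -msgtyp
 *	n = msgrcv(msqid, &m, sizeof(m.mtext), -2, MSG_NOERROR);
 *
 * Without MSG_NOERROR a message longer than the supplied buffer fails
 * with E2BIG; with it, the message is silently truncated.
 */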
952 
953 int
954 msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
955     int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
956 {
957 	size_t len;
958 	kauth_cred_t cred = l->l_cred;
959 	struct msqid_ds *msqptr;
960 	struct __msg *msghdr;
961 	int error = 0, msqid;
962 	kmsq_t *msq;
963 	short next;
964 
965 	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
966 	    user_msgp, (long long)msgsz, msgtyp, msgflg));
967 
968 	if ((ssize_t)msgsz < 0)
969 		return EINVAL;
970 
971 restart:
972 	msqid = IPCID_TO_IX(msqidr);
973 
974 	mutex_enter(&msgmutex);
975 	/* In case of reallocation, we will wait for completion */
976 	while (__predict_false(msg_realloc_state))
977 		cv_wait(&msg_realloc_cv, &msgmutex);
978 
979 	if (msqid < 0 || msqid >= msginfo.msgmni) {
980 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
981 		    msginfo.msgmni));
982 		error = EINVAL;
983 		goto unlock;
984 	}
985 
986 	msq = &msqs[msqid];
987 	msqptr = &msq->msq_u;
988 
989 	if (msqptr->msg_qbytes == 0) {
990 		MSG_PRINTF(("no such message queue id\n"));
991 		error = EINVAL;
992 		goto unlock;
993 	}
994 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
995 		MSG_PRINTF(("wrong sequence number\n"));
996 		error = EINVAL;
997 		goto unlock;
998 	}
999 
1000 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
1001 		MSG_PRINTF(("requester doesn't have read access\n"));
1002 		goto unlock;
1003 	}
1004 
1005 	msghdr = NULL;
1006 	while (msghdr == NULL) {
1007 		if (msgtyp == 0) {
1008 			msghdr = msqptr->_msg_first;
1009 			if (msghdr != NULL) {
1010 				if (msgsz < msghdr->msg_ts &&
1011 				    (msgflg & MSG_NOERROR) == 0) {
1012 					MSG_PRINTF(("first msg on the queue "
1013 					    "is too big (want %lld, got %d)\n",
1014 					    (long long)msgsz, msghdr->msg_ts));
1015 					error = E2BIG;
1016 					goto unlock;
1017 				}
1018 				if (msqptr->_msg_first == msqptr->_msg_last) {
1019 					msqptr->_msg_first = NULL;
1020 					msqptr->_msg_last = NULL;
1021 				} else {
1022 					msqptr->_msg_first = msghdr->msg_next;
1023 					KASSERT(msqptr->_msg_first != NULL);
1024 				}
1025 			}
1026 		} else {
1027 			struct __msg *previous;
1028 			struct __msg **prev;
1029 
1030 			for (previous = NULL, prev = &msqptr->_msg_first;
1031 			     (msghdr = *prev) != NULL;
1032 			     previous = msghdr, prev = &msghdr->msg_next) {
1033 				/*
1034 				 * Is this message's type an exact match or is
1035 				 * this message's type less than or equal to
1036 				 * the absolute value of a negative msgtyp?
1037 				 * Note that the second half of this test can
1038 				 * NEVER be true if msgtyp is positive since
1039 				 * msg_type is always positive!
1040 				 */
1041 
1042 				if (msgtyp != msghdr->msg_type &&
1043 				    msghdr->msg_type > -msgtyp)
1044 					continue;
1045 
1046 				MSG_PRINTF(("found message type %ld, requested %ld\n",
1047 				    msghdr->msg_type, msgtyp));
1048 				if (msgsz < msghdr->msg_ts &&
1049 				     (msgflg & MSG_NOERROR) == 0) {
1050 					MSG_PRINTF(("requested message on the queue "
1051 					    "is too big (want %lld, got %d)\n",
1052 					    (long long)msgsz, msghdr->msg_ts));
1053 					error = E2BIG;
1054 					goto unlock;
1055 				}
1056 				*prev = msghdr->msg_next;
1057 				if (msghdr != msqptr->_msg_last)
1058 					break;
1059 				if (previous == NULL) {
1060 					KASSERT(prev == &msqptr->_msg_first);
1061 					msqptr->_msg_first = NULL;
1062 					msqptr->_msg_last = NULL;
1063 				} else {
1064 					KASSERT(prev != &msqptr->_msg_first);
1065 					msqptr->_msg_last = previous;
1066 				}
1067 				break;
1068 			}
1069 		}
1070 
1071 		/*
1072 		 * We've either extracted the msghdr for the appropriate
1073 		 * message or there isn't one.
1074 		 * If there is one then bail out of this loop.
1075 		 */
1076 		if (msghdr != NULL)
1077 			break;
1078 
1079 		/*
1080 		 * Hmph!  No message found.  Does the user want to wait?
1081 		 */
1082 
1083 		if ((msgflg & IPC_NOWAIT) != 0) {
1084 			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
1085 			    msgtyp));
1086 			error = ENOMSG;
1087 			goto unlock;
1088 		}
1089 
1090 		/*
1091 		 * Wait for something to happen
1092 		 */
1093 
1094 		msg_waiters++;
1095 		MSG_PRINTF(("msgrcv:  goodnight\n"));
1096 		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
1097 		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
1098 		msg_waiters--;
1099 
1100 		/*
1101 		 * If reallocation started while we slept, notify the
1102 		 * reallocator and restart the call.
1103 		 */
1104 		if (msg_realloc_state) {
1105 			cv_broadcast(&msg_realloc_cv);
1106 			mutex_exit(&msgmutex);
1107 			goto restart;
1108 		}
1109 
1110 		if (error != 0) {
1111 			MSG_PRINTF(("msgrcv: interrupted system call\n"));
1112 			error = EINTR;
1113 			goto unlock;
1114 		}
1115 
1116 		/*
1117 		 * Make sure that the msq queue still exists
1118 		 */
1119 
1120 		if (msqptr->msg_qbytes == 0 ||
1121 		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
1122 			MSG_PRINTF(("msqid deleted\n"));
1123 			error = EIDRM;
1124 			goto unlock;
1125 		}
1126 	}
1127 
1128 	/*
1129 	 * Return the message to the user.
1130 	 *
1131 	 * First, do the bookkeeping (before we risk being interrupted).
1132 	 */
1133 
1134 	msqptr->_msg_cbytes -= msghdr->msg_ts;
1135 	msqptr->msg_qnum--;
1136 	msqptr->msg_lrpid = l->l_proc->p_pid;
1137 	msqptr->msg_rtime = time_second;
1138 
1139 	/*
1140 	 * Make msgsz the actual amount that we'll be returning.
1141 	 * Note that this effectively truncates the message if it is too long
1142 	 * (since msgsz is never increased).
1143 	 */
1144 
1145 	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
1146 	    (long long)msgsz, msghdr->msg_ts));
1147 	if (msgsz > msghdr->msg_ts)
1148 		msgsz = msghdr->msg_ts;
1149 
1150 	/*
1151 	 * Return the type to the user.
1152 	 */
1153 	mutex_exit(&msgmutex);
1154 	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
1155 	mutex_enter(&msgmutex);
1156 	if (error != 0) {
1157 		MSG_PRINTF(("error (%d) copying out message type\n", error));
1158 		msg_freehdr(msghdr);
1159 		cv_broadcast(&msq->msq_cv);
1160 		goto unlock;
1161 	}
1162 	user_msgp += typesz;
1163 
1164 	/*
1165 	 * Return the segments to the user
1166 	 */
1167 
1168 	next = msghdr->msg_spot;
1169 	for (len = 0; len < msgsz; len += msginfo.msgssz) {
1170 		size_t tlen;
1171 		KASSERT(next > -1);
1172 		KASSERT(next < msginfo.msgseg);
1173 
1174 		if (msgsz - len > msginfo.msgssz)
1175 			tlen = msginfo.msgssz;
1176 		else
1177 			tlen = msgsz - len;
1178 		mutex_exit(&msgmutex);
1179 		error = copyout(&msgpool[next * msginfo.msgssz],
1180 		    user_msgp, tlen);
1181 		mutex_enter(&msgmutex);
1182 		if (error != 0) {
1183 			MSG_PRINTF(("error (%d) copying out message segment\n",
1184 			    error));
1185 			msg_freehdr(msghdr);
1186 			cv_broadcast(&msq->msq_cv);
1187 			goto unlock;
1188 		}
1189 		user_msgp += tlen;
1190 		next = msgmaps[next].next;
1191 	}
1192 
1193 	/*
1194 	 * Done, return the actual number of bytes copied out.
1195 	 */
1196 
1197 	msg_freehdr(msghdr);
1198 	cv_broadcast(&msq->msq_cv);
1199 	*retval = msgsz;
1200 
1201 unlock:
1202 	mutex_exit(&msgmutex);
1203 	return error;
1204 }
1205 
1206 /*
1207  * Sysctl initialization and nodes.
1208  */
1209 
1210 static int
1211 sysctl_ipc_msgmni(SYSCTLFN_ARGS)
1212 {
1213 	int newsize, error;
1214 	struct sysctlnode node;
1215 	node = *rnode;
1216 	node.sysctl_data = &newsize;
1217 
1218 	newsize = msginfo.msgmni;
1219 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1220 	if (error || newp == NULL)
1221 		return error;
1222 
1223 	sysctl_unlock();
1224 	error = msgrealloc(newsize, msginfo.msgseg);
1225 	sysctl_relock();
1226 	return error;
1227 }
1228 
1229 static int
1230 sysctl_ipc_msgseg(SYSCTLFN_ARGS)
1231 {
1232 	int newsize, error;
1233 	struct sysctlnode node;
1234 	node = *rnode;
1235 	node.sysctl_data = &newsize;
1236 
1237 	newsize = msginfo.msgseg;
1238 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1239 	if (error || newp == NULL)
1240 		return error;
1241 
1242 	sysctl_unlock();
1243 	error = msgrealloc(msginfo.msgmni, newsize);
1244 	sysctl_relock();
1245 	return error;
1246 }
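
/*
 * Illustrative usage (editorial): the nodes registered below make the
 * reallocation path reachable from userland, e.g.:
 *
 *	sysctl -w kern.ipc.msgmni=64	# resize the msqid_ds array
 *	sysctl -w kern.ipc.msgseg=4096	# resize the segment pool
 *
 * Either write funnels through msgrealloc(), which fails with EBUSY
 * while another reallocation is in flight or when the new sizes
 * cannot hold the messages currently queued.
 */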
1247 
1248 SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
1249 {
1250 	const struct sysctlnode *node = NULL;
1251 
1252 	sysctl_createv(clog, 0, NULL, NULL,
1253 		CTLFLAG_PERMANENT,
1254 		CTLTYPE_NODE, "kern", NULL,
1255 		NULL, 0, NULL, 0,
1256 		CTL_KERN, CTL_EOL);
1257 	sysctl_createv(clog, 0, NULL, &node,
1258 		CTLFLAG_PERMANENT,
1259 		CTLTYPE_NODE, "ipc",
1260 		SYSCTL_DESCR("SysV IPC options"),
1261 		NULL, 0, NULL, 0,
1262 		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1263 
1264 	if (node == NULL)
1265 		return;
1266 
1267 	sysctl_createv(clog, 0, &node, NULL,
1268 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1269 		CTLTYPE_INT, "msgmni",
1270 		SYSCTL_DESCR("Max number of message queue identifiers"),
1271 		sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
1272 		CTL_CREATE, CTL_EOL);
1273 	sysctl_createv(clog, 0, &node, NULL,
1274 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1275 		CTLTYPE_INT, "msgseg",
1276 		SYSCTL_DESCR("Max number of message segments"),
1277 		sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
1278 		CTL_CREATE, CTL_EOL);
1279 }
1280