xref: /netbsd-src/sys/kern/sysv_msg.c (revision a536ee5124e62c9a0051a252f7833dc8f50f44c9)
1 /*	$NetBSD: sysv_msg.c,v 1.63 2012/03/13 18:40:54 elad Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Implementation of SVID messages
35  *
36  * Author: Daniel Boulet
37  *
38  * Copyright 1993 Daniel Boulet and RTMX Inc.
39  *
40  * This system call was implemented by Daniel Boulet under contract from RTMX.
41  *
42  * Redistribution and use in source forms, with and without modification,
43  * are permitted provided that this entire comment appears intact.
44  *
45  * Redistribution in binary form may occur without any restrictions.
46  * Obviously, it would be nice if you gave credit where credit is due
47  * but requiring it would be too onerous.
48  *
49  * This software is provided ``AS IS'' without any warranties of any kind.
50  */
51 
52 #include <sys/cdefs.h>
53 __KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.63 2012/03/13 18:40:54 elad Exp $");
54 
55 #define SYSVMSG
56 
57 #include <sys/param.h>
58 #include <sys/kernel.h>
59 #include <sys/msg.h>
60 #include <sys/sysctl.h>
61 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
62 #include <sys/syscallargs.h>
63 #include <sys/kauth.h>
64 
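/*
 * Define MSG_DEBUG_OK to turn the MSG_PRINTF() calls in this file into
 * real printf()s; by default they expand to nothing.
 */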
65 #define MSG_DEBUG
66 #undef MSG_DEBUG_OK
67 
68 #ifdef MSG_DEBUG_OK
69 #define MSG_PRINTF(a)	printf a
70 #else
71 #define MSG_PRINTF(a)
72 #endif
73 
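/*
 * Message storage: a message is a chain of fixed-size segments (of
 * msginfo.msgssz bytes each) in msgpool.  msgmaps[i].next links segment i
 * to the next segment of the same message (or to the next free segment
 * when i is on the free list; -1 terminates a chain).  Each struct __msg
 * records the first segment (msg_spot) and the message size in bytes
 * (msg_ts).
 */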
74 static int	nfree_msgmaps;		/* # of free map entries */
75 static short	free_msgmaps;	/* head of linked list of free map entries */
76 static struct	__msg *free_msghdrs;	/* list of free msg headers */
77 static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
78 static struct	msgmap *msgmaps;	/* MSGSEG msgmap structures */
79 static struct __msg *msghdrs;		/* MSGTQL msg headers */
80 
81 kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
82 kmutex_t msgmutex;			/* subsystem lock */
83 
84 static u_int	msg_waiters = 0;	/* total number of msgsnd/msgrcv waiters */
85 static bool	msg_realloc_state;
86 static kcondvar_t msg_realloc_cv;
87 
88 static void msg_freehdr(struct __msg *);
89 
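/*
 * msginit() sets up the subsystem at boot: it wires down a single
 * allocation that is carved into the message data pool (msgpool), the
 * per-segment map (msgmaps), the message headers (msghdrs) and the queue
 * identifiers (msqs), builds the free lists and initializes the subsystem
 * lock and condition variables.
 */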
90 void
91 msginit(void)
92 {
93 	int i, sz;
94 	vaddr_t v;
95 
96 	/*
97 	 * msginfo.msgssz should be a power of two for efficiency reasons.
98 	 * It is also pretty silly if msginfo.msgssz is less than 8
99 	 * or greater than about 256 so ...
100 	 */
101 
102 	i = 8;
103 	while (i < 1024 && i != msginfo.msgssz)
104 		i <<= 1;
105 	if (i != msginfo.msgssz) {
106 		panic("msginfo.msgssz = %d, not a small power of 2",
107 		    msginfo.msgssz);
108 	}
109 
110 	if (msginfo.msgseg > 32767) {
111 		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
112 	}
113 
114 	/* Allocate the wired memory for our structures */
115 	sz = ALIGN(msginfo.msgmax) +
116 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
117 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
118 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
119 	sz = round_page(sz);
120 	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
121 	if (v == 0)
122 		panic("sysv_msg: cannot allocate memory");
123 	msgpool = (void *)v;
124 	msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
125 	msghdrs = (void *)((uintptr_t)msgmaps +
126 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
127 	msqs = (void *)((uintptr_t)msghdrs +
128 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));
129 
130 	for (i = 0; i < (msginfo.msgseg - 1); i++)
131 		msgmaps[i].next = i + 1;
132 	msgmaps[msginfo.msgseg - 1].next = -1;
133 
134 	free_msgmaps = 0;
135 	nfree_msgmaps = msginfo.msgseg;
136 
137 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
138 		msghdrs[i].msg_type = 0;
139 		msghdrs[i].msg_next = &msghdrs[i + 1];
140 	}
141 	i = msginfo.msgtql - 1;
142 	msghdrs[i].msg_type = 0;
143 	msghdrs[i].msg_next = NULL;
144 	free_msghdrs = &msghdrs[0];
145 
146 	for (i = 0; i < msginfo.msgmni; i++) {
147 		cv_init(&msqs[i].msq_cv, "msgwait");
148 		/* Implies entry is available */
149 		msqs[i].msq_u.msg_qbytes = 0;
150 		/* Reset to a known value */
151 		msqs[i].msq_u.msg_perm._seq = 0;
152 	}
153 
154 	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
155 	cv_init(&msg_realloc_cv, "msgrealc");
156 	msg_realloc_state = false;
157 
158 	sysvipcinit();
159 }
160 
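/*
 * msgrealloc() resizes the subsystem when kern.ipc.msgmni or
 * kern.ipc.msgseg is changed via sysctl.  It allocates a new wired region,
 * drains all msgsnd/msgrcv waiters, refuses to shrink below what is
 * currently in use, copies every live queue (defragmenting the message
 * segments in the process) and only then switches the global pointers and
 * frees the old region.
 */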
161 static int
162 msgrealloc(int newmsgmni, int newmsgseg)
163 {
164 	struct msgmap *new_msgmaps;
165 	struct __msg *new_msghdrs, *new_free_msghdrs;
166 	char *old_msgpool, *new_msgpool;
167 	kmsq_t *new_msqs;
168 	vaddr_t v;
169 	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
170 	short new_free_msgmaps;
171 
172 	if (newmsgmni < 1 || newmsgseg < 1)
173 		return EINVAL;
174 
175 	/* Allocate the wired memory for our structures */
176 	newmsgmax = msginfo.msgssz * newmsgseg;
177 	sz = ALIGN(newmsgmax) +
178 	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
179 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
180 	    ALIGN(newmsgmni * sizeof(kmsq_t));
181 	sz = round_page(sz);
182 	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
183 	if (v == 0)
184 		return ENOMEM;
185 
186 	mutex_enter(&msgmutex);
187 	if (msg_realloc_state) {
188 		mutex_exit(&msgmutex);
189 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
190 		return EBUSY;
191 	}
192 	msg_realloc_state = true;
193 	if (msg_waiters) {
194 		/*
195 		 * Reallocation is pending: wake up all waiters and wait
196 		 * until they have all exited.
197 		 */
198 		for (i = 0; i < msginfo.msgmni; i++)
199 			cv_broadcast(&msqs[i].msq_cv);
200 		while (msg_waiters)
201 			cv_wait(&msg_realloc_cv, &msgmutex);
202 	}
203 	old_msgpool = msgpool;
204 
205 	/* We cannot reallocate less memory than we use */
206 	i = 0;
207 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
208 		struct msqid_ds *mptr;
209 		kmsq_t *msq;
210 
211 		msq = &msqs[msqid];
212 		mptr = &msq->msq_u;
213 		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
214 			i = msqid;
215 	}
216 	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
217 		mutex_exit(&msgmutex);
218 		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
219 		return EBUSY;
220 	}
221 
222 	new_msgpool = (void *)v;
223 	new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
224 	new_msghdrs = (void *)((uintptr_t)new_msgmaps +
225 	    ALIGN(newmsgseg * sizeof(struct msgmap)));
226 	new_msqs = (void *)((uintptr_t)new_msghdrs +
227 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));
228 
229 	/* Initialize the structures */
230 	for (i = 0; i < (newmsgseg - 1); i++)
231 		new_msgmaps[i].next = i + 1;
232 	new_msgmaps[newmsgseg - 1].next = -1;
233 	new_free_msgmaps = 0;
234 	new_nfree_msgmaps = newmsgseg;
235 
236 	for (i = 0; i < (msginfo.msgtql - 1); i++) {
237 		new_msghdrs[i].msg_type = 0;
238 		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
239 	}
240 	i = msginfo.msgtql - 1;
241 	new_msghdrs[i].msg_type = 0;
242 	new_msghdrs[i].msg_next = NULL;
243 	new_free_msghdrs = &new_msghdrs[0];
244 
245 	for (i = 0; i < newmsgmni; i++) {
246 		new_msqs[i].msq_u.msg_qbytes = 0;
247 		new_msqs[i].msq_u.msg_perm._seq = 0;
248 		cv_init(&new_msqs[i].msq_cv, "msgwait");
249 	}
250 
251 	/*
252 	 * Copy all message queue identifiers, message headers and buffer
253 	 * pools to the new memory location.
254 	 */
255 	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
256 		struct __msg *nmsghdr, *msghdr, *pmsghdr;
257 		struct msqid_ds *nmptr, *mptr;
258 		kmsq_t *nmsq, *msq;
259 
260 		msq = &msqs[msqid];
261 		mptr = &msq->msq_u;
262 
263 		if (mptr->msg_qbytes == 0 &&
264 		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
265 			continue;
266 
267 		nmsq = &new_msqs[msqid];
268 		nmptr = &nmsq->msq_u;
269 		memcpy(nmptr, mptr, sizeof(struct msqid_ds));
270 
271 		/*
272 		 * Go through the message headers and copy each one into a
273 		 * newly taken header, thus defragmenting as we go.
274 		 */
275 		nmsghdr = pmsghdr = NULL;
276 		msghdr = mptr->_msg_first;
277 		while (msghdr) {
278 			short nnext = 0, next;
279 			u_short msgsz, segcnt;
280 
281 			/* Take an entry from the new list of free msghdrs */
282 			nmsghdr = new_free_msghdrs;
283 			KASSERT(nmsghdr != NULL);
284 			new_free_msghdrs = nmsghdr->msg_next;
285 
286 			nmsghdr->msg_next = NULL;
287 			if (pmsghdr) {
288 				pmsghdr->msg_next = nmsghdr;
289 			} else {
290 				nmptr->_msg_first = nmsghdr;
291 			}
292 			pmsghdr = nmsghdr;
293 			nmsghdr->msg_ts = msghdr->msg_ts;
294 			nmsghdr->msg_spot = -1;
295 
296 			/* Compute the amount of segments and reserve them */
297 			msgsz = msghdr->msg_ts;
298 			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
299 			if (segcnt == 0) {
300 				/* Zero-length message: nothing to copy */
				msghdr = msghdr->msg_next;
				continue;
			}
301 			while (segcnt--) {
302 				nnext = new_free_msgmaps;
303 				new_free_msgmaps = new_msgmaps[nnext].next;
304 				new_nfree_msgmaps--;
305 				new_msgmaps[nnext].next = nmsghdr->msg_spot;
306 				nmsghdr->msg_spot = nnext;
307 			}
308 
309 			/* Copy all segments */
310 			KASSERT(nnext == nmsghdr->msg_spot);
311 			next = msghdr->msg_spot;
312 			while (msgsz > 0) {
313 				size_t tlen;
314 
315 				if (msgsz >= msginfo.msgssz) {
316 					tlen = msginfo.msgssz;
317 					msgsz -= msginfo.msgssz;
318 				} else {
319 					tlen = msgsz;
320 					msgsz = 0;
321 				}
322 
323 				/* Copy the message buffer */
324 				memcpy(&new_msgpool[nnext * msginfo.msgssz],
325 				    &msgpool[next * msginfo.msgssz], tlen);
326 
327 				/* Next entries of the new and old maps */
328 				nnext = new_msgmaps[nnext].next;
329 				next = msgmaps[next].next;
330 			}
331 
332 			/* Next message header */
333 			msghdr = msghdr->msg_next;
334 		}
335 		nmptr->_msg_last = nmsghdr;
336 	}
337 	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
338 	    (newmsgseg - new_nfree_msgmaps));
339 
340 	sz = ALIGN(msginfo.msgmax) +
341 	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
342 	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
343 	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
344 	sz = round_page(sz);
345 
346 	for (i = 0; i < msginfo.msgmni; i++)
347 		cv_destroy(&msqs[i].msq_cv);
348 
349 	/* Set the pointers and update the new values */
350 	msgpool = new_msgpool;
351 	msgmaps = new_msgmaps;
352 	msghdrs = new_msghdrs;
353 	msqs = new_msqs;
354 
355 	free_msghdrs = new_free_msghdrs;
356 	free_msgmaps = new_free_msgmaps;
357 	nfree_msgmaps = new_nfree_msgmaps;
358 	msginfo.msgmni = newmsgmni;
359 	msginfo.msgseg = newmsgseg;
360 	msginfo.msgmax = newmsgmax;
361 
362 	/* Reallocation completed - notify all waiters, if any */
363 	msg_realloc_state = false;
364 	cv_broadcast(&msg_realloc_cv);
365 	mutex_exit(&msgmutex);
366 
367 	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
368 	return 0;
369 }
370 
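/*
 * Return all data segments of a message to the free map list and the
 * header itself to the free header list.  Called with msgmutex held.
 */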
371 static void
372 msg_freehdr(struct __msg *msghdr)
373 {
374 
375 	KASSERT(mutex_owned(&msgmutex));
376 
377 	while (msghdr->msg_ts > 0) {
378 		short next;
379 		KASSERT(msghdr->msg_spot >= 0);
380 		KASSERT(msghdr->msg_spot < msginfo.msgseg);
381 
382 		next = msgmaps[msghdr->msg_spot].next;
383 		msgmaps[msghdr->msg_spot].next = free_msgmaps;
384 		free_msgmaps = msghdr->msg_spot;
385 		nfree_msgmaps++;
386 		msghdr->msg_spot = next;
387 		if (msghdr->msg_ts >= msginfo.msgssz)
388 			msghdr->msg_ts -= msginfo.msgssz;
389 		else
390 			msghdr->msg_ts = 0;
391 	}
392 	KASSERT(msghdr->msg_spot == -1);
393 	msghdr->msg_next = free_msghdrs;
394 	free_msghdrs = msghdr;
395 }
396 
397 int
398 sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
399     register_t *retval)
400 {
401 	/* {
402 		syscallarg(int) msqid;
403 		syscallarg(int) cmd;
404 		syscallarg(struct msqid_ds *) buf;
405 	} */
406 	struct msqid_ds msqbuf;
407 	int cmd, error;
408 
409 	cmd = SCARG(uap, cmd);
410 
411 	if (cmd == IPC_SET) {
412 		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
413 		if (error)
414 			return (error);
415 	}
416 
417 	error = msgctl1(l, SCARG(uap, msqid), cmd,
418 	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);
419 
420 	if (error == 0 && cmd == IPC_STAT)
421 		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));
422 
423 	return (error);
424 }
425 
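/*
 * Illustrative userland usage of the msgctl(2) interface implemented by
 * sys___msgctl50()/msgctl1() (a sketch, not part of this file): IPC_STAT
 * copies the msqid_ds out, IPC_SET updates uid/gid/mode/qbytes, IPC_RMID
 * removes the queue.
 *
 *	struct msqid_ds ds;
 *
 *	if (msgctl(id, IPC_STAT, &ds) == 0) {
 *		ds.msg_qbytes = 4096;	(subject to the msgmnb limit)
 *		(void)msgctl(id, IPC_SET, &ds);
 *	}
 *	(void)msgctl(id, IPC_RMID, NULL);
 */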
426 int
427 msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
428 {
429 	kauth_cred_t cred = l->l_cred;
430 	struct msqid_ds *msqptr;
431 	kmsq_t *msq;
432 	int error = 0, ix;
433 
434 	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));
435 
436 	ix = IPCID_TO_IX(msqid);
437 
438 	mutex_enter(&msgmutex);
439 
440 	if (ix < 0 || ix >= msginfo.msgmni) {
441 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
442 		    msginfo.msgmni));
443 		error = EINVAL;
444 		goto unlock;
445 	}
446 
447 	msq = &msqs[ix];
448 	msqptr = &msq->msq_u;
449 
450 	if (msqptr->msg_qbytes == 0) {
451 		MSG_PRINTF(("no such msqid\n"));
452 		error = EINVAL;
453 		goto unlock;
454 	}
455 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
456 		MSG_PRINTF(("wrong sequence number\n"));
457 		error = EINVAL;
458 		goto unlock;
459 	}
460 
461 	switch (cmd) {
462 	case IPC_RMID:
463 	{
464 		struct __msg *msghdr;
465 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
466 			break;
467 		/* Free the message headers */
468 		msghdr = msqptr->_msg_first;
469 		while (msghdr != NULL) {
470 			struct __msg *msghdr_tmp;
471 
472 			/* Free the segments of each message */
473 			msqptr->_msg_cbytes -= msghdr->msg_ts;
474 			msqptr->msg_qnum--;
475 			msghdr_tmp = msghdr;
476 			msghdr = msghdr->msg_next;
477 			msg_freehdr(msghdr_tmp);
478 		}
479 		KASSERT(msqptr->_msg_cbytes == 0);
480 		KASSERT(msqptr->msg_qnum == 0);
481 
482 		/* Mark it as free */
483 		msqptr->msg_qbytes = 0;
484 		cv_broadcast(&msq->msq_cv);
485 	}
486 		break;
487 
488 	case IPC_SET:
489 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
490 			break;
491 		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
492 		    kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC,
493 		    KAUTH_REQ_SYSTEM_SYSVIPC_MSGQ_OVERSIZE,
494 		    KAUTH_ARG(msqbuf->msg_qbytes),
495 		    KAUTH_ARG(msqptr->msg_qbytes), NULL) != 0) {
496 			error = EPERM;
497 			break;
498 		}
499 		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
500 			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
501 			    "(truncating)\n", msginfo.msgmnb));
502 			/* silently restrict qbytes to system limit */
503 			msqbuf->msg_qbytes = msginfo.msgmnb;
504 		}
505 		if (msqbuf->msg_qbytes == 0) {
506 			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
507 			error = EINVAL;		/* XXX non-standard errno! */
508 			break;
509 		}
510 		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
511 		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
512 		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
513 		    (msqbuf->msg_perm.mode & 0777);
514 		msqptr->msg_qbytes = msqbuf->msg_qbytes;
515 		msqptr->msg_ctime = time_second;
516 		break;
517 
518 	case IPC_STAT:
519 		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
520 			MSG_PRINTF(("requester doesn't have read access\n"));
521 			break;
522 		}
523 		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
524 		break;
525 
526 	default:
527 		MSG_PRINTF(("invalid command %d\n", cmd));
528 		error = EINVAL;
529 		break;
530 	}
531 
532 unlock:
533 	mutex_exit(&msgmutex);
534 	return (error);
535 }
536 
537 int
538 sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
539 {
540 	/* {
541 		syscallarg(key_t) key;
542 		syscallarg(int) msgflg;
543 	} */
544 	int msqid, error = 0;
545 	int key = SCARG(uap, key);
546 	int msgflg = SCARG(uap, msgflg);
547 	kauth_cred_t cred = l->l_cred;
548 	struct msqid_ds *msqptr = NULL;
549 	kmsq_t *msq;
550 
551 	mutex_enter(&msgmutex);
552 
553 	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));
554 
555 	if (key != IPC_PRIVATE) {
556 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
557 			msq = &msqs[msqid];
558 			msqptr = &msq->msq_u;
559 			if (msqptr->msg_qbytes != 0 &&
560 			    msqptr->msg_perm._key == key)
561 				break;
562 		}
563 		if (msqid < msginfo.msgmni) {
564 			MSG_PRINTF(("found public key\n"));
565 			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
566 				MSG_PRINTF(("not exclusive\n"));
567 				error = EEXIST;
568 				goto unlock;
569 			}
570 			if ((error = ipcperm(cred, &msqptr->msg_perm,
571 			    msgflg & 0700))) {
572 				MSG_PRINTF(("requester doesn't have 0%o access\n",
573 				    msgflg & 0700));
574 				goto unlock;
575 			}
576 			goto found;
577 		}
578 	}
579 
580 	MSG_PRINTF(("need to allocate the msqid_ds\n"));
581 	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
582 		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
583 			/*
584 			 * Look for an unallocated and unlocked msqid_ds.
585 			 * msqid_ds's can be locked by msgsnd or msgrcv while
586 			 * they are copying the message in/out.  We can't
587 			 * re-use the entry until they release it.
588 			 */
589 			msq = &msqs[msqid];
590 			msqptr = &msq->msq_u;
591 			if (msqptr->msg_qbytes == 0 &&
592 			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
593 				break;
594 		}
595 		if (msqid == msginfo.msgmni) {
596 			MSG_PRINTF(("no more msqid_ds's available\n"));
597 			error = ENOSPC;
598 			goto unlock;
599 		}
600 		MSG_PRINTF(("msqid %d is available\n", msqid));
601 		msqptr->msg_perm._key = key;
602 		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
603 		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
604 		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
605 		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
606 		msqptr->msg_perm.mode = (msgflg & 0777);
607 		/* Make sure that the returned msqid is unique */
608 		msqptr->msg_perm._seq++;
609 		msqptr->_msg_first = NULL;
610 		msqptr->_msg_last = NULL;
611 		msqptr->_msg_cbytes = 0;
612 		msqptr->msg_qnum = 0;
613 		msqptr->msg_qbytes = msginfo.msgmnb;
614 		msqptr->msg_lspid = 0;
615 		msqptr->msg_lrpid = 0;
616 		msqptr->msg_stime = 0;
617 		msqptr->msg_rtime = 0;
618 		msqptr->msg_ctime = time_second;
619 	} else {
620 		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
621 		error = ENOENT;
622 		goto unlock;
623 	}
624 
625 found:
626 	/* Construct the unique msqid */
627 	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);
628 
629 unlock:
630 	mutex_exit(&msgmutex);
631 	return (error);
632 }
633 
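/*
 * Illustrative userland usage of the msgget(2) interface implemented
 * above (a sketch, not part of this file):
 *
 *	int id;
 *
 *	id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *
 * A positive key is looked up first; IPC_CREAT|IPC_EXCL fails with
 * EEXIST if the key already exists.
 */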
634 int
635 sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
636 {
637 	/* {
638 		syscallarg(int) msqid;
639 		syscallarg(const void *) msgp;
640 		syscallarg(size_t) msgsz;
641 		syscallarg(int) msgflg;
642 	} */
643 
644 	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
645 	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
646 }
647 
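/*
 * msgsnd1() copies a message from userland into the queue: it waits, if
 * allowed, until the queue has room (qbytes), free segments and a free
 * header, then copies the type and body in msgssz-sized segments, holding
 * MSG_LOCKED across the copyin()s so the entry cannot be reused while the
 * subsystem lock is dropped.
 *
 * Illustrative userland usage (a sketch, not part of this file):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *
 * msgsz counts only the body; the type must be >= 1.
 */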
648 int
649 msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
650     int msgflg, size_t typesz, copyin_t fetch_type)
651 {
652 	int segs_needed, error = 0, msqid;
653 	kauth_cred_t cred = l->l_cred;
654 	struct msqid_ds *msqptr;
655 	struct __msg *msghdr;
656 	kmsq_t *msq;
657 	short next;
658 
659 	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqid, user_msgp,
660 	    (long long)msgsz, msgflg));
661 
662 	if ((ssize_t)msgsz < 0)
663 		return EINVAL;
664 
665 restart:
666 	msqid = IPCID_TO_IX(msqidr);
667 
668 	mutex_enter(&msgmutex);
669 	/* In case of reallocation, we will wait for completion */
670 	while (__predict_false(msg_realloc_state))
671 		cv_wait(&msg_realloc_cv, &msgmutex);
672 
673 	if (msqid < 0 || msqid >= msginfo.msgmni) {
674 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
675 		    msginfo.msgmni));
676 		error = EINVAL;
677 		goto unlock;
678 	}
679 
680 	msq = &msqs[msqid];
681 	msqptr = &msq->msq_u;
682 
683 	if (msqptr->msg_qbytes == 0) {
684 		MSG_PRINTF(("no such message queue id\n"));
685 		error = EINVAL;
686 		goto unlock;
687 	}
688 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
689 		MSG_PRINTF(("wrong sequence number\n"));
690 		error = EINVAL;
691 		goto unlock;
692 	}
693 
694 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
695 		MSG_PRINTF(("requester doesn't have write access\n"));
696 		goto unlock;
697 	}
698 
699 	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
700 	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
701 	    (long long)msgsz, msginfo.msgssz, segs_needed));
702 	for (;;) {
703 		int need_more_resources = 0;
704 
705 		/*
706 		 * check msgsz [cannot be negative since it is unsigned]
707 		 * (inside this loop in case msg_qbytes changes while we sleep)
708 		 */
709 
710 		if (msgsz > msqptr->msg_qbytes) {
711 			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
712 			error = EINVAL;
713 			goto unlock;
714 		}
715 
716 		if (msqptr->msg_perm.mode & MSG_LOCKED) {
717 			MSG_PRINTF(("msqid is locked\n"));
718 			need_more_resources = 1;
719 		}
720 		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
721 			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
722 			need_more_resources = 1;
723 		}
724 		if (segs_needed > nfree_msgmaps) {
725 			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
726 			need_more_resources = 1;
727 		}
728 		if (free_msghdrs == NULL) {
729 			MSG_PRINTF(("no more msghdrs\n"));
730 			need_more_resources = 1;
731 		}
732 
733 		if (need_more_resources) {
734 			int we_own_it;
735 
736 			if ((msgflg & IPC_NOWAIT) != 0) {
737 				MSG_PRINTF(("need more resources but caller "
738 				    "doesn't want to wait\n"));
739 				error = EAGAIN;
740 				goto unlock;
741 			}
742 
743 			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
744 				MSG_PRINTF(("we don't own the msqid_ds\n"));
745 				we_own_it = 0;
746 			} else {
747 				/* Force later arrivals to wait for our
748 				   request */
749 				MSG_PRINTF(("we own the msqid_ds\n"));
750 				msqptr->msg_perm.mode |= MSG_LOCKED;
751 				we_own_it = 1;
752 			}
753 
754 			msg_waiters++;
755 			MSG_PRINTF(("goodnight\n"));
756 			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
757 			MSG_PRINTF(("good morning, error=%d\n", error));
758 			msg_waiters--;
759 
760 			if (we_own_it)
761 				msqptr->msg_perm.mode &= ~MSG_LOCKED;
762 
763 			/*
764 			 * If a reallocation started while we slept, notify the
765 			 * reallocator and restart the call.
766 			 */
767 			if (msg_realloc_state) {
768 				cv_broadcast(&msg_realloc_cv);
769 				mutex_exit(&msgmutex);
770 				goto restart;
771 			}
772 
773 			if (error != 0) {
774 				MSG_PRINTF(("msgsnd: interrupted system "
775 				    "call\n"));
776 				error = EINTR;
777 				goto unlock;
778 			}
779 
780 			/*
781 			 * Make sure that the msq queue still exists
782 			 */
783 
784 			if (msqptr->msg_qbytes == 0) {
785 				MSG_PRINTF(("msqid deleted\n"));
786 				error = EIDRM;
787 				goto unlock;
788 			}
789 		} else {
790 			MSG_PRINTF(("got all the resources that we need\n"));
791 			break;
792 		}
793 	}
794 
795 	/*
796 	 * We have the resources that we need.
797 	 * Make sure!
798 	 */
799 
800 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
801 	KASSERT(segs_needed <= nfree_msgmaps);
802 	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
803 	KASSERT(free_msghdrs != NULL);
804 
805 	/*
806 	 * Re-lock the msqid_ds in case we page-fault when copying in the
807 	 * message
808 	 */
809 
810 	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
811 	msqptr->msg_perm.mode |= MSG_LOCKED;
812 
813 	/*
814 	 * Allocate a message header
815 	 */
816 
817 	msghdr = free_msghdrs;
818 	free_msghdrs = msghdr->msg_next;
819 	msghdr->msg_spot = -1;
820 	msghdr->msg_ts = msgsz;
821 
822 	/*
823 	 * Allocate space for the message
824 	 */
825 
826 	while (segs_needed > 0) {
827 		KASSERT(nfree_msgmaps > 0);
828 		KASSERT(free_msgmaps != -1);
829 		KASSERT(free_msgmaps < msginfo.msgseg);
830 
831 		next = free_msgmaps;
832 		MSG_PRINTF(("allocating segment %d to message\n", next));
833 		free_msgmaps = msgmaps[next].next;
834 		nfree_msgmaps--;
835 		msgmaps[next].next = msghdr->msg_spot;
836 		msghdr->msg_spot = next;
837 		segs_needed--;
838 	}
839 
840 	/*
841 	 * Copy in the message type
842 	 */
843 	mutex_exit(&msgmutex);
844 	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
845 	mutex_enter(&msgmutex);
846 	if (error != 0) {
847 		MSG_PRINTF(("error %d copying the message type\n", error));
848 		msg_freehdr(msghdr);
849 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
850 		cv_broadcast(&msq->msq_cv);
851 		goto unlock;
852 	}
853 	user_msgp += typesz;
854 
855 	/*
856 	 * Validate the message type
857 	 */
858 
859 	if (msghdr->msg_type < 1) {
860 		msg_freehdr(msghdr);
861 		msqptr->msg_perm.mode &= ~MSG_LOCKED;
862 		cv_broadcast(&msq->msq_cv);
863 		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
864 		error = EINVAL;
865 		goto unlock;
866 	}
867 
868 	/*
869 	 * Copy in the message body
870 	 */
871 
872 	next = msghdr->msg_spot;
873 	while (msgsz > 0) {
874 		size_t tlen;
875 		KASSERT(next > -1);
876 		KASSERT(next < msginfo.msgseg);
877 
878 		if (msgsz > msginfo.msgssz)
879 			tlen = msginfo.msgssz;
880 		else
881 			tlen = msgsz;
882 		mutex_exit(&msgmutex);
883 		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
884 		mutex_enter(&msgmutex);
885 		if (error != 0) {
886 			MSG_PRINTF(("error %d copying in message segment\n",
887 			    error));
888 			msg_freehdr(msghdr);
889 			msqptr->msg_perm.mode &= ~MSG_LOCKED;
890 			cv_broadcast(&msq->msq_cv);
891 			goto unlock;
892 		}
893 		msgsz -= tlen;
894 		user_msgp += tlen;
895 		next = msgmaps[next].next;
896 	}
897 	KASSERT(next == -1);
898 
899 	/*
900 	 * We've got the message.  Unlock the msqid_ds.
901 	 */
902 
903 	msqptr->msg_perm.mode &= ~MSG_LOCKED;
904 
905 	/*
906 	 * Make sure that the msqid_ds is still allocated.
907 	 */
908 
909 	if (msqptr->msg_qbytes == 0) {
910 		msg_freehdr(msghdr);
911 		cv_broadcast(&msq->msq_cv);
912 		error = EIDRM;
913 		goto unlock;
914 	}
915 
916 	/*
917 	 * Put the message into the queue
918 	 */
919 
920 	if (msqptr->_msg_first == NULL) {
921 		msqptr->_msg_first = msghdr;
922 		msqptr->_msg_last = msghdr;
923 	} else {
924 		msqptr->_msg_last->msg_next = msghdr;
925 		msqptr->_msg_last = msghdr;
926 	}
927 	msqptr->_msg_last->msg_next = NULL;
928 
929 	msqptr->_msg_cbytes += msghdr->msg_ts;
930 	msqptr->msg_qnum++;
931 	msqptr->msg_lspid = l->l_proc->p_pid;
932 	msqptr->msg_stime = time_second;
933 
934 	cv_broadcast(&msq->msq_cv);
935 
936 unlock:
937 	mutex_exit(&msgmutex);
938 	return error;
939 }
940 
941 int
942 sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
943 {
944 	/* {
945 		syscallarg(int) msqid;
946 		syscallarg(void *) msgp;
947 		syscallarg(size_t) msgsz;
948 		syscallarg(long) msgtyp;
949 		syscallarg(int) msgflg;
950 	} */
951 
952 	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
953 	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
954 	    sizeof(long), copyout, retval);
955 }
956 
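/*
 * msgrcv1() removes a message from the queue and copies it out.  With
 * msgtyp == 0 the first message is taken; with msgtyp > 0 the first
 * message of exactly that type; with msgtyp < 0 the first queued message
 * whose type is <= the absolute value of msgtyp (see the scan below).
 * Without MSG_NOERROR a message larger than msgsz fails with E2BIG; with
 * it the message is silently truncated.
 *
 * Illustrative userland usage (a sketch, not part of this file):
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n;
 *
 *	n = msgrcv(id, &m, sizeof(m.mtext), 0, 0);
 */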
957 int
958 msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
959     int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
960 {
961 	size_t len;
962 	kauth_cred_t cred = l->l_cred;
963 	struct msqid_ds *msqptr;
964 	struct __msg *msghdr;
965 	int error = 0, msqid;
966 	kmsq_t *msq;
967 	short next;
968 
969 	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqid,
970 	    user_msgp, (long long)msgsz, msgtyp, msgflg));
971 
972 	if ((ssize_t)msgsz < 0)
973 		return EINVAL;
974 
975 restart:
976 	msqid = IPCID_TO_IX(msqidr);
977 
978 	mutex_enter(&msgmutex);
979 	/* In case of reallocation, we will wait for completion */
980 	while (__predict_false(msg_realloc_state))
981 		cv_wait(&msg_realloc_cv, &msgmutex);
982 
983 	if (msqid < 0 || msqid >= msginfo.msgmni) {
984 		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
985 		    msginfo.msgmni));
986 		error = EINVAL;
987 		goto unlock;
988 	}
989 
990 	msq = &msqs[msqid];
991 	msqptr = &msq->msq_u;
992 
993 	if (msqptr->msg_qbytes == 0) {
994 		MSG_PRINTF(("no such message queue id\n"));
995 		error = EINVAL;
996 		goto unlock;
997 	}
998 	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
999 		MSG_PRINTF(("wrong sequence number\n"));
1000 		error = EINVAL;
1001 		goto unlock;
1002 	}
1003 
1004 	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
1005 		MSG_PRINTF(("requester doesn't have read access\n"));
1006 		goto unlock;
1007 	}
1008 
1009 	msghdr = NULL;
1010 	while (msghdr == NULL) {
1011 		if (msgtyp == 0) {
1012 			msghdr = msqptr->_msg_first;
1013 			if (msghdr != NULL) {
1014 				if (msgsz < msghdr->msg_ts &&
1015 				    (msgflg & MSG_NOERROR) == 0) {
1016 					MSG_PRINTF(("first msg on the queue "
1017 					    "is too big (want %lld, got %d)\n",
1018 					    (long long)msgsz, msghdr->msg_ts));
1019 					error = E2BIG;
1020 					goto unlock;
1021 				}
1022 				if (msqptr->_msg_first == msqptr->_msg_last) {
1023 					msqptr->_msg_first = NULL;
1024 					msqptr->_msg_last = NULL;
1025 				} else {
1026 					msqptr->_msg_first = msghdr->msg_next;
1027 					KASSERT(msqptr->_msg_first != NULL);
1028 				}
1029 			}
1030 		} else {
1031 			struct __msg *previous;
1032 			struct __msg **prev;
1033 
1034 			for (previous = NULL, prev = &msqptr->_msg_first;
1035 			     (msghdr = *prev) != NULL;
1036 			     previous = msghdr, prev = &msghdr->msg_next) {
1037 				/*
1038 				 * Is this message's type an exact match or is
1039 				 * this message's type less than or equal to
1040 				 * the absolute value of a negative msgtyp?
1041 				 * Note that the second half of this test can
1042 				 * NEVER be true if msgtyp is positive since
1043 				 * msg_type is always positive!
1044 				 */
1045 
1046 				if (msgtyp != msghdr->msg_type &&
1047 				    msghdr->msg_type > -msgtyp)
1048 					continue;
1049 
1050 				MSG_PRINTF(("found message type %ld, requested %ld\n",
1051 				    msghdr->msg_type, msgtyp));
1052 				if (msgsz < msghdr->msg_ts &&
1053 				     (msgflg & MSG_NOERROR) == 0) {
1054 					MSG_PRINTF(("requested message on the queue "
1055 					    "is too big (want %lld, got %d)\n",
1056 					    (long long)msgsz, msghdr->msg_ts));
1057 					error = E2BIG;
1058 					goto unlock;
1059 				}
1060 				*prev = msghdr->msg_next;
1061 				if (msghdr != msqptr->_msg_last)
1062 					break;
1063 				if (previous == NULL) {
1064 					KASSERT(prev == &msqptr->_msg_first);
1065 					msqptr->_msg_first = NULL;
1066 					msqptr->_msg_last = NULL;
1067 				} else {
1068 					KASSERT(prev != &msqptr->_msg_first);
1069 					msqptr->_msg_last = previous;
1070 				}
1071 				break;
1072 			}
1073 		}
1074 
1075 		/*
1076 		 * We've either extracted the msghdr for the appropriate
1077 		 * message or there isn't one.
1078 		 * If there is one then bail out of this loop.
1079 		 */
1080 		if (msghdr != NULL)
1081 			break;
1082 
1083 		/*
1084 		 * Hmph!  No message found.  Does the user want to wait?
1085 		 */
1086 
1087 		if ((msgflg & IPC_NOWAIT) != 0) {
1088 			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
1089 			    msgtyp));
1090 			error = ENOMSG;
1091 			goto unlock;
1092 		}
1093 
1094 		/*
1095 		 * Wait for something to happen
1096 		 */
1097 
1098 		msg_waiters++;
1099 		MSG_PRINTF(("msgrcv:  goodnight\n"));
1100 		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
1101 		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
1102 		msg_waiters--;
1103 
1104 		/*
1105 		 * If a reallocation started while we slept, notify the
1106 		 * reallocator and restart the call.
1107 		 */
1108 		if (msg_realloc_state) {
1109 			cv_broadcast(&msg_realloc_cv);
1110 			mutex_exit(&msgmutex);
1111 			goto restart;
1112 		}
1113 
1114 		if (error != 0) {
1115 			MSG_PRINTF(("msgsnd: interrupted system call\n"));
1116 			error = EINTR;
1117 			goto unlock;
1118 		}
1119 
1120 		/*
1121 		 * Make sure that the msq queue still exists
1122 		 */
1123 
1124 		if (msqptr->msg_qbytes == 0 ||
1125 		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
1126 			MSG_PRINTF(("msqid deleted\n"));
1127 			error = EIDRM;
1128 			goto unlock;
1129 		}
1130 	}
1131 
1132 	/*
1133 	 * Return the message to the user.
1134 	 *
1135 	 * First, do the bookkeeping (before we risk being interrupted).
1136 	 */
1137 
1138 	msqptr->_msg_cbytes -= msghdr->msg_ts;
1139 	msqptr->msg_qnum--;
1140 	msqptr->msg_lrpid = l->l_proc->p_pid;
1141 	msqptr->msg_rtime = time_second;
1142 
1143 	/*
1144 	 * Make msgsz the actual amount that we'll be returning.
1145 	 * Note that this effectively truncates the message if it is too long
1146 	 * (since msgsz is never increased).
1147 	 */
1148 
1149 	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
1150 	    (long long)msgsz, msghdr->msg_ts));
1151 	if (msgsz > msghdr->msg_ts)
1152 		msgsz = msghdr->msg_ts;
1153 
1154 	/*
1155 	 * Return the type to the user.
1156 	 */
1157 	mutex_exit(&msgmutex);
1158 	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
1159 	mutex_enter(&msgmutex);
1160 	if (error != 0) {
1161 		MSG_PRINTF(("error (%d) copying out message type\n", error));
1162 		msg_freehdr(msghdr);
1163 		cv_broadcast(&msq->msq_cv);
1164 		goto unlock;
1165 	}
1166 	user_msgp += typesz;
1167 
1168 	/*
1169 	 * Return the segments to the user
1170 	 */
1171 
1172 	next = msghdr->msg_spot;
1173 	for (len = 0; len < msgsz; len += msginfo.msgssz) {
1174 		size_t tlen;
1175 		KASSERT(next > -1);
1176 		KASSERT(next < msginfo.msgseg);
1177 
1178 		if (msgsz - len > msginfo.msgssz)
1179 			tlen = msginfo.msgssz;
1180 		else
1181 			tlen = msgsz - len;
1182 		mutex_exit(&msgmutex);
1183 		error = copyout(&msgpool[next * msginfo.msgssz],
1184 		    user_msgp, tlen);
1185 		mutex_enter(&msgmutex);
1186 		if (error != 0) {
1187 			MSG_PRINTF(("error (%d) copying out message segment\n",
1188 			    error));
1189 			msg_freehdr(msghdr);
1190 			cv_broadcast(&msq->msq_cv);
1191 			goto unlock;
1192 		}
1193 		user_msgp += tlen;
1194 		next = msgmaps[next].next;
1195 	}
1196 
1197 	/*
1198 	 * Done, return the actual number of bytes copied out.
1199 	 */
1200 
1201 	msg_freehdr(msghdr);
1202 	cv_broadcast(&msq->msq_cv);
1203 	*retval = msgsz;
1204 
1205 unlock:
1206 	mutex_exit(&msgmutex);
1207 	return error;
1208 }
1209 
1210 /*
1211  * Sysctl initialization and nodes.
1212  */
1213 
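/*
 * The kern.ipc.msgmni and kern.ipc.msgseg nodes created below are
 * writable; storing a new value calls msgrealloc() to resize the
 * subsystem at run time, e.g. (from userland):
 *
 *	sysctl -w kern.ipc.msgmni=64
 */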
1214 static int
1215 sysctl_ipc_msgmni(SYSCTLFN_ARGS)
1216 {
1217 	int newsize, error;
1218 	struct sysctlnode node;
1219 	node = *rnode;
1220 	node.sysctl_data = &newsize;
1221 
1222 	newsize = msginfo.msgmni;
1223 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1224 	if (error || newp == NULL)
1225 		return error;
1226 
1227 	sysctl_unlock();
1228 	error = msgrealloc(newsize, msginfo.msgseg);
1229 	sysctl_relock();
1230 	return error;
1231 }
1232 
1233 static int
1234 sysctl_ipc_msgseg(SYSCTLFN_ARGS)
1235 {
1236 	int newsize, error;
1237 	struct sysctlnode node;
1238 	node = *rnode;
1239 	node.sysctl_data = &newsize;
1240 
1241 	newsize = msginfo.msgseg;
1242 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1243 	if (error || newp == NULL)
1244 		return error;
1245 
1246 	sysctl_unlock();
1247 	error = msgrealloc(msginfo.msgmni, newsize);
1248 	sysctl_relock();
1249 	return error;
1250 }
1251 
1252 SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
1253 {
1254 	const struct sysctlnode *node = NULL;
1255 
1256 	sysctl_createv(clog, 0, NULL, NULL,
1257 		CTLFLAG_PERMANENT,
1258 		CTLTYPE_NODE, "kern", NULL,
1259 		NULL, 0, NULL, 0,
1260 		CTL_KERN, CTL_EOL);
1261 	sysctl_createv(clog, 0, NULL, &node,
1262 		CTLFLAG_PERMANENT,
1263 		CTLTYPE_NODE, "ipc",
1264 		SYSCTL_DESCR("SysV IPC options"),
1265 		NULL, 0, NULL, 0,
1266 		CTL_KERN, KERN_SYSVIPC, CTL_EOL);
1267 
1268 	if (node == NULL)
1269 		return;
1270 
1271 	sysctl_createv(clog, 0, &node, NULL,
1272 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1273 		CTLTYPE_INT, "msgmni",
1274 		SYSCTL_DESCR("Max number of message queue identifiers"),
1275 		sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
1276 		CTL_CREATE, CTL_EOL);
1277 	sysctl_createv(clog, 0, &node, NULL,
1278 		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1279 		CTLTYPE_INT, "msgseg",
1280 		SYSCTL_DESCR("Max number of number of message segments"),
1281 		sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
1282 		CTL_CREATE, CTL_EOL);
1283 }
1284