/*	$OpenBSD: sysv_msg.c,v 1.41 2023/04/11 00:45:09 jsg Exp $	*/
/*	$NetBSD: sysv_msg.c,v 1.19 1996/02/09 19:00:18 christos Exp $	*/
/*
 * Copyright (c) 2009 Bret S. Lambert <blambert@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Implementation of SVID messages
 *
 * Author:  Daniel Boulet
 *
 * Copyright 1993 Daniel Boulet and RTMX Inc.
 *
 * This system call was implemented by Daniel Boulet under contract from RTMX.
 *
 * Redistribution and use in source forms, with and without modification,
 * are permitted provided that this entire comment appears intact.
 *
 * Redistribution in binary form may occur without any restrictions.
 * Obviously, it would be nice if you gave credit where credit is due
 * but requiring it would be too onerous.
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msg.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

struct que *que_create(key_t, struct ucred *, int);
struct que *que_lookup(int);
struct que *que_key_lookup(key_t);
void que_wakewriters(void);
void que_free(struct que *);
struct msg *msg_create(struct que *);
void msg_free(struct msg *);
void msg_enqueue(struct que *, struct msg *, struct proc *);
void msg_dequeue(struct que *, struct msg *, struct proc *);
struct msg *msg_lookup(struct que *, int);
int msg_copyin(struct msg *, const char *, size_t, struct proc *);
int msg_copyout(struct msg *, char *, size_t *, struct proc *);

struct	pool sysvmsgpl;
struct	msginfo msginfo;

TAILQ_HEAD(, que) msg_queues;

int num_ques;
int num_msgs;
int sequence;
int maxmsgs;

void
msginit(void)
{
	msginfo.msgmax = MSGMAX;
	msginfo.msgmni = MSGMNI;
	msginfo.msgmnb = MSGMNB;
	msginfo.msgtql = MSGTQL;
	msginfo.msgssz = MSGSSZ;
	msginfo.msgseg = MSGSEG;

	pool_init(&sysvmsgpl, sizeof(struct msg), 0, IPL_NONE, PR_WAITOK,
	    "sysvmsgpl", NULL);

	TAILQ_INIT(&msg_queues);

	num_ques = 0;
	num_msgs = 0;
	sequence = 1;
	maxmsgs = 0;
}

int
sys_msgctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_msgctl_args /* {
		syscallarg(int) msqid;
		syscallarg(int) cmd;
		syscallarg(struct msqid_ds *) buf;
	} */ *uap = v;
	struct msqid_ds tmp, *umsq = SCARG(uap, buf);
	struct ucred *cred = p->p_ucred;
	struct que *que;
	int msqid = SCARG(uap, msqid);
	int cmd = SCARG(uap, cmd);
	int error;

	if ((que = que_lookup(msqid)) == NULL)
		return (EINVAL);

	QREF(que);

	switch (cmd) {

	case IPC_RMID:
		if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_M)))
			goto out;

		TAILQ_REMOVE(&msg_queues, que, que_next);
		que->que_flags |= MSGQ_DYING;

		/* lose interest in the queue and wait for others to do so too */
		if (--que->que_references > 0) {
			wakeup(que);
			tsleep_nsec(&que->que_references, PZERO, "msgqrm",
			    INFSLP);
		}

		que_free(que);

		return (0);

	case IPC_SET:
		if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_M)))
			goto out;
		if ((error = copyin(umsq, &tmp, sizeof(struct msqid_ds))))
			goto out;

		/* only superuser can bump max bytes in queue */
		if (tmp.msg_qbytes > que->msqid_ds.msg_qbytes &&
		    cred->cr_uid != 0) {
			error = EPERM;
			goto out;
		}

		/* restrict max bytes in queue to system limit */
		if (tmp.msg_qbytes > msginfo.msgmnb)
			tmp.msg_qbytes = msginfo.msgmnb;

		/* can't reduce msg_qbytes to 0 */
		if (tmp.msg_qbytes == 0) {
			error = EINVAL;		/* non-standard errno! */
			goto out;
		}

		que->msqid_ds.msg_perm.uid = tmp.msg_perm.uid;
		que->msqid_ds.msg_perm.gid = tmp.msg_perm.gid;
		que->msqid_ds.msg_perm.mode =
		    (que->msqid_ds.msg_perm.mode & ~0777) |
		    (tmp.msg_perm.mode & 0777);
		que->msqid_ds.msg_qbytes = tmp.msg_qbytes;
		que->msqid_ds.msg_ctime = gettime();
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_R)))
			goto out;
		error = copyout(&que->msqid_ds, umsq, sizeof(struct msqid_ds));
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	QRELE(que);

	return (error);
}

int
sys_msgget(struct proc *p, void *v, register_t *retval)
{
	struct sys_msgget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) msgflg;
	} */ *uap = v;
	struct ucred *cred = p->p_ucred;
	struct que *que;
	key_t key = SCARG(uap, key);
	int msgflg = SCARG(uap, msgflg);
	int error = 0;

again:
	if (key != IPC_PRIVATE) {
		que = que_key_lookup(key);
		if (que) {
			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL))
				return (EEXIST);
			if ((error = ipcperm(cred, &que->msqid_ds.msg_perm,
			    msgflg & 0700)))
				return (error);
			goto found;
		}
	}

	/* don't create a new message queue if the caller doesn't want to */
	if (key != IPC_PRIVATE && !(msgflg & IPC_CREAT))
		return (ENOENT);

	/* enforce limits on the maximum number of message queues */
	if (num_ques >= msginfo.msgmni)
		return (ENOSPC);

	/*
	 * if que_create returns NULL, it means that a que with an identical
	 * key was created while this process was sleeping, so start over
	 */
	if ((que = que_create(key, cred, msgflg & 0777)) == NULL)
		goto again;

found:
	*retval = IXSEQ_TO_IPCID(que->que_ix, que->msqid_ds.msg_perm);
	return (error);
}
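
/*
 * Illustrative userland usage of the syscall above (a sketch, not part of
 * the original file; the path and permission bits are arbitrary):
 *
 *	key_t key = ftok("/etc/myapp.conf", 'q');
 *	int msqid = msgget(key, IPC_CREAT | 0600);
 *
 * Passing IPC_PRIVATE as the key always creates a fresh queue, since the
 * key lookup above is skipped for it.
 */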

#define	MSGQ_SPACE(q)	((q)->msqid_ds.msg_qbytes - (q)->msqid_ds.msg_cbytes)

int
sys_msgsnd(struct proc *p, void *v, register_t *retval)
{
	struct sys_msgsnd_args /* {
		syscallarg(int) msqid;
		syscallarg(const void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(int) msgflg;
	} */ *uap = v;
	struct ucred *cred = p->p_ucred;
	struct que *que;
	struct msg *msg;
	size_t msgsz = SCARG(uap, msgsz);
	int error;

	if ((que = que_lookup(SCARG(uap, msqid))) == NULL)
		return (EINVAL);

	if (msgsz > que->msqid_ds.msg_qbytes || msgsz > msginfo.msgmax)
		return (EINVAL);

	if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_W)))
		return (error);

	QREF(que);

	while (MSGQ_SPACE(que) < msgsz || num_msgs >= msginfo.msgtql) {

		if (SCARG(uap, msgflg) & IPC_NOWAIT) {
			error = EAGAIN;
			goto out;
		}

		/* notify world that process may wedge here */
		if (num_msgs >= msginfo.msgtql)
			maxmsgs = 1;

		que->que_flags |= MSGQ_WRITERS;
		if ((error = tsleep_nsec(que, PZERO|PCATCH, "msgwait", INFSLP)))
			goto out;

		if (que->que_flags & MSGQ_DYING) {
			error = EIDRM;
			goto out;
		}
	}

	/* if msg_create returns NULL, the queue is being removed */
	if ((msg = msg_create(que)) == NULL) {
		error = EIDRM;
		goto out;
	}

	/* msg_copyin frees msg on error */
	if ((error = msg_copyin(msg, (const char *)SCARG(uap, msgp), msgsz, p)))
		goto out;

	msg_enqueue(que, msg, p);

	if (que->que_flags & MSGQ_READERS) {
		que->que_flags &= ~MSGQ_READERS;
		wakeup(que);
	}

	if (que->que_flags & MSGQ_DYING) {
		error = EIDRM;
		wakeup(que);
	}
out:
	QRELE(que);

	return (error);
}

int
sys_msgrcv(struct proc *p, void *v, register_t *retval)
{
	struct sys_msgrcv_args /* {
		syscallarg(int) msqid;
		syscallarg(void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(long) msgtyp;
		syscallarg(int) msgflg;
	} */ *uap = v;
	struct ucred *cred = p->p_ucred;
	char *msgp = SCARG(uap, msgp);
	struct que *que;
	struct msg *msg;
	size_t msgsz = SCARG(uap, msgsz);
	long msgtyp = SCARG(uap, msgtyp);
	int error;

	if ((que = que_lookup(SCARG(uap, msqid))) == NULL)
		return (EINVAL);

	if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_R)))
		return (error);

	QREF(que);

	/* msg_lookup handles matching; sleeping gets handled here */
	while ((msg = msg_lookup(que, msgtyp)) == NULL) {

		if (SCARG(uap, msgflg) & IPC_NOWAIT) {
			error = ENOMSG;
			goto out;
		}

		que->que_flags |= MSGQ_READERS;
		if ((error = tsleep_nsec(que, PZERO|PCATCH, "msgwait", INFSLP)))
			goto out;

		/* make sure the queue is still alive */
		if (que->que_flags & MSGQ_DYING) {
			error = EIDRM;
			goto out;
		}
	}

	/* if msg_copyout fails, keep the message around so it isn't lost */
	if ((error = msg_copyout(msg, msgp, &msgsz, p)))
		goto out;

	msg_dequeue(que, msg, p);
	msg_free(msg);

	if (que->que_flags & MSGQ_WRITERS) {
		que->que_flags &= ~MSGQ_WRITERS;
		wakeup(que);
	}

	/* ensure processes waiting on the global limit don't wedge */
	if (maxmsgs) {
		maxmsgs = 0;
		que_wakewriters();
	}

	*retval = msgsz;
out:
	QRELE(que);

	return (error);
}

/*
 * que management functions
 */

struct que *
que_create(key_t key, struct ucred *cred, int mode)
{
	struct que *que, *que2;
	int nextix = 1;

	que = malloc(sizeof(*que), M_TEMP, M_WAIT|M_ZERO);

	/* if malloc slept, a queue with the same key may have been created */
	if (que_key_lookup(key)) {
		free(que, M_TEMP, sizeof *que);
		return (NULL);
	}

	/* find next available "index" */
	TAILQ_FOREACH(que2, &msg_queues, que_next) {
		if (nextix < que2->que_ix)
			break;
		nextix = que2->que_ix + 1;
	}
	que->que_ix = nextix;

	que->msqid_ds.msg_perm.key = key;
	que->msqid_ds.msg_perm.cuid = cred->cr_uid;
	que->msqid_ds.msg_perm.uid = cred->cr_uid;
	que->msqid_ds.msg_perm.cgid = cred->cr_gid;
	que->msqid_ds.msg_perm.gid = cred->cr_gid;
	que->msqid_ds.msg_perm.mode = mode & 0777;
	que->msqid_ds.msg_perm.seq = ++sequence & 0x7fff;
	que->msqid_ds.msg_qbytes = msginfo.msgmnb;
	que->msqid_ds.msg_ctime = gettime();

	TAILQ_INIT(&que->que_msgs);

	/* keep queues in "index" order */
	if (que2)
		TAILQ_INSERT_BEFORE(que2, que, que_next);
	else
		TAILQ_INSERT_TAIL(&msg_queues, que, que_next);
	num_ques++;

	return (que);
}

struct que *
que_lookup(int id)
{
	struct que *que;

	TAILQ_FOREACH(que, &msg_queues, que_next)
		if (que->que_ix == IPCID_TO_IX(id))
			break;

	/* don't return queues marked for removal */
	if (que && que->que_flags & MSGQ_DYING)
		return (NULL);

	return (que);
}

struct que *
que_key_lookup(key_t key)
{
	struct que *que;

	if (key == IPC_PRIVATE)
		return (NULL);

	TAILQ_FOREACH(que, &msg_queues, que_next)
		if (que->msqid_ds.msg_perm.key == key)
			break;

	/* don't return queues marked for removal */
	if (que && que->que_flags & MSGQ_DYING)
		return (NULL);

	return (que);
}

void
que_wakewriters(void)
{
	struct que *que;

	TAILQ_FOREACH(que, &msg_queues, que_next) {
		if (que->que_flags & MSGQ_WRITERS) {
			que->que_flags &= ~MSGQ_WRITERS;
			wakeup(que);
		}
	}
}

void
que_free(struct que *que)
{
	struct msg *msg;
#ifdef DIAGNOSTIC
	if (que->que_references > 0)
		panic("freeing message queue with active references");
#endif

	while ((msg = TAILQ_FIRST(&que->que_msgs))) {
		TAILQ_REMOVE(&que->que_msgs, msg, msg_next);
		msg_free(msg);
	}
	free(que, M_TEMP, sizeof *que);
	num_ques--;
}

/*
 * msg management functions
 */

struct msg *
msg_create(struct que *que)
{
	struct msg *msg;

	msg = pool_get(&sysvmsgpl, PR_WAITOK|PR_ZERO);

	/* if the queue has died during allocation, return NULL */
	if (que->que_flags & MSGQ_DYING) {
		pool_put(&sysvmsgpl, msg);
		wakeup(que);
		return (NULL);
	}

	num_msgs++;

	return (msg);
}

struct msg *
msg_lookup(struct que *que, int msgtyp)
{
	struct msg *msg;

	/*
	 * Three different matches are performed based on the value of msgtyp:
	 * 1) msgtyp > 0 => match exactly
	 * 2) msgtyp = 0 => match any
	 * 3) msgtyp < 0 => match any up to absolute value of msgtyp
	 */
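	/*
	 * Example (illustrative only, not from the original source): with
	 * messages of types 4, 2 and 7 enqueued in that order, msgtyp == 2
	 * returns the type-2 message, while msgtyp == 0 returns the type-4
	 * message at the head of the queue; the scan is always in FIFO order.
	 */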
	TAILQ_FOREACH(msg, &que->que_msgs, msg_next)
		if (msgtyp == 0 || msgtyp == msg->msg_type ||
		    (msgtyp < 0 && -msgtyp <= msg->msg_type))
			break;

	return (msg);
}

void
msg_free(struct msg *msg)
{
	m_freem(msg->msg_data);
	pool_put(&sysvmsgpl, msg);
	num_msgs--;
}

void
msg_enqueue(struct que *que, struct msg *msg, struct proc *p)
{
	que->msqid_ds.msg_cbytes += msg->msg_len;
	que->msqid_ds.msg_qnum++;
	que->msqid_ds.msg_lspid = p->p_p->ps_pid;
	que->msqid_ds.msg_stime = gettime();

	TAILQ_INSERT_TAIL(&que->que_msgs, msg, msg_next);
}

void
msg_dequeue(struct que *que, struct msg *msg, struct proc *p)
{
	que->msqid_ds.msg_cbytes -= msg->msg_len;
	que->msqid_ds.msg_qnum--;
	que->msqid_ds.msg_lrpid = p->p_p->ps_pid;
	que->msqid_ds.msg_rtime = gettime();

	TAILQ_REMOVE(&que->que_msgs, msg, msg_next);
}

/*
 * The actual I/O routines. A note concerning the layout of SysV msg buffers:
 *
 * The data to be copied is laid out as a single userspace buffer, with a
 * long preceding an opaque buffer of len bytes. The long value ends
 * up being the message type, which needs to be copied separately from
 * the buffer data, which is stored in mbufs.
 */
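
/*
 * A minimal userland sketch of that layout (illustrative only; the struct
 * name and payload size are arbitrary assumptions, not part of this file):
 *
 *	struct mymsg {
 *		long	mtype;		message type, must be > 0
 *		char	mtext[64];	opaque payload
 *	};
 *
 *	struct mymsg m = { .mtype = 1 };
 *	memcpy(m.mtext, "hello", 6);
 *	msgsnd(msqid, &m, sizeof(m.mtext), 0);	msgsz counts only mtext
 *
 * msg_copyin() below reads the leading long into msg->msg_type and the
 * following msgsz bytes into an mbuf chain; msg_copyout() reverses this.
 */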

int
msg_copyin(struct msg *msg, const char *ubuf, size_t len, struct proc *p)
{
	struct mbuf **mm, *m;
	size_t xfer;
	int error;

	if (msg == NULL)
		panic("msg NULL");

	if ((error = copyin(ubuf, &msg->msg_type, sizeof(long)))) {
		msg_free(msg);
		return (error);
	}

	if (msg->msg_type < 1) {
		msg_free(msg);
		return (EINVAL);
	}

	ubuf += sizeof(long);

	msg->msg_len = 0;
	mm = &msg->msg_data;

	while (msg->msg_len < len) {
		m = m_get(M_WAIT, MT_DATA);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_WAIT);
			xfer = min(len, MCLBYTES);
		} else {
			xfer = min(len, MLEN);
		}
		m->m_len = xfer;
		msg->msg_len += xfer;
		*mm = m;
		mm = &m->m_next;
	}

	for (m = msg->msg_data; m; m = m->m_next) {
		if ((error = copyin(ubuf, mtod(m, void *), m->m_len))) {
			msg_free(msg);
			return (error);
		}
		ubuf += m->m_len;
	}

	return (0);
}

int
msg_copyout(struct msg *msg, char *ubuf, size_t *len, struct proc *p)
{
	struct mbuf *m;
	size_t xfer;
	int error;

#ifdef DIAGNOSTIC
	if (msg->msg_len > MSGMAX)
		panic("SysV message longer than MSGMAX");
#endif

	/* silently truncate messages too large for user buffer */
	xfer = min(*len, msg->msg_len);

	if ((error = copyout(&msg->msg_type, ubuf, sizeof(long))))
		return (error);

	ubuf += sizeof(long);
	*len = xfer;

	for (m = msg->msg_data; m; m = m->m_next) {
		if ((error = copyout(mtod(m, void *), ubuf, m->m_len)))
			return (error);
		ubuf += m->m_len;
	}

	return (0);
}

int
sysctl_sysvmsg(int *name, u_int namelen, void *where, size_t *sizep)
{
	struct msg_sysctl_info *info;
	struct que *que;
	size_t infolen, infolen0;
	int error;

	switch (*name) {
	case KERN_SYSVIPC_MSG_INFO:

		if (namelen != 1)
			return (ENOTDIR);

		/*
		 * The userland ipcs(1) utility expects to be able
		 * to iterate over at least msginfo.msgmni queues,
		 * even if those queues don't exist. This is an
		 * artifact of the previous implementation of
		 * message queues; for now, emulate this behavior
		 * until a more thorough fix can be made.
		 */
		infolen0 = sizeof(msginfo) +
		    msginfo.msgmni * sizeof(struct msqid_ds);
		if (where == NULL) {
			*sizep = infolen0;
			return (0);
		}

		/*
		 * More special-casing due to previous implementation:
		 * if the caller just wants the msginfo struct, then
		 * sizep will point to the value sizeof(struct msginfo).
		 * In that case, only copy out the msginfo struct to
		 * the caller.
		 */
		if (*sizep == sizeof(struct msginfo))
			return (copyout(&msginfo, where, sizeof(msginfo)));

		info = malloc(infolen0, M_TEMP, M_WAIT|M_ZERO);

		/* if the malloc slept, this may have changed */
		infolen = sizeof(msginfo) +
		    msginfo.msgmni * sizeof(struct msqid_ds);

		if (*sizep < infolen) {
			free(info, M_TEMP, infolen0);
			return (ENOMEM);
		}

		memcpy(&info->msginfo, &msginfo, sizeof(struct msginfo));

		/*
		 * Special case #3: the previous array-based implementation
		 * exported the array indices and userland has come to rely
		 * upon these indices, so keep behavior consistent.
		 */
		TAILQ_FOREACH(que, &msg_queues, que_next)
			memcpy(&info->msgids[que->que_ix], &que->msqid_ds,
			    sizeof(struct msqid_ds));

		error = copyout(info, where, infolen);

		free(info, M_TEMP, infolen0);

		return (error);

	default:
		return (EINVAL);
	}
}
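
/*
 * A hedged sketch of how a userland consumer such as ipcs(1) might drive the
 * KERN_SYSVIPC_MSG_INFO node handled above; the three-level mib path through
 * KERN_SYSVIPC_INFO is an assumption, not taken from this file:
 *
 *	int mib[] = { CTL_KERN, KERN_SYSVIPC_INFO, KERN_SYSVIPC_MSG_INFO };
 *	size_t len = 0;
 *
 *	sysctl(mib, 3, NULL, &len, NULL, 0);	size probe: where == NULL
 *	struct msg_sysctl_info *info = malloc(len);
 *	sysctl(mib, 3, info, &len, NULL, 0);	full copyout path
 *
 * Passing a buffer of exactly sizeof(struct msginfo) instead takes the
 * early-return special case and copies out only the msginfo limits.
 */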