/*	$OpenBSD: sysv_msg.c,v 1.37 2020/06/24 22:03:42 cheloha Exp $	*/
/*	$NetBSD: sysv_msg.c,v 1.19 1996/02/09 19:00:18 christos Exp $	*/
/*
 * Copyright (c) 2009 Bret S. Lambert <blambert@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Implementation of SVID messages
 *
 * Author:  Daniel Boulet
 *
 * Copyright 1993 Daniel Boulet and RTMX Inc.
 *
 * This system call was implemented by Daniel Boulet under contract from RTMX.
 *
 * Redistribution and use in source forms, with and without modification,
 * are permitted provided that this entire comment appears intact.
 *
 * Redistribution in binary form may occur without any restrictions.
 * Obviously, it would be nice if you gave credit where credit is due
 * but requiring it would be too onerous.
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msg.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uio.h>

struct que *que_create(key_t, struct ucred *, int);
struct que *que_lookup(int);
struct que *que_key_lookup(key_t);
void que_wakewriters(void);
void que_free(struct que *);
struct msg *msg_create(struct que *);
void msg_free(struct msg *);
void msg_enqueue(struct que *, struct msg *, struct proc *);
void msg_dequeue(struct que *, struct msg *, struct proc *);
struct msg *msg_lookup(struct que *, int);
int msg_copyin(struct msg *, const char *, size_t, struct proc *);
int msg_copyout(struct msg *, char *, size_t *, struct proc *);

struct pool sysvmsgpl;
struct msginfo msginfo;

TAILQ_HEAD(, que) msg_queues;

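/*
 * Global state: the current number of message queues and of queued
 * messages, the sequence number used when generating IPC ids, and a
 * flag (maxmsgs) recording that a sender slept on the global message
 * limit, so that receivers know to wake blocked writers.
 */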
int num_ques;
int num_msgs;
int sequence;
int maxmsgs;

void
msginit(void)
{
        msginfo.msgmax = MSGMAX;
        msginfo.msgmni = MSGMNI;
        msginfo.msgmnb = MSGMNB;
        msginfo.msgtql = MSGTQL;
        msginfo.msgssz = MSGSSZ;
        msginfo.msgseg = MSGSEG;

        pool_init(&sysvmsgpl, sizeof(struct msg), 0, IPL_NONE, PR_WAITOK,
            "sysvmsgpl", NULL);

        TAILQ_INIT(&msg_queues);

        num_ques = 0;
        num_msgs = 0;
        sequence = 1;
        maxmsgs = 0;
}

int
sys_msgctl(struct proc *p, void *v, register_t *retval)
{
        struct sys_msgctl_args /* {
                syscallarg(int) msqid;
                syscallarg(int) cmd;
                syscallarg(struct msqid_ds *) buf;
        } */ *uap = v;

        return (msgctl1(p, SCARG(uap, msqid), SCARG(uap, cmd),
            (caddr_t)SCARG(uap, buf), copyin, copyout));
}

int
msgctl1(struct proc *p, int msqid, int cmd, caddr_t buf,
    int (*ds_copyin)(const void *, void *, size_t),
    int (*ds_copyout)(const void *, void *, size_t))
{
        struct msqid_ds tmp;
        struct ucred *cred = p->p_ucred;
        struct que *que;
        int error = 0;

        if ((que = que_lookup(msqid)) == NULL)
                return (EINVAL);

        QREF(que);

        switch (cmd) {

        case IPC_RMID:
                if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_M)))
                        goto out;

                TAILQ_REMOVE(&msg_queues, que, que_next);
                que->que_flags |= MSGQ_DYING;
                /* lose interest in the queue and wait for others to do so */
                if (--que->que_references > 0) {
                        wakeup(que);
                        tsleep_nsec(&que->que_references, PZERO, "msgqrm",
                            INFSLP);
                }

                que_free(que);

                return (0);

        case IPC_SET:
                if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_M)))
                        goto out;
                if ((error = ds_copyin(buf, &tmp, sizeof(struct msqid_ds))))
                        goto out;

                /* only superuser can bump max bytes in queue */
                if (tmp.msg_qbytes > que->msqid_ds.msg_qbytes &&
                    cred->cr_uid != 0) {
                        error = EPERM;
                        goto out;
                }

                /* restrict max bytes in queue to system limit */
                if (tmp.msg_qbytes > msginfo.msgmnb)
                        tmp.msg_qbytes = msginfo.msgmnb;

                /* can't reduce msg_qbytes to 0 */
                if (tmp.msg_qbytes == 0) {
                        error = EINVAL;         /* non-standard errno! */
                        goto out;
                }

                que->msqid_ds.msg_perm.uid = tmp.msg_perm.uid;
                que->msqid_ds.msg_perm.gid = tmp.msg_perm.gid;
                que->msqid_ds.msg_perm.mode =
                    (que->msqid_ds.msg_perm.mode & ~0777) |
                    (tmp.msg_perm.mode & 0777);
                que->msqid_ds.msg_qbytes = tmp.msg_qbytes;
                que->msqid_ds.msg_ctime = gettime();
                break;

        case IPC_STAT:
                if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_R)))
                        goto out;
                error = ds_copyout(&que->msqid_ds, buf,
                    sizeof(struct msqid_ds));
                break;

        default:
                error = EINVAL;
                break;
        }
out:
        QRELE(que);

        return (error);
}

int
sys_msgget(struct proc *p, void *v, register_t *retval)
{
        struct sys_msgget_args /* {
                syscallarg(key_t) key;
                syscallarg(int) msgflg;
        } */ *uap = v;
        struct ucred *cred = p->p_ucred;
        struct que *que;
        key_t key = SCARG(uap, key);
        int msgflg = SCARG(uap, msgflg);
        int error = 0;

again:
        if (key != IPC_PRIVATE) {
                que = que_key_lookup(key);
                if (que) {
                        if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL))
                                return (EEXIST);
                        if ((error = ipcperm(cred, &que->msqid_ds.msg_perm,
                            msgflg & 0700)))
                                return (error);
                        goto found;
                }
        }

        /* don't create a new message queue if the caller doesn't want to */
        if (key != IPC_PRIVATE && !(msgflg & IPC_CREAT))
                return (ENOENT);

        /* enforce limits on the maximum number of message queues */
        if (num_ques >= msginfo.msgmni)
                return (ENOSPC);

        /*
         * if que_create returns NULL, it means that a que with an identical
         * key was created while this process was sleeping, so start over
         */
        if ((que = que_create(key, cred, msgflg & 0777)) == NULL)
                goto again;

found:
        *retval = IXSEQ_TO_IPCID(que->que_ix, que->msqid_ds.msg_perm);
        return (error);
}

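/*
 * Bytes of space still available in a queue: its byte limit (msg_qbytes)
 * minus the bytes currently queued (msg_cbytes).
 */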
#define MSGQ_SPACE(q)   ((q)->msqid_ds.msg_qbytes - (q)->msqid_ds.msg_cbytes)

int
sys_msgsnd(struct proc *p, void *v, register_t *retval)
{
        struct sys_msgsnd_args /* {
                syscallarg(int) msqid;
                syscallarg(const void *) msgp;
                syscallarg(size_t) msgsz;
                syscallarg(int) msgflg;
        } */ *uap = v;
        struct ucred *cred = p->p_ucred;
        struct que *que;
        struct msg *msg;
        size_t msgsz = SCARG(uap, msgsz);
        int error;

        if ((que = que_lookup(SCARG(uap, msqid))) == NULL)
                return (EINVAL);

        if (msgsz > que->msqid_ds.msg_qbytes || msgsz > msginfo.msgmax)
                return (EINVAL);

        if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_W)))
                return (error);

        QREF(que);

        while (MSGQ_SPACE(que) < msgsz || num_msgs >= msginfo.msgtql) {

                if (SCARG(uap, msgflg) & IPC_NOWAIT) {
                        error = EAGAIN;
                        goto out;
                }

                /* notify world that process may wedge here */
                if (num_msgs >= msginfo.msgtql)
                        maxmsgs = 1;

                que->que_flags |= MSGQ_WRITERS;
                if ((error = tsleep_nsec(que, PZERO|PCATCH, "msgwait", INFSLP)))
                        goto out;

                if (que->que_flags & MSGQ_DYING) {
                        error = EIDRM;
                        goto out;
                }
        }

        /* if msg_create returns NULL, the queue is being removed */
        if ((msg = msg_create(que)) == NULL) {
                error = EIDRM;
                goto out;
        }

        /* msg_copyin frees msg on error */
        if ((error = msg_copyin(msg, (const char *)SCARG(uap, msgp), msgsz, p)))
                goto out;

        msg_enqueue(que, msg, p);

        if (que->que_flags & MSGQ_READERS) {
                que->que_flags &= ~MSGQ_READERS;
                wakeup(que);
        }

        if (que->que_flags & MSGQ_DYING) {
                error = EIDRM;
                wakeup(que);
        }
out:
        QRELE(que);

        return (error);
}

int
sys_msgrcv(struct proc *p, void *v, register_t *retval)
{
        struct sys_msgrcv_args /* {
                syscallarg(int) msqid;
                syscallarg(void *) msgp;
                syscallarg(size_t) msgsz;
                syscallarg(long) msgtyp;
                syscallarg(int) msgflg;
        } */ *uap = v;
        struct ucred *cred = p->p_ucred;
        char *msgp = SCARG(uap, msgp);
        struct que *que;
        struct msg *msg;
        size_t msgsz = SCARG(uap, msgsz);
        long msgtyp = SCARG(uap, msgtyp);
        int error;

        if ((que = que_lookup(SCARG(uap, msqid))) == NULL)
                return (EINVAL);

        if ((error = ipcperm(cred, &que->msqid_ds.msg_perm, IPC_R)))
                return (error);

        QREF(que);

        /* msg_lookup handles matching; sleeping gets handled here */
        while ((msg = msg_lookup(que, msgtyp)) == NULL) {

                if (SCARG(uap, msgflg) & IPC_NOWAIT) {
                        error = ENOMSG;
                        goto out;
                }

                que->que_flags |= MSGQ_READERS;
                if ((error = tsleep_nsec(que, PZERO|PCATCH, "msgwait", INFSLP)))
                        goto out;

                /* make sure the queue is still alive */
                if (que->que_flags & MSGQ_DYING) {
                        error = EIDRM;
                        goto out;
                }
        }

        /* if msg_copyout fails, keep the message around so it isn't lost */
        if ((error = msg_copyout(msg, msgp, &msgsz, p)))
                goto out;

        msg_dequeue(que, msg, p);
        msg_free(msg);

        if (que->que_flags & MSGQ_WRITERS) {
                que->que_flags &= ~MSGQ_WRITERS;
                wakeup(que);
        }

        /* ensure processes waiting on the global limit don't wedge */
        if (maxmsgs) {
                maxmsgs = 0;
                que_wakewriters();
        }

        *retval = msgsz;
out:
        QRELE(que);

        return (error);
}

/*
 * que management functions
 */

struct que *
que_create(key_t key, struct ucred *cred, int mode)
{
        struct que *que, *que2;
        int nextix = 1;

        que = malloc(sizeof(*que), M_TEMP, M_WAIT|M_ZERO);

        /* if malloc slept, a queue with the same key may have been created */
        if (que_key_lookup(key)) {
                free(que, M_TEMP, sizeof *que);
                return (NULL);
        }

        /* find next available "index" */
        TAILQ_FOREACH(que2, &msg_queues, que_next) {
                if (nextix < que2->que_ix)
                        break;
                nextix = que2->que_ix + 1;
        }
        que->que_ix = nextix;

        que->msqid_ds.msg_perm.key = key;
        que->msqid_ds.msg_perm.cuid = cred->cr_uid;
        que->msqid_ds.msg_perm.uid = cred->cr_uid;
        que->msqid_ds.msg_perm.cgid = cred->cr_gid;
        que->msqid_ds.msg_perm.gid = cred->cr_gid;
        que->msqid_ds.msg_perm.mode = mode & 0777;
        que->msqid_ds.msg_perm.seq = ++sequence & 0x7fff;
        que->msqid_ds.msg_qbytes = msginfo.msgmnb;
        que->msqid_ds.msg_ctime = gettime();

        TAILQ_INIT(&que->que_msgs);

        /* keep queues in "index" order */
        if (que2)
                TAILQ_INSERT_BEFORE(que2, que, que_next);
        else
                TAILQ_INSERT_TAIL(&msg_queues, que, que_next);
        num_ques++;

        return (que);
}

struct que *
que_lookup(int id)
{
        struct que *que;

        TAILQ_FOREACH(que, &msg_queues, que_next)
                if (que->que_ix == IPCID_TO_IX(id))
                        break;

        /* don't return queues marked for removal */
        if (que && que->que_flags & MSGQ_DYING)
                return (NULL);

        return (que);
}

struct que *
que_key_lookup(key_t key)
{
        struct que *que;

        if (key == IPC_PRIVATE)
                return (NULL);

        TAILQ_FOREACH(que, &msg_queues, que_next)
                if (que->msqid_ds.msg_perm.key == key)
                        break;

        /* don't return queues marked for removal */
        if (que && que->que_flags & MSGQ_DYING)
                return (NULL);

        return (que);
}

void
que_wakewriters(void)
{
        struct que *que;

        TAILQ_FOREACH(que, &msg_queues, que_next) {
                if (que->que_flags & MSGQ_WRITERS) {
                        que->que_flags &= ~MSGQ_WRITERS;
                        wakeup(que);
                }
        }
}

void
que_free(struct que *que)
{
        struct msg *msg;
#ifdef DIAGNOSTIC
        if (que->que_references > 0)
                panic("freeing message queue with active references");
#endif

        while ((msg = TAILQ_FIRST(&que->que_msgs))) {
                TAILQ_REMOVE(&que->que_msgs, msg, msg_next);
                msg_free(msg);
        }
        free(que, M_TEMP, sizeof *que);
        num_ques--;
}

/*
 * msg management functions
 */

struct msg *
msg_create(struct que *que)
{
        struct msg *msg;

        msg = pool_get(&sysvmsgpl, PR_WAITOK|PR_ZERO);

        /* if the queue has died during allocation, return NULL */
        if (que->que_flags & MSGQ_DYING) {
                pool_put(&sysvmsgpl, msg);
                wakeup(que);
                return (NULL);
        }

        num_msgs++;

        return (msg);
}

struct msg *
msg_lookup(struct que *que, int msgtyp)
{
        struct msg *msg;

        /*
         * Three different matches are performed based on the value of msgtyp:
         * 1) msgtyp > 0 => match exactly
         * 2) msgtyp = 0 => match any
         * 3) msgtyp < 0 => match any up to absolute value of msgtyp
         */
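        /*
         * Note that the queue is scanned in FIFO order, so the first
         * (oldest) message satisfying the match is returned.
         */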
        TAILQ_FOREACH(msg, &que->que_msgs, msg_next)
                if (msgtyp == 0 || msgtyp == msg->msg_type ||
                    (msgtyp < 0 && msg->msg_type <= -msgtyp))
                        break;

        return (msg);
}

void
msg_free(struct msg *msg)
{
        m_freem(msg->msg_data);
        pool_put(&sysvmsgpl, msg);
        num_msgs--;
}

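/*
 * Append a message to its queue, updating the queue's byte and message
 * counts and its last-sender accounting.
 */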
void
msg_enqueue(struct que *que, struct msg *msg, struct proc *p)
{
        que->msqid_ds.msg_cbytes += msg->msg_len;
        que->msqid_ds.msg_qnum++;
        que->msqid_ds.msg_lspid = p->p_p->ps_pid;
        que->msqid_ds.msg_stime = gettime();

        TAILQ_INSERT_TAIL(&que->que_msgs, msg, msg_next);
}

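/*
 * Remove a message from its queue, updating the queue's byte and message
 * counts and its last-receiver accounting.
 */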
void
msg_dequeue(struct que *que, struct msg *msg, struct proc *p)
{
        que->msqid_ds.msg_cbytes -= msg->msg_len;
        que->msqid_ds.msg_qnum--;
        que->msqid_ds.msg_lrpid = p->p_p->ps_pid;
        que->msqid_ds.msg_rtime = gettime();

        TAILQ_REMOVE(&que->que_msgs, msg, msg_next);
}

/*
 * The actual I/O routines. A note concerning the layout of SysV msg buffers:
 *
 * The data to be copied is laid out as a single userspace buffer, with a
 * long preceding an opaque buffer of len bytes. The long value ends
 * up being the message type, which needs to be copied separately from
 * the buffer data, which is stored in mbufs.
 */
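/*
 * For illustration only (the struct is declared by the caller, not by
 * the kernel), a userland message buffer typically has the form
 *
 *	struct mymsg {
 *		long mtype;
 *		char mtext[64];
 *	};
 *
 * msgsnd(2) and msgrcv(2) are passed a pointer to such a buffer and a
 * msgsz counting only the mtext bytes; mtype supplies msg_type below.
 */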

int
msg_copyin(struct msg *msg, const char *ubuf, size_t len, struct proc *p)
{
        struct mbuf **mm, *m;
        size_t xfer;
        int error;

        if (msg == NULL)
                panic("msg NULL");

        if ((error = copyin(ubuf, &msg->msg_type, sizeof(long)))) {
                msg_free(msg);
                return (error);
        }

        if (msg->msg_type < 1) {
                msg_free(msg);
                return (EINVAL);
        }

        ubuf += sizeof(long);

        msg->msg_len = 0;
        mm = &msg->msg_data;

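        /*
         * Build an mbuf chain providing at least len bytes of storage
         * (using clusters for larger messages), then copy the message
         * body in from userspace.
         */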
        while (msg->msg_len < len) {
                m = m_get(M_WAIT, MT_DATA);
                if (len >= MINCLSIZE) {
                        MCLGET(m, M_WAIT);
                        xfer = min(len, MCLBYTES);
                } else {
                        xfer = min(len, MLEN);
                }
                m->m_len = xfer;
                msg->msg_len += xfer;
                *mm = m;
                mm = &m->m_next;
        }

        for (m = msg->msg_data; m; m = m->m_next) {
                if ((error = copyin(ubuf, mtod(m, void *), m->m_len))) {
                        msg_free(msg);
                        return (error);
                }
                ubuf += m->m_len;
        }

        return (0);
}

int
msg_copyout(struct msg *msg, char *ubuf, size_t *len, struct proc *p)
{
        struct mbuf *m;
        size_t xfer;
        int error;

#ifdef DIAGNOSTIC
        if (msg->msg_len > MSGMAX)
                panic("SysV message longer than MSGMAX");
#endif

        /* silently truncate messages too large for user buffer */
        xfer = min(*len, msg->msg_len);

        if ((error = copyout(&msg->msg_type, ubuf, sizeof(long))))
                return (error);

        ubuf += sizeof(long);
        *len = xfer;

        for (m = msg->msg_data; m; m = m->m_next) {
                if ((error = copyout(mtod(m, void *), ubuf, m->m_len)))
                        return (error);
                ubuf += m->m_len;
        }

        return (0);
}

int
sysctl_sysvmsg(int *name, u_int namelen, void *where, size_t *sizep)
{
        struct msg_sysctl_info *info;
        struct que *que;
        size_t infolen, infolen0;
        int error;

        switch (*name) {
        case KERN_SYSVIPC_MSG_INFO:

                if (namelen != 1)
                        return (ENOTDIR);

                /*
                 * The userland ipcs(1) utility expects to be able
                 * to iterate over at least msginfo.msgmni queues,
                 * even if those queues don't exist. This is an
                 * artifact of the previous implementation of
                 * message queues; for now, emulate this behavior
                 * until a more thorough fix can be made.
                 */
                infolen0 = sizeof(msginfo) +
                    msginfo.msgmni * sizeof(struct msqid_ds);
                if (where == NULL) {
                        *sizep = infolen0;
                        return (0);
                }

                /*
                 * More special-casing due to previous implementation:
                 * if the caller just wants the msginfo struct, then
                 * sizep will point to the value sizeof(struct msginfo).
                 * In that case, only copy out the msginfo struct to
                 * the caller.
                 */
                if (*sizep == sizeof(struct msginfo))
                        return (copyout(&msginfo, where, sizeof(msginfo)));

                info = malloc(infolen0, M_TEMP, M_WAIT|M_ZERO);

                /* if the malloc slept, this may have changed */
                infolen = sizeof(msginfo) +
                    msginfo.msgmni * sizeof(struct msqid_ds);

                if (*sizep < infolen) {
                        free(info, M_TEMP, infolen0);
                        return (ENOMEM);
                }

                memcpy(&info->msginfo, &msginfo, sizeof(struct msginfo));

                /*
                 * Special case #3: the previous array-based implementation
                 * exported the array indices and userland has come to rely
                 * upon these indices, so keep behavior consistent.
                 */
                TAILQ_FOREACH(que, &msg_queues, que_next)
                        memcpy(&info->msgids[que->que_ix], &que->msqid_ds,
                            sizeof(struct msqid_ds));

                error = copyout(info, where, infolen);

                free(info, M_TEMP, infolen0);

                return (error);

        default:
                return (EINVAL);
        }
}
728