1 /* $NetBSD: sysv_msg.c,v 1.69 2015/05/13 01:16:15 pgoyette Exp $ */ 2 3 /*- 4 * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center, and by Andrew Doran. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Implementation of SVID messages 35 * 36 * Author: Daniel Boulet 37 * 38 * Copyright 1993 Daniel Boulet and RTMX Inc. 39 * 40 * This system call was implemented by Daniel Boulet under contract from RTMX. 41 * 42 * Redistribution and use in source forms, with and without modification, 43 * are permitted provided that this entire comment appears intact. 44 * 45 * Redistribution in binary form may occur without any restrictions. 46 * Obviously, it would be nice if you gave credit where credit is due 47 * but requiring it would be too onerous. 48 * 49 * This software is provided ``AS IS'' without any warranties of any kind. 50 */ 51 52 #include <sys/cdefs.h> 53 __KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.69 2015/05/13 01:16:15 pgoyette Exp $"); 54 55 #ifdef _KERNEL_OPT 56 #include "opt_sysv.h" 57 #endif 58 59 #include <sys/param.h> 60 #include <sys/kernel.h> 61 #include <sys/msg.h> 62 #include <sys/sysctl.h> 63 #include <sys/mount.h> /* XXX for <sys/syscallargs.h> */ 64 #include <sys/syscallargs.h> 65 #include <sys/kauth.h> 66 67 #define MSG_DEBUG 68 #undef MSG_DEBUG_OK 69 70 #ifdef MSG_DEBUG_OK 71 #define MSG_PRINTF(a) printf a 72 #else 73 #define MSG_PRINTF(a) 74 #endif 75 76 static int nfree_msgmaps; /* # of free map entries */ 77 static short free_msgmaps; /* head of linked list of free map entries */ 78 static struct __msg *free_msghdrs; /* list of free msg headers */ 79 static char *msgpool; /* MSGMAX byte long msg buffer pool */ 80 static struct msgmap *msgmaps; /* MSGSEG msgmap structures */ 81 static struct __msg *msghdrs; /* MSGTQL msg headers */ 82 83 kmsq_t *msqs; /* MSGMNI msqid_ds struct's */ 84 kmutex_t msgmutex; /* subsystem lock */ 85 86 static u_int msg_waiters = 0; /* total number of msgrcv waiters */ 87 static bool msg_realloc_state; 88 static kcondvar_t msg_realloc_cv; 89 90 static void msg_freehdr(struct __msg *); 91 92 extern int kern_has_sysvmsg; 93 94 void 95 msginit(void) 96 { 97 int i, sz; 98 vaddr_t v; 99 100 /* 101 * msginfo.msgssz should be a power of two for efficiency reasons. 102 * It is also pretty silly if msginfo.msgssz is less than 8 103 * or greater than about 256 so ... 104 */ 105 106 i = 8; 107 while (i < 1024 && i != msginfo.msgssz) 108 i <<= 1; 109 if (i != msginfo.msgssz) { 110 panic("msginfo.msgssz = %d, not a small power of 2", 111 msginfo.msgssz); 112 } 113 114 if (msginfo.msgseg > 32767) { 115 panic("msginfo.msgseg = %d > 32767", msginfo.msgseg); 116 } 117 118 /* Allocate the wired memory for our structures */ 119 sz = ALIGN(msginfo.msgmax) + 120 ALIGN(msginfo.msgseg * sizeof(struct msgmap)) + 121 ALIGN(msginfo.msgtql * sizeof(struct __msg)) + 122 ALIGN(msginfo.msgmni * sizeof(kmsq_t)); 123 sz = round_page(sz); 124 v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 125 if (v == 0) 126 panic("sysv_msg: cannot allocate memory"); 127 msgpool = (void *)v; 128 msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax)); 129 msghdrs = (void *)((uintptr_t)msgmaps + 130 ALIGN(msginfo.msgseg * sizeof(struct msgmap))); 131 msqs = (void *)((uintptr_t)msghdrs + 132 ALIGN(msginfo.msgtql * sizeof(struct __msg))); 133 134 for (i = 0; i < (msginfo.msgseg - 1); i++) 135 msgmaps[i].next = i + 1; 136 msgmaps[msginfo.msgseg - 1].next = -1; 137 138 free_msgmaps = 0; 139 nfree_msgmaps = msginfo.msgseg; 140 141 for (i = 0; i < (msginfo.msgtql - 1); i++) { 142 msghdrs[i].msg_type = 0; 143 msghdrs[i].msg_next = &msghdrs[i + 1]; 144 } 145 i = msginfo.msgtql - 1; 146 msghdrs[i].msg_type = 0; 147 msghdrs[i].msg_next = NULL; 148 free_msghdrs = &msghdrs[0]; 149 150 for (i = 0; i < msginfo.msgmni; i++) { 151 cv_init(&msqs[i].msq_cv, "msgwait"); 152 /* Implies entry is available */ 153 msqs[i].msq_u.msg_qbytes = 0; 154 /* Reset to a known value */ 155 msqs[i].msq_u.msg_perm._seq = 0; 156 } 157 158 mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE); 159 cv_init(&msg_realloc_cv, "msgrealc"); 160 msg_realloc_state = false; 161 162 kern_has_sysvmsg = 1; 163 164 sysvipcinit(); 165 } 166 167 int 168 msgfini(void) 169 { 170 int i, sz; 171 vaddr_t v = (vaddr_t)msgpool; 172 173 mutex_enter(&msgmutex); 174 for (i = 0; i < msginfo.msgmni; i++) { 175 if (msqs[i].msq_u.msg_qbytes != 0) { 176 mutex_exit(&msgmutex); 177 return 1; /* queue not available, prevent unload! */ 178 } 179 } 180 /* 181 * Destroy all condvars and free the memory we're using 182 */ 183 for (i = 0; i < msginfo.msgmni; i++) { 184 cv_destroy(&msqs[i].msq_cv); 185 } 186 sz = ALIGN(msginfo.msgmax) + 187 ALIGN(msginfo.msgseg * sizeof(struct msgmap)) + 188 ALIGN(msginfo.msgtql * sizeof(struct __msg)) + 189 ALIGN(msginfo.msgmni * sizeof(kmsq_t)); 190 sz = round_page(sz); 191 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED); 192 193 mutex_exit(&msgmutex); 194 mutex_destroy(&msgmutex); 195 196 kern_has_sysvmsg = 0; 197 198 return 0; 199 } 200 201 static int 202 msgrealloc(int newmsgmni, int newmsgseg) 203 { 204 struct msgmap *new_msgmaps; 205 struct __msg *new_msghdrs, *new_free_msghdrs; 206 char *old_msgpool, *new_msgpool; 207 kmsq_t *new_msqs; 208 vaddr_t v; 209 int i, sz, msqid, newmsgmax, new_nfree_msgmaps; 210 short new_free_msgmaps; 211 212 if (newmsgmni < 1 || newmsgseg < 1) 213 return EINVAL; 214 215 /* Allocate the wired memory for our structures */ 216 newmsgmax = msginfo.msgssz * newmsgseg; 217 sz = ALIGN(newmsgmax) + 218 ALIGN(newmsgseg * sizeof(struct msgmap)) + 219 ALIGN(msginfo.msgtql * sizeof(struct __msg)) + 220 ALIGN(newmsgmni * sizeof(kmsq_t)); 221 sz = round_page(sz); 222 v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 223 if (v == 0) 224 return ENOMEM; 225 226 mutex_enter(&msgmutex); 227 if (msg_realloc_state) { 228 mutex_exit(&msgmutex); 229 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED); 230 return EBUSY; 231 } 232 msg_realloc_state = true; 233 if (msg_waiters) { 234 /* 235 * Mark reallocation state, wake-up all waiters, 236 * and wait while they will all exit. 237 */ 238 for (i = 0; i < msginfo.msgmni; i++) 239 cv_broadcast(&msqs[i].msq_cv); 240 while (msg_waiters) 241 cv_wait(&msg_realloc_cv, &msgmutex); 242 } 243 old_msgpool = msgpool; 244 245 /* We cannot reallocate less memory than we use */ 246 i = 0; 247 for (msqid = 0; msqid < msginfo.msgmni; msqid++) { 248 struct msqid_ds *mptr; 249 kmsq_t *msq; 250 251 msq = &msqs[msqid]; 252 mptr = &msq->msq_u; 253 if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED)) 254 i = msqid; 255 } 256 if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) { 257 mutex_exit(&msgmutex); 258 uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED); 259 return EBUSY; 260 } 261 262 new_msgpool = (void *)v; 263 new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax)); 264 new_msghdrs = (void *)((uintptr_t)new_msgmaps + 265 ALIGN(newmsgseg * sizeof(struct msgmap))); 266 new_msqs = (void *)((uintptr_t)new_msghdrs + 267 ALIGN(msginfo.msgtql * sizeof(struct __msg))); 268 269 /* Initialize the structures */ 270 for (i = 0; i < (newmsgseg - 1); i++) 271 new_msgmaps[i].next = i + 1; 272 new_msgmaps[newmsgseg - 1].next = -1; 273 new_free_msgmaps = 0; 274 new_nfree_msgmaps = newmsgseg; 275 276 for (i = 0; i < (msginfo.msgtql - 1); i++) { 277 new_msghdrs[i].msg_type = 0; 278 new_msghdrs[i].msg_next = &new_msghdrs[i + 1]; 279 } 280 i = msginfo.msgtql - 1; 281 new_msghdrs[i].msg_type = 0; 282 new_msghdrs[i].msg_next = NULL; 283 new_free_msghdrs = &new_msghdrs[0]; 284 285 for (i = 0; i < newmsgmni; i++) { 286 new_msqs[i].msq_u.msg_qbytes = 0; 287 new_msqs[i].msq_u.msg_perm._seq = 0; 288 cv_init(&new_msqs[i].msq_cv, "msgwait"); 289 } 290 291 /* 292 * Copy all message queue identifiers, message headers and buffer 293 * pools to the new memory location. 294 */ 295 for (msqid = 0; msqid < msginfo.msgmni; msqid++) { 296 struct __msg *nmsghdr, *msghdr, *pmsghdr; 297 struct msqid_ds *nmptr, *mptr; 298 kmsq_t *nmsq, *msq; 299 300 msq = &msqs[msqid]; 301 mptr = &msq->msq_u; 302 303 if (mptr->msg_qbytes == 0 && 304 (mptr->msg_perm.mode & MSG_LOCKED) == 0) 305 continue; 306 307 nmsq = &new_msqs[msqid]; 308 nmptr = &nmsq->msq_u; 309 memcpy(nmptr, mptr, sizeof(struct msqid_ds)); 310 311 /* 312 * Go through the message headers, and and copy each 313 * one by taking the new ones, and thus defragmenting. 314 */ 315 nmsghdr = pmsghdr = NULL; 316 msghdr = mptr->_msg_first; 317 while (msghdr) { 318 short nnext = 0, next; 319 u_short msgsz, segcnt; 320 321 /* Take an entry from the new list of free msghdrs */ 322 nmsghdr = new_free_msghdrs; 323 KASSERT(nmsghdr != NULL); 324 new_free_msghdrs = nmsghdr->msg_next; 325 326 nmsghdr->msg_next = NULL; 327 if (pmsghdr) { 328 pmsghdr->msg_next = nmsghdr; 329 } else { 330 nmptr->_msg_first = nmsghdr; 331 pmsghdr = nmsghdr; 332 } 333 nmsghdr->msg_ts = msghdr->msg_ts; 334 nmsghdr->msg_spot = -1; 335 336 /* Compute the amount of segments and reserve them */ 337 msgsz = msghdr->msg_ts; 338 segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz; 339 if (segcnt == 0) 340 continue; 341 while (segcnt--) { 342 nnext = new_free_msgmaps; 343 new_free_msgmaps = new_msgmaps[nnext].next; 344 new_nfree_msgmaps--; 345 new_msgmaps[nnext].next = nmsghdr->msg_spot; 346 nmsghdr->msg_spot = nnext; 347 } 348 349 /* Copy all segments */ 350 KASSERT(nnext == nmsghdr->msg_spot); 351 next = msghdr->msg_spot; 352 while (msgsz > 0) { 353 size_t tlen; 354 355 if (msgsz >= msginfo.msgssz) { 356 tlen = msginfo.msgssz; 357 msgsz -= msginfo.msgssz; 358 } else { 359 tlen = msgsz; 360 msgsz = 0; 361 } 362 363 /* Copy the message buffer */ 364 memcpy(&new_msgpool[nnext * msginfo.msgssz], 365 &msgpool[next * msginfo.msgssz], tlen); 366 367 /* Next entry of the map */ 368 nnext = msgmaps[nnext].next; 369 next = msgmaps[next].next; 370 } 371 372 /* Next message header */ 373 msghdr = msghdr->msg_next; 374 } 375 nmptr->_msg_last = nmsghdr; 376 } 377 KASSERT((msginfo.msgseg - nfree_msgmaps) == 378 (newmsgseg - new_nfree_msgmaps)); 379 380 sz = ALIGN(msginfo.msgmax) + 381 ALIGN(msginfo.msgseg * sizeof(struct msgmap)) + 382 ALIGN(msginfo.msgtql * sizeof(struct __msg)) + 383 ALIGN(msginfo.msgmni * sizeof(kmsq_t)); 384 sz = round_page(sz); 385 386 for (i = 0; i < msginfo.msgmni; i++) 387 cv_destroy(&msqs[i].msq_cv); 388 389 /* Set the pointers and update the new values */ 390 msgpool = new_msgpool; 391 msgmaps = new_msgmaps; 392 msghdrs = new_msghdrs; 393 msqs = new_msqs; 394 395 free_msghdrs = new_free_msghdrs; 396 free_msgmaps = new_free_msgmaps; 397 nfree_msgmaps = new_nfree_msgmaps; 398 msginfo.msgmni = newmsgmni; 399 msginfo.msgseg = newmsgseg; 400 msginfo.msgmax = newmsgmax; 401 402 /* Reallocation completed - notify all waiters, if any */ 403 msg_realloc_state = false; 404 cv_broadcast(&msg_realloc_cv); 405 mutex_exit(&msgmutex); 406 407 uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED); 408 return 0; 409 } 410 411 static void 412 msg_freehdr(struct __msg *msghdr) 413 { 414 415 KASSERT(mutex_owned(&msgmutex)); 416 417 while (msghdr->msg_ts > 0) { 418 short next; 419 KASSERT(msghdr->msg_spot >= 0); 420 KASSERT(msghdr->msg_spot < msginfo.msgseg); 421 422 next = msgmaps[msghdr->msg_spot].next; 423 msgmaps[msghdr->msg_spot].next = free_msgmaps; 424 free_msgmaps = msghdr->msg_spot; 425 nfree_msgmaps++; 426 msghdr->msg_spot = next; 427 if (msghdr->msg_ts >= msginfo.msgssz) 428 msghdr->msg_ts -= msginfo.msgssz; 429 else 430 msghdr->msg_ts = 0; 431 } 432 KASSERT(msghdr->msg_spot == -1); 433 msghdr->msg_next = free_msghdrs; 434 free_msghdrs = msghdr; 435 } 436 437 int 438 sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap, 439 register_t *retval) 440 { 441 /* { 442 syscallarg(int) msqid; 443 syscallarg(int) cmd; 444 syscallarg(struct msqid_ds *) buf; 445 } */ 446 struct msqid_ds msqbuf; 447 int cmd, error; 448 449 cmd = SCARG(uap, cmd); 450 451 if (cmd == IPC_SET) { 452 error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf)); 453 if (error) 454 return (error); 455 } 456 457 error = msgctl1(l, SCARG(uap, msqid), cmd, 458 (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL); 459 460 if (error == 0 && cmd == IPC_STAT) 461 error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf)); 462 463 return (error); 464 } 465 466 int 467 msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf) 468 { 469 kauth_cred_t cred = l->l_cred; 470 struct msqid_ds *msqptr; 471 kmsq_t *msq; 472 int error = 0, ix; 473 474 MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd)); 475 476 ix = IPCID_TO_IX(msqid); 477 478 mutex_enter(&msgmutex); 479 480 if (ix < 0 || ix >= msginfo.msgmni) { 481 MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix, 482 msginfo.msgmni)); 483 error = EINVAL; 484 goto unlock; 485 } 486 487 msq = &msqs[ix]; 488 msqptr = &msq->msq_u; 489 490 if (msqptr->msg_qbytes == 0) { 491 MSG_PRINTF(("no such msqid\n")); 492 error = EINVAL; 493 goto unlock; 494 } 495 if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) { 496 MSG_PRINTF(("wrong sequence number\n")); 497 error = EINVAL; 498 goto unlock; 499 } 500 501 switch (cmd) { 502 case IPC_RMID: 503 { 504 struct __msg *msghdr; 505 if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0) 506 break; 507 /* Free the message headers */ 508 msghdr = msqptr->_msg_first; 509 while (msghdr != NULL) { 510 struct __msg *msghdr_tmp; 511 512 /* Free the segments of each message */ 513 msqptr->_msg_cbytes -= msghdr->msg_ts; 514 msqptr->msg_qnum--; 515 msghdr_tmp = msghdr; 516 msghdr = msghdr->msg_next; 517 msg_freehdr(msghdr_tmp); 518 } 519 KASSERT(msqptr->_msg_cbytes == 0); 520 KASSERT(msqptr->msg_qnum == 0); 521 522 /* Mark it as free */ 523 msqptr->msg_qbytes = 0; 524 cv_broadcast(&msq->msq_cv); 525 } 526 break; 527 528 case IPC_SET: 529 if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M))) 530 break; 531 if (msqbuf->msg_qbytes > msqptr->msg_qbytes && 532 kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC, 533 KAUTH_REQ_SYSTEM_SYSVIPC_MSGQ_OVERSIZE, 534 KAUTH_ARG(msqbuf->msg_qbytes), 535 KAUTH_ARG(msqptr->msg_qbytes), NULL) != 0) { 536 error = EPERM; 537 break; 538 } 539 if (msqbuf->msg_qbytes > msginfo.msgmnb) { 540 MSG_PRINTF(("can't increase msg_qbytes beyond %d " 541 "(truncating)\n", msginfo.msgmnb)); 542 /* silently restrict qbytes to system limit */ 543 msqbuf->msg_qbytes = msginfo.msgmnb; 544 } 545 if (msqbuf->msg_qbytes == 0) { 546 MSG_PRINTF(("can't reduce msg_qbytes to 0\n")); 547 error = EINVAL; /* XXX non-standard errno! */ 548 break; 549 } 550 msqptr->msg_perm.uid = msqbuf->msg_perm.uid; 551 msqptr->msg_perm.gid = msqbuf->msg_perm.gid; 552 msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) | 553 (msqbuf->msg_perm.mode & 0777); 554 msqptr->msg_qbytes = msqbuf->msg_qbytes; 555 msqptr->msg_ctime = time_second; 556 break; 557 558 case IPC_STAT: 559 if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) { 560 MSG_PRINTF(("requester doesn't have read access\n")); 561 break; 562 } 563 memcpy(msqbuf, msqptr, sizeof(struct msqid_ds)); 564 break; 565 566 default: 567 MSG_PRINTF(("invalid command %d\n", cmd)); 568 error = EINVAL; 569 break; 570 } 571 572 unlock: 573 mutex_exit(&msgmutex); 574 return (error); 575 } 576 577 int 578 sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval) 579 { 580 /* { 581 syscallarg(key_t) key; 582 syscallarg(int) msgflg; 583 } */ 584 int msqid, error = 0; 585 int key = SCARG(uap, key); 586 int msgflg = SCARG(uap, msgflg); 587 kauth_cred_t cred = l->l_cred; 588 struct msqid_ds *msqptr = NULL; 589 kmsq_t *msq; 590 591 mutex_enter(&msgmutex); 592 593 MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg)); 594 595 if (key != IPC_PRIVATE) { 596 for (msqid = 0; msqid < msginfo.msgmni; msqid++) { 597 msq = &msqs[msqid]; 598 msqptr = &msq->msq_u; 599 if (msqptr->msg_qbytes != 0 && 600 msqptr->msg_perm._key == key) 601 break; 602 } 603 if (msqid < msginfo.msgmni) { 604 MSG_PRINTF(("found public key\n")); 605 if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) { 606 MSG_PRINTF(("not exclusive\n")); 607 error = EEXIST; 608 goto unlock; 609 } 610 if ((error = ipcperm(cred, &msqptr->msg_perm, 611 msgflg & 0700 ))) { 612 MSG_PRINTF(("requester doesn't have 0%o access\n", 613 msgflg & 0700)); 614 goto unlock; 615 } 616 goto found; 617 } 618 } 619 620 MSG_PRINTF(("need to allocate the msqid_ds\n")); 621 if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) { 622 for (msqid = 0; msqid < msginfo.msgmni; msqid++) { 623 /* 624 * Look for an unallocated and unlocked msqid_ds. 625 * msqid_ds's can be locked by msgsnd or msgrcv while 626 * they are copying the message in/out. We can't 627 * re-use the entry until they release it. 628 */ 629 msq = &msqs[msqid]; 630 msqptr = &msq->msq_u; 631 if (msqptr->msg_qbytes == 0 && 632 (msqptr->msg_perm.mode & MSG_LOCKED) == 0) 633 break; 634 } 635 if (msqid == msginfo.msgmni) { 636 MSG_PRINTF(("no more msqid_ds's available\n")); 637 error = ENOSPC; 638 goto unlock; 639 } 640 MSG_PRINTF(("msqid %d is available\n", msqid)); 641 msqptr->msg_perm._key = key; 642 msqptr->msg_perm.cuid = kauth_cred_geteuid(cred); 643 msqptr->msg_perm.uid = kauth_cred_geteuid(cred); 644 msqptr->msg_perm.cgid = kauth_cred_getegid(cred); 645 msqptr->msg_perm.gid = kauth_cred_getegid(cred); 646 msqptr->msg_perm.mode = (msgflg & 0777); 647 /* Make sure that the returned msqid is unique */ 648 msqptr->msg_perm._seq++; 649 msqptr->_msg_first = NULL; 650 msqptr->_msg_last = NULL; 651 msqptr->_msg_cbytes = 0; 652 msqptr->msg_qnum = 0; 653 msqptr->msg_qbytes = msginfo.msgmnb; 654 msqptr->msg_lspid = 0; 655 msqptr->msg_lrpid = 0; 656 msqptr->msg_stime = 0; 657 msqptr->msg_rtime = 0; 658 msqptr->msg_ctime = time_second; 659 } else { 660 MSG_PRINTF(("didn't find it and wasn't asked to create it\n")); 661 error = ENOENT; 662 goto unlock; 663 } 664 665 found: 666 /* Construct the unique msqid */ 667 *retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm); 668 669 unlock: 670 mutex_exit(&msgmutex); 671 return (error); 672 } 673 674 int 675 sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval) 676 { 677 /* { 678 syscallarg(int) msqid; 679 syscallarg(const void *) msgp; 680 syscallarg(size_t) msgsz; 681 syscallarg(int) msgflg; 682 } */ 683 684 return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp), 685 SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin); 686 } 687 688 int 689 msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz, 690 int msgflg, size_t typesz, copyin_t fetch_type) 691 { 692 int segs_needed, error = 0, msqid; 693 kauth_cred_t cred = l->l_cred; 694 struct msqid_ds *msqptr; 695 struct __msg *msghdr; 696 kmsq_t *msq; 697 short next; 698 699 MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr, 700 user_msgp, (long long)msgsz, msgflg)); 701 702 if ((ssize_t)msgsz < 0) 703 return EINVAL; 704 705 restart: 706 msqid = IPCID_TO_IX(msqidr); 707 708 mutex_enter(&msgmutex); 709 /* In case of reallocation, we will wait for completion */ 710 while (__predict_false(msg_realloc_state)) 711 cv_wait(&msg_realloc_cv, &msgmutex); 712 713 if (msqid < 0 || msqid >= msginfo.msgmni) { 714 MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid, 715 msginfo.msgmni)); 716 error = EINVAL; 717 goto unlock; 718 } 719 720 msq = &msqs[msqid]; 721 msqptr = &msq->msq_u; 722 723 if (msqptr->msg_qbytes == 0) { 724 MSG_PRINTF(("no such message queue id\n")); 725 error = EINVAL; 726 goto unlock; 727 } 728 if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) { 729 MSG_PRINTF(("wrong sequence number\n")); 730 error = EINVAL; 731 goto unlock; 732 } 733 734 if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) { 735 MSG_PRINTF(("requester doesn't have write access\n")); 736 goto unlock; 737 } 738 739 segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz; 740 MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n", 741 (long long)msgsz, msginfo.msgssz, segs_needed)); 742 for (;;) { 743 int need_more_resources = 0; 744 745 /* 746 * check msgsz [cannot be negative since it is unsigned] 747 * (inside this loop in case msg_qbytes changes while we sleep) 748 */ 749 750 if (msgsz > msqptr->msg_qbytes) { 751 MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n")); 752 error = EINVAL; 753 goto unlock; 754 } 755 756 if (msqptr->msg_perm.mode & MSG_LOCKED) { 757 MSG_PRINTF(("msqid is locked\n")); 758 need_more_resources = 1; 759 } 760 if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) { 761 MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n")); 762 need_more_resources = 1; 763 } 764 if (segs_needed > nfree_msgmaps) { 765 MSG_PRINTF(("segs_needed > nfree_msgmaps\n")); 766 need_more_resources = 1; 767 } 768 if (free_msghdrs == NULL) { 769 MSG_PRINTF(("no more msghdrs\n")); 770 need_more_resources = 1; 771 } 772 773 if (need_more_resources) { 774 int we_own_it; 775 776 if ((msgflg & IPC_NOWAIT) != 0) { 777 MSG_PRINTF(("need more resources but caller " 778 "doesn't want to wait\n")); 779 error = EAGAIN; 780 goto unlock; 781 } 782 783 if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) { 784 MSG_PRINTF(("we don't own the msqid_ds\n")); 785 we_own_it = 0; 786 } else { 787 /* Force later arrivals to wait for our 788 request */ 789 MSG_PRINTF(("we own the msqid_ds\n")); 790 msqptr->msg_perm.mode |= MSG_LOCKED; 791 we_own_it = 1; 792 } 793 794 msg_waiters++; 795 MSG_PRINTF(("goodnight\n")); 796 error = cv_wait_sig(&msq->msq_cv, &msgmutex); 797 MSG_PRINTF(("good morning, error=%d\n", error)); 798 msg_waiters--; 799 800 if (we_own_it) 801 msqptr->msg_perm.mode &= ~MSG_LOCKED; 802 803 /* 804 * In case of such state, notify reallocator and 805 * restart the call. 806 */ 807 if (msg_realloc_state) { 808 cv_broadcast(&msg_realloc_cv); 809 mutex_exit(&msgmutex); 810 goto restart; 811 } 812 813 if (error != 0) { 814 MSG_PRINTF(("msgsnd: interrupted system " 815 "call\n")); 816 error = EINTR; 817 goto unlock; 818 } 819 820 /* 821 * Make sure that the msq queue still exists 822 */ 823 824 if (msqptr->msg_qbytes == 0) { 825 MSG_PRINTF(("msqid deleted\n")); 826 error = EIDRM; 827 goto unlock; 828 } 829 } else { 830 MSG_PRINTF(("got all the resources that we need\n")); 831 break; 832 } 833 } 834 835 /* 836 * We have the resources that we need. 837 * Make sure! 838 */ 839 840 KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0); 841 KASSERT(segs_needed <= nfree_msgmaps); 842 KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes); 843 KASSERT(free_msghdrs != NULL); 844 845 /* 846 * Re-lock the msqid_ds in case we page-fault when copying in the 847 * message 848 */ 849 850 KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0); 851 msqptr->msg_perm.mode |= MSG_LOCKED; 852 853 /* 854 * Allocate a message header 855 */ 856 857 msghdr = free_msghdrs; 858 free_msghdrs = msghdr->msg_next; 859 msghdr->msg_spot = -1; 860 msghdr->msg_ts = msgsz; 861 862 /* 863 * Allocate space for the message 864 */ 865 866 while (segs_needed > 0) { 867 KASSERT(nfree_msgmaps > 0); 868 KASSERT(free_msgmaps != -1); 869 KASSERT(free_msgmaps < msginfo.msgseg); 870 871 next = free_msgmaps; 872 MSG_PRINTF(("allocating segment %d to message\n", next)); 873 free_msgmaps = msgmaps[next].next; 874 nfree_msgmaps--; 875 msgmaps[next].next = msghdr->msg_spot; 876 msghdr->msg_spot = next; 877 segs_needed--; 878 } 879 880 /* 881 * Copy in the message type 882 */ 883 mutex_exit(&msgmutex); 884 error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz); 885 mutex_enter(&msgmutex); 886 if (error != 0) { 887 MSG_PRINTF(("error %d copying the message type\n", error)); 888 msg_freehdr(msghdr); 889 msqptr->msg_perm.mode &= ~MSG_LOCKED; 890 cv_broadcast(&msq->msq_cv); 891 goto unlock; 892 } 893 user_msgp += typesz; 894 895 /* 896 * Validate the message type 897 */ 898 899 if (msghdr->msg_type < 1) { 900 msg_freehdr(msghdr); 901 msqptr->msg_perm.mode &= ~MSG_LOCKED; 902 cv_broadcast(&msq->msq_cv); 903 MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type)); 904 error = EINVAL; 905 goto unlock; 906 } 907 908 /* 909 * Copy in the message body 910 */ 911 912 next = msghdr->msg_spot; 913 while (msgsz > 0) { 914 size_t tlen; 915 KASSERT(next > -1); 916 KASSERT(next < msginfo.msgseg); 917 918 if (msgsz > msginfo.msgssz) 919 tlen = msginfo.msgssz; 920 else 921 tlen = msgsz; 922 mutex_exit(&msgmutex); 923 error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen); 924 mutex_enter(&msgmutex); 925 if (error != 0) { 926 MSG_PRINTF(("error %d copying in message segment\n", 927 error)); 928 msg_freehdr(msghdr); 929 msqptr->msg_perm.mode &= ~MSG_LOCKED; 930 cv_broadcast(&msq->msq_cv); 931 goto unlock; 932 } 933 msgsz -= tlen; 934 user_msgp += tlen; 935 next = msgmaps[next].next; 936 } 937 KASSERT(next == -1); 938 939 /* 940 * We've got the message. Unlock the msqid_ds. 941 */ 942 943 msqptr->msg_perm.mode &= ~MSG_LOCKED; 944 945 /* 946 * Make sure that the msqid_ds is still allocated. 947 */ 948 949 if (msqptr->msg_qbytes == 0) { 950 msg_freehdr(msghdr); 951 cv_broadcast(&msq->msq_cv); 952 error = EIDRM; 953 goto unlock; 954 } 955 956 /* 957 * Put the message into the queue 958 */ 959 960 if (msqptr->_msg_first == NULL) { 961 msqptr->_msg_first = msghdr; 962 msqptr->_msg_last = msghdr; 963 } else { 964 msqptr->_msg_last->msg_next = msghdr; 965 msqptr->_msg_last = msghdr; 966 } 967 msqptr->_msg_last->msg_next = NULL; 968 969 msqptr->_msg_cbytes += msghdr->msg_ts; 970 msqptr->msg_qnum++; 971 msqptr->msg_lspid = l->l_proc->p_pid; 972 msqptr->msg_stime = time_second; 973 974 cv_broadcast(&msq->msq_cv); 975 976 unlock: 977 mutex_exit(&msgmutex); 978 return error; 979 } 980 981 int 982 sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval) 983 { 984 /* { 985 syscallarg(int) msqid; 986 syscallarg(void *) msgp; 987 syscallarg(size_t) msgsz; 988 syscallarg(long) msgtyp; 989 syscallarg(int) msgflg; 990 } */ 991 992 return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp), 993 SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg), 994 sizeof(long), copyout, retval); 995 } 996 997 int 998 msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp, 999 int msgflg, size_t typesz, copyout_t put_type, register_t *retval) 1000 { 1001 size_t len; 1002 kauth_cred_t cred = l->l_cred; 1003 struct msqid_ds *msqptr; 1004 struct __msg *msghdr; 1005 int error = 0, msqid; 1006 kmsq_t *msq; 1007 short next; 1008 1009 MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr, 1010 user_msgp, (long long)msgsz, msgtyp, msgflg)); 1011 1012 if ((ssize_t)msgsz < 0) 1013 return EINVAL; 1014 1015 restart: 1016 msqid = IPCID_TO_IX(msqidr); 1017 1018 mutex_enter(&msgmutex); 1019 /* In case of reallocation, we will wait for completion */ 1020 while (__predict_false(msg_realloc_state)) 1021 cv_wait(&msg_realloc_cv, &msgmutex); 1022 1023 if (msqid < 0 || msqid >= msginfo.msgmni) { 1024 MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid, 1025 msginfo.msgmni)); 1026 error = EINVAL; 1027 goto unlock; 1028 } 1029 1030 msq = &msqs[msqid]; 1031 msqptr = &msq->msq_u; 1032 1033 if (msqptr->msg_qbytes == 0) { 1034 MSG_PRINTF(("no such message queue id\n")); 1035 error = EINVAL; 1036 goto unlock; 1037 } 1038 if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) { 1039 MSG_PRINTF(("wrong sequence number\n")); 1040 error = EINVAL; 1041 goto unlock; 1042 } 1043 1044 if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) { 1045 MSG_PRINTF(("requester doesn't have read access\n")); 1046 goto unlock; 1047 } 1048 1049 msghdr = NULL; 1050 while (msghdr == NULL) { 1051 if (msgtyp == 0) { 1052 msghdr = msqptr->_msg_first; 1053 if (msghdr != NULL) { 1054 if (msgsz < msghdr->msg_ts && 1055 (msgflg & MSG_NOERROR) == 0) { 1056 MSG_PRINTF(("first msg on the queue " 1057 "is too big (want %lld, got %d)\n", 1058 (long long)msgsz, msghdr->msg_ts)); 1059 error = E2BIG; 1060 goto unlock; 1061 } 1062 if (msqptr->_msg_first == msqptr->_msg_last) { 1063 msqptr->_msg_first = NULL; 1064 msqptr->_msg_last = NULL; 1065 } else { 1066 msqptr->_msg_first = msghdr->msg_next; 1067 KASSERT(msqptr->_msg_first != NULL); 1068 } 1069 } 1070 } else { 1071 struct __msg *previous; 1072 struct __msg **prev; 1073 1074 for (previous = NULL, prev = &msqptr->_msg_first; 1075 (msghdr = *prev) != NULL; 1076 previous = msghdr, prev = &msghdr->msg_next) { 1077 /* 1078 * Is this message's type an exact match or is 1079 * this message's type less than or equal to 1080 * the absolute value of a negative msgtyp? 1081 * Note that the second half of this test can 1082 * NEVER be true if msgtyp is positive since 1083 * msg_type is always positive! 1084 */ 1085 1086 if (msgtyp != msghdr->msg_type && 1087 msghdr->msg_type > -msgtyp) 1088 continue; 1089 1090 MSG_PRINTF(("found message type %ld, requested %ld\n", 1091 msghdr->msg_type, msgtyp)); 1092 if (msgsz < msghdr->msg_ts && 1093 (msgflg & MSG_NOERROR) == 0) { 1094 MSG_PRINTF(("requested message on the queue " 1095 "is too big (want %lld, got %d)\n", 1096 (long long)msgsz, msghdr->msg_ts)); 1097 error = E2BIG; 1098 goto unlock; 1099 } 1100 *prev = msghdr->msg_next; 1101 if (msghdr != msqptr->_msg_last) 1102 break; 1103 if (previous == NULL) { 1104 KASSERT(prev == &msqptr->_msg_first); 1105 msqptr->_msg_first = NULL; 1106 msqptr->_msg_last = NULL; 1107 } else { 1108 KASSERT(prev != &msqptr->_msg_first); 1109 msqptr->_msg_last = previous; 1110 } 1111 break; 1112 } 1113 } 1114 1115 /* 1116 * We've either extracted the msghdr for the appropriate 1117 * message or there isn't one. 1118 * If there is one then bail out of this loop. 1119 */ 1120 if (msghdr != NULL) 1121 break; 1122 1123 /* 1124 * Hmph! No message found. Does the user want to wait? 1125 */ 1126 1127 if ((msgflg & IPC_NOWAIT) != 0) { 1128 MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n", 1129 msgtyp)); 1130 error = ENOMSG; 1131 goto unlock; 1132 } 1133 1134 /* 1135 * Wait for something to happen 1136 */ 1137 1138 msg_waiters++; 1139 MSG_PRINTF(("msgrcv: goodnight\n")); 1140 error = cv_wait_sig(&msq->msq_cv, &msgmutex); 1141 MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error)); 1142 msg_waiters--; 1143 1144 /* 1145 * In case of such state, notify reallocator and 1146 * restart the call. 1147 */ 1148 if (msg_realloc_state) { 1149 cv_broadcast(&msg_realloc_cv); 1150 mutex_exit(&msgmutex); 1151 goto restart; 1152 } 1153 1154 if (error != 0) { 1155 MSG_PRINTF(("msgsnd: interrupted system call\n")); 1156 error = EINTR; 1157 goto unlock; 1158 } 1159 1160 /* 1161 * Make sure that the msq queue still exists 1162 */ 1163 1164 if (msqptr->msg_qbytes == 0 || 1165 msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) { 1166 MSG_PRINTF(("msqid deleted\n")); 1167 error = EIDRM; 1168 goto unlock; 1169 } 1170 } 1171 1172 /* 1173 * Return the message to the user. 1174 * 1175 * First, do the bookkeeping (before we risk being interrupted). 1176 */ 1177 1178 msqptr->_msg_cbytes -= msghdr->msg_ts; 1179 msqptr->msg_qnum--; 1180 msqptr->msg_lrpid = l->l_proc->p_pid; 1181 msqptr->msg_rtime = time_second; 1182 1183 /* 1184 * Make msgsz the actual amount that we'll be returning. 1185 * Note that this effectively truncates the message if it is too long 1186 * (since msgsz is never increased). 1187 */ 1188 1189 MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n", 1190 (long long)msgsz, msghdr->msg_ts)); 1191 if (msgsz > msghdr->msg_ts) 1192 msgsz = msghdr->msg_ts; 1193 1194 /* 1195 * Return the type to the user. 1196 */ 1197 mutex_exit(&msgmutex); 1198 error = (*put_type)(&msghdr->msg_type, user_msgp, typesz); 1199 mutex_enter(&msgmutex); 1200 if (error != 0) { 1201 MSG_PRINTF(("error (%d) copying out message type\n", error)); 1202 msg_freehdr(msghdr); 1203 cv_broadcast(&msq->msq_cv); 1204 goto unlock; 1205 } 1206 user_msgp += typesz; 1207 1208 /* 1209 * Return the segments to the user 1210 */ 1211 1212 next = msghdr->msg_spot; 1213 for (len = 0; len < msgsz; len += msginfo.msgssz) { 1214 size_t tlen; 1215 KASSERT(next > -1); 1216 KASSERT(next < msginfo.msgseg); 1217 1218 if (msgsz - len > msginfo.msgssz) 1219 tlen = msginfo.msgssz; 1220 else 1221 tlen = msgsz - len; 1222 mutex_exit(&msgmutex); 1223 error = copyout(&msgpool[next * msginfo.msgssz], 1224 user_msgp, tlen); 1225 mutex_enter(&msgmutex); 1226 if (error != 0) { 1227 MSG_PRINTF(("error (%d) copying out message segment\n", 1228 error)); 1229 msg_freehdr(msghdr); 1230 cv_broadcast(&msq->msq_cv); 1231 goto unlock; 1232 } 1233 user_msgp += tlen; 1234 next = msgmaps[next].next; 1235 } 1236 1237 /* 1238 * Done, return the actual number of bytes copied out. 1239 */ 1240 1241 msg_freehdr(msghdr); 1242 cv_broadcast(&msq->msq_cv); 1243 *retval = msgsz; 1244 1245 unlock: 1246 mutex_exit(&msgmutex); 1247 return error; 1248 } 1249 1250 /* 1251 * Sysctl initialization and nodes. 1252 */ 1253 1254 static int 1255 sysctl_ipc_msgmni(SYSCTLFN_ARGS) 1256 { 1257 int newsize, error; 1258 struct sysctlnode node; 1259 node = *rnode; 1260 node.sysctl_data = &newsize; 1261 1262 newsize = msginfo.msgmni; 1263 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1264 if (error || newp == NULL) 1265 return error; 1266 1267 sysctl_unlock(); 1268 error = msgrealloc(newsize, msginfo.msgseg); 1269 sysctl_relock(); 1270 return error; 1271 } 1272 1273 static int 1274 sysctl_ipc_msgseg(SYSCTLFN_ARGS) 1275 { 1276 int newsize, error; 1277 struct sysctlnode node; 1278 node = *rnode; 1279 node.sysctl_data = &newsize; 1280 1281 newsize = msginfo.msgseg; 1282 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1283 if (error || newp == NULL) 1284 return error; 1285 1286 sysctl_unlock(); 1287 error = msgrealloc(msginfo.msgmni, newsize); 1288 sysctl_relock(); 1289 return error; 1290 } 1291 1292 SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup") 1293 { 1294 const struct sysctlnode *node = NULL; 1295 1296 sysctl_createv(clog, 0, NULL, &node, 1297 CTLFLAG_PERMANENT, 1298 CTLTYPE_NODE, "ipc", 1299 SYSCTL_DESCR("SysV IPC options"), 1300 NULL, 0, NULL, 0, 1301 CTL_KERN, KERN_SYSVIPC, CTL_EOL); 1302 1303 if (node == NULL) 1304 return; 1305 1306 sysctl_createv(clog, 0, &node, NULL, 1307 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 1308 CTLTYPE_INT, "msgmni", 1309 SYSCTL_DESCR("Max number of message queue identifiers"), 1310 sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0, 1311 CTL_CREATE, CTL_EOL); 1312 sysctl_createv(clog, 0, &node, NULL, 1313 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 1314 CTLTYPE_INT, "msgseg", 1315 SYSCTL_DESCR("Max number of number of message segments"), 1316 sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0, 1317 CTL_CREATE, CTL_EOL); 1318 } 1319