/*	$NetBSD: sysv_msg.c,v 1.72 2018/03/30 22:54:37 maya Exp $	*/

/*-
 * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID messages
 *
 * Author: Daniel Boulet
 *
 * Copyright 1993 Daniel Boulet and RTMX Inc.
 *
 * This system call was implemented by Daniel Boulet under contract from RTMX.
 *
 * Redistribution and use in source forms, with and without modification,
 * are permitted provided that this entire comment appears intact.
 *
 * Redistribution in binary form may occur without any restrictions.
 * Obviously, it would be nice if you gave credit where credit is due
 * but requiring it would be too onerous.
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.72 2018/03/30 22:54:37 maya Exp $");

#ifdef _KERNEL_OPT
#include "opt_sysv.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/msg.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#define MSG_DEBUG
#undef MSG_DEBUG_OK

#ifdef MSG_DEBUG_OK
#define MSG_PRINTF(a)	printf a
#else
#define MSG_PRINTF(a)
#endif

static int	nfree_msgmaps;		/* # of free map entries */
static short	free_msgmaps;		/* head of linked list of free map entries */
static struct __msg *free_msghdrs;	/* list of free msg headers */
static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
static struct msgmap *msgmaps;		/* MSGSEG msgmap structures */
static struct __msg *msghdrs;		/* MSGTQL msg headers */

kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
kmutex_t msgmutex;			/* subsystem lock */

static u_int	msg_waiters = 0;	/* total number of msgrcv waiters */
static bool	msg_realloc_state;
static kcondvar_t msg_realloc_cv;

static void msg_freehdr(struct __msg *);

extern int kern_has_sysvmsg;

SYSCTL_SETUP_PROTO(sysctl_ipc_msg_setup);

void
msginit(struct sysctllog **clog)
{
	int i, sz;
	vaddr_t v;

	/*
	 * msginfo.msgssz should be a power of two for efficiency reasons.
	 * It is also pretty silly if msginfo.msgssz is less than 8
	 * or greater than about 256 so ...
	 */

	i = 8;
	while (i < 1024 && i != msginfo.msgssz)
		i <<= 1;
	if (i != msginfo.msgssz) {
		panic("msginfo.msgssz = %d, not a small power of 2",
		    msginfo.msgssz);
	}

	if (msginfo.msgseg > 32767) {
		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
	}

	/* Allocate the wired memory for our structures */
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_msg: cannot allocate memory");
	msgpool = (void *)v;
	msgmaps = (void *)((uintptr_t)msgpool + ALIGN(msginfo.msgmax));
	msghdrs = (void *)((uintptr_t)msgmaps +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)));
	msqs = (void *)((uintptr_t)msghdrs +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));

	for (i = 0; i < (msginfo.msgseg - 1); i++)
		msgmaps[i].next = i + 1;
	msgmaps[msginfo.msgseg - 1].next = -1;

	free_msgmaps = 0;
	nfree_msgmaps = msginfo.msgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		msghdrs[i].msg_type = 0;
		msghdrs[i].msg_next = &msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	msghdrs[i].msg_type = 0;
	msghdrs[i].msg_next = NULL;
	free_msghdrs = &msghdrs[0];

	for (i = 0; i < msginfo.msgmni; i++) {
		cv_init(&msqs[i].msq_cv, "msgwait");
		/* Implies entry is available */
		msqs[i].msq_u.msg_qbytes = 0;
		/* Reset to a known value */
		msqs[i].msq_u.msg_perm._seq = 0;
	}

	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&msg_realloc_cv, "msgrealc");
	msg_realloc_state = false;

	kern_has_sysvmsg = 1;

#ifdef _MODULE
	if (clog)
		sysctl_ipc_msg_setup(clog);
#endif
}

int
msgfini(void)
{
	int i, sz;
	vaddr_t v = (vaddr_t)msgpool;
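
	/*
	 * The single wired allocation made in msginit() holds msgpool,
	 * msgmaps, msghdrs and msqs back to back (each ALIGN()ed, the
	 * total rounded up to a page), so the same size is recomputed
	 * below and the whole block is released in one uvm_km_free().
	 */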

	mutex_enter(&msgmutex);
	for (i = 0; i < msginfo.msgmni; i++) {
		if (msqs[i].msq_u.msg_qbytes != 0) {
			mutex_exit(&msgmutex);
			return 1; /* queue not available, prevent unload! */
		}
	}
	/*
	 * Destroy all condvars and free the memory we're using
	 */
	for (i = 0; i < msginfo.msgmni; i++) {
		cv_destroy(&msqs[i].msq_cv);
	}
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);

	cv_destroy(&msg_realloc_cv);
	mutex_exit(&msgmutex);
	mutex_destroy(&msgmutex);

	kern_has_sysvmsg = 0;

	return 0;
}

static int
msgrealloc(int newmsgmni, int newmsgseg)
{
	struct msgmap *new_msgmaps;
	struct __msg *new_msghdrs, *new_free_msghdrs;
	char *old_msgpool, *new_msgpool;
	kmsq_t *new_msqs;
	vaddr_t v;
	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
	short new_free_msgmaps;

	if (newmsgmni < 1 || newmsgseg < 1)
		return EINVAL;

	/* Allocate the wired memory for our structures */
	newmsgmax = msginfo.msgssz * newmsgseg;
	sz = ALIGN(newmsgmax) +
	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(newmsgmni * sizeof(kmsq_t));
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&msgmutex);
	if (msg_realloc_state) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	msg_realloc_state = true;
	if (msg_waiters) {
		/*
		 * Mark reallocation state, wake up all waiters,
		 * and wait until they have all exited.
		 */
		for (i = 0; i < msginfo.msgmni; i++)
			cv_broadcast(&msqs[i].msq_cv);
		while (msg_waiters)
			cv_wait(&msg_realloc_cv, &msgmutex);
	}
	old_msgpool = msgpool;

	/* We cannot reallocate less memory than we use */
	i = 0;
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct msqid_ds *mptr;
		kmsq_t *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;
		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
			i = msqid;
	}
	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}

	new_msgpool = (void *)v;
	new_msgmaps = (void *)((uintptr_t)new_msgpool + ALIGN(newmsgmax));
	new_msghdrs = (void *)((uintptr_t)new_msgmaps +
	    ALIGN(newmsgseg * sizeof(struct msgmap)));
	new_msqs = (void *)((uintptr_t)new_msghdrs +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)));

	/* Initialize the structures */
	for (i = 0; i < (newmsgseg - 1); i++)
		new_msgmaps[i].next = i + 1;
	new_msgmaps[newmsgseg - 1].next = -1;
	new_free_msgmaps = 0;
	new_nfree_msgmaps = newmsgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		new_msghdrs[i].msg_type = 0;
		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	new_msghdrs[i].msg_type = 0;
	new_msghdrs[i].msg_next = NULL;
	new_free_msghdrs = &new_msghdrs[0];

	for (i = 0; i < newmsgmni; i++) {
		new_msqs[i].msq_u.msg_qbytes = 0;
		new_msqs[i].msq_u.msg_perm._seq = 0;
		cv_init(&new_msqs[i].msq_cv, "msgwait");
	}

	/*
	 * Copy all message queue identifiers, message headers and buffer
	 * pools to the new memory location.
	 */
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct __msg *nmsghdr, *msghdr, *pmsghdr;
		struct msqid_ds *nmptr, *mptr;
		kmsq_t *nmsq, *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;

		if (mptr->msg_qbytes == 0 &&
		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
			continue;

		nmsq = &new_msqs[msqid];
		nmptr = &nmsq->msq_u;
		memcpy(nmptr, mptr, sizeof(struct msqid_ds));

		/*
		 * Go through the message headers and copy each one into a
		 * freshly taken header and freshly taken segments, thus
		 * defragmenting the pool.
		 */
		nmsghdr = pmsghdr = NULL;
		msghdr = mptr->_msg_first;
		while (msghdr) {
			short nnext = 0, next;
			u_short msgsz, segcnt;

			/* Take an entry from the new list of free msghdrs */
			nmsghdr = new_free_msghdrs;
			KASSERT(nmsghdr != NULL);
			new_free_msghdrs = nmsghdr->msg_next;

			nmsghdr->msg_next = NULL;
			if (pmsghdr) {
				pmsghdr->msg_next = nmsghdr;
			} else {
				nmptr->_msg_first = nmsghdr;
				pmsghdr = nmsghdr;
			}
			nmsghdr->msg_ts = msghdr->msg_ts;
			nmsghdr->msg_spot = -1;

			/* Compute the amount of segments and reserve them */
			msgsz = msghdr->msg_ts;
			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
			if (segcnt == 0)
				continue;
			while (segcnt--) {
				nnext = new_free_msgmaps;
				new_free_msgmaps = new_msgmaps[nnext].next;
				new_nfree_msgmaps--;
				new_msgmaps[nnext].next = nmsghdr->msg_spot;
				nmsghdr->msg_spot = nnext;
			}

			/* Copy all segments */
			KASSERT(nnext == nmsghdr->msg_spot);
			next = msghdr->msg_spot;
			while (msgsz > 0) {
				size_t tlen;

				if (msgsz >= msginfo.msgssz) {
					tlen = msginfo.msgssz;
					msgsz -= msginfo.msgssz;
				} else {
					tlen = msgsz;
					msgsz = 0;
				}

				/* Copy the message buffer */
				memcpy(&new_msgpool[nnext * msginfo.msgssz],
				    &msgpool[next * msginfo.msgssz], tlen);

				/* Next entry of the map */
				nnext = msgmaps[nnext].next;
				next = msgmaps[next].next;
			}

			/* Next message header */
			msghdr = msghdr->msg_next;
		}
		nmptr->_msg_last = nmsghdr;
	}
	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
	    (newmsgseg - new_nfree_msgmaps));

	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	sz = round_page(sz);

	for (i = 0; i < msginfo.msgmni; i++)
		cv_destroy(&msqs[i].msq_cv);

	/* Set the pointers and update the new values */
	msgpool = new_msgpool;
	msgmaps = new_msgmaps;
	msghdrs = new_msghdrs;
	msqs = new_msqs;

	free_msghdrs = new_free_msghdrs;
	free_msgmaps = new_free_msgmaps;
	nfree_msgmaps = new_nfree_msgmaps;
	msginfo.msgmni = newmsgmni;
	msginfo.msgseg = newmsgseg;
	msginfo.msgmax = newmsgmax;

	/* Reallocation completed - notify all waiters, if any */
	msg_realloc_state = false;
	cv_broadcast(&msg_realloc_cv);
	mutex_exit(&msgmutex);

	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
	return 0;
}

static void
msg_freehdr(struct __msg *msghdr)
{

	KASSERT(mutex_owned(&msgmutex));

	while (msghdr->msg_ts > 0) {
		short next;
		KASSERT(msghdr->msg_spot >= 0);
		KASSERT(msghdr->msg_spot < msginfo.msgseg);

		next = msgmaps[msghdr->msg_spot].next;
		msgmaps[msghdr->msg_spot].next = free_msgmaps;
		free_msgmaps = msghdr->msg_spot;
		nfree_msgmaps++;
		msghdr->msg_spot = next;
		if (msghdr->msg_ts >= msginfo.msgssz)
			msghdr->msg_ts -= msginfo.msgssz;
		else
			msghdr->msg_ts = 0;
	}
	KASSERT(msghdr->msg_spot == -1);
	msghdr->msg_next = free_msghdrs;
	free_msghdrs = msghdr;
}

int
sys___msgctl50(struct lwp *l, const struct sys___msgctl50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(int) cmd;
		syscallarg(struct msqid_ds *) buf;
	} */
	struct msqid_ds msqbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
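		/*
		 * The caller's replacement settings are copied in from
		 * userspace here, before msgctl1() takes msgmutex; for
		 * IPC_STAT the same stack buffer is filled in by msgctl1()
		 * and copied out below.
		 */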
		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
		if (error)
			return (error);
	}

	error = msgctl1(l, SCARG(uap, msqid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));

	return (error);
}

int
msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
{
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	kmsq_t *msq;
	int error = 0, ix;

	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));

	ix = IPCID_TO_IX(msqid);

	mutex_enter(&msgmutex);

	if (ix < 0 || ix >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[ix];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such msqid\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	switch (cmd) {
	case IPC_RMID:
	{
		struct __msg *msghdr;
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
			break;
		/* Free the message headers */
		msghdr = msqptr->_msg_first;
		while (msghdr != NULL) {
			struct __msg *msghdr_tmp;

			/* Free the segments of each message */
			msqptr->_msg_cbytes -= msghdr->msg_ts;
			msqptr->msg_qnum--;
			msghdr_tmp = msghdr;
			msghdr = msghdr->msg_next;
			msg_freehdr(msghdr_tmp);
		}
		KASSERT(msqptr->_msg_cbytes == 0);
		KASSERT(msqptr->msg_qnum == 0);

		/* Mark it as free */
		msqptr->msg_qbytes = 0;
		cv_broadcast(&msq->msq_cv);
	}
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
			break;
		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_SYSVIPC,
		    KAUTH_REQ_SYSTEM_SYSVIPC_MSGQ_OVERSIZE,
		    KAUTH_ARG(msqbuf->msg_qbytes),
		    KAUTH_ARG(msqptr->msg_qbytes), NULL) != 0) {
			error = EPERM;
			break;
		}
		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
			    "(truncating)\n", msginfo.msgmnb));
			/* silently restrict qbytes to system limit */
			msqbuf->msg_qbytes = msginfo.msgmnb;
		}
		if (msqbuf->msg_qbytes == 0) {
			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
			error = EINVAL;		/* XXX non-standard errno! */
			break;
		}
		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
		    (msqbuf->msg_perm.mode & 0777);
		msqptr->msg_qbytes = msqbuf->msg_qbytes;
		msqptr->msg_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
			MSG_PRINTF(("requester doesn't have read access\n"));
			break;
		}
		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
		break;

	default:
		MSG_PRINTF(("invalid command %d\n", cmd));
		error = EINVAL;
		break;
	}

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgget(struct lwp *l, const struct sys_msgget_args *uap, register_t *retval)
{
	/* {
		syscallarg(key_t) key;
		syscallarg(int) msgflg;
	} */
	int msqid, error = 0;
	int key = SCARG(uap, key);
	int msgflg = SCARG(uap, msgflg);
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr = NULL;
	kmsq_t *msq;

	mutex_enter(&msgmutex);

	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));

	if (key != IPC_PRIVATE) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes != 0 &&
			    msqptr->msg_perm._key == key)
				break;
		}
		if (msqid < msginfo.msgmni) {
			MSG_PRINTF(("found public key\n"));
			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
				MSG_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto unlock;
			}
			if ((error = ipcperm(cred, &msqptr->msg_perm,
			    msgflg & 0700 ))) {
				MSG_PRINTF(("requester doesn't have 0%o access\n",
				    msgflg & 0700));
				goto unlock;
			}
			goto found;
		}
	}

	MSG_PRINTF(("need to allocate the msqid_ds\n"));
	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			/*
			 * Look for an unallocated and unlocked msqid_ds.
			 * msqid_ds's can be locked by msgsnd or msgrcv while
			 * they are copying the message in/out.  We can't
			 * re-use the entry until they release it.
			 */
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes == 0 &&
			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
				break;
		}
		if (msqid == msginfo.msgmni) {
			MSG_PRINTF(("no more msqid_ds's available\n"));
			error = ENOSPC;
			goto unlock;
		}
		MSG_PRINTF(("msqid %d is available\n", msqid));
		msqptr->msg_perm._key = key;
		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
		msqptr->msg_perm.mode = (msgflg & 0777);
		/* Make sure that the returned msqid is unique */
		msqptr->msg_perm._seq++;
		msqptr->_msg_first = NULL;
		msqptr->_msg_last = NULL;
		msqptr->_msg_cbytes = 0;
		msqptr->msg_qnum = 0;
		msqptr->msg_qbytes = msginfo.msgmnb;
		msqptr->msg_lspid = 0;
		msqptr->msg_lrpid = 0;
		msqptr->msg_stime = 0;
		msqptr->msg_rtime = 0;
		msqptr->msg_ctime = time_second;
	} else {
		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto unlock;
	}

 found:
	/* Construct the unique msqid */
	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgsnd(struct lwp *l, const struct sys_msgsnd_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(const void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(int) msgflg;
	} */

	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
}

int
msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
    int msgflg, size_t typesz, copyin_t fetch_type)
{
	int segs_needed, error = 0, msqid;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
		MSG_PRINTF(("requester doesn't have write access\n"));
		goto unlock;
	}

	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
	    (long long)msgsz, msginfo.msgssz, segs_needed));
	for (;;) {
		int need_more_resources = 0;

		/*
		 * check msgsz [cannot be negative since it is unsigned]
		 * (inside this loop in case msg_qbytes changes while we sleep)
		 */

		if (msgsz > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
			error = EINVAL;
			goto unlock;
		}

		if (msqptr->msg_perm.mode & MSG_LOCKED) {
			MSG_PRINTF(("msqid is locked\n"));
			need_more_resources = 1;
		}
		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
			need_more_resources = 1;
		}
		if (segs_needed > nfree_msgmaps) {
			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
			need_more_resources = 1;
		}
		if (free_msghdrs == NULL) {
			MSG_PRINTF(("no more msghdrs\n"));
			need_more_resources = 1;
		}

		if (need_more_resources) {
			int we_own_it;

			if ((msgflg & IPC_NOWAIT) != 0) {
				MSG_PRINTF(("need more resources but caller "
				    "doesn't want to wait\n"));
				error = EAGAIN;
				goto unlock;
			}

			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
				MSG_PRINTF(("we don't own the msqid_ds\n"));
				we_own_it = 0;
			} else {
				/* Force later arrivals to wait for our
				   request */
				MSG_PRINTF(("we own the msqid_ds\n"));
				msqptr->msg_perm.mode |= MSG_LOCKED;
				we_own_it = 1;
			}

			msg_waiters++;
			MSG_PRINTF(("goodnight\n"));
			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
			MSG_PRINTF(("good morning, error=%d\n", error));
			msg_waiters--;

			if (we_own_it)
				msqptr->msg_perm.mode &= ~MSG_LOCKED;

			/*
			 * If a reallocation is in progress, notify the
			 * reallocator and restart the call.
			 */
			if (msg_realloc_state) {
				cv_broadcast(&msg_realloc_cv);
				mutex_exit(&msgmutex);
				goto restart;
			}

			if (error != 0) {
				MSG_PRINTF(("msgsnd: interrupted system "
				    "call\n"));
				error = EINTR;
				goto unlock;
			}

			/*
			 * Make sure that the msq queue still exists
			 */

			if (msqptr->msg_qbytes == 0) {
				MSG_PRINTF(("msqid deleted\n"));
				error = EIDRM;
				goto unlock;
			}
		} else {
			MSG_PRINTF(("got all the resources that we need\n"));
			break;
		}
	}

	/*
	 * We have the resources that we need.
	 * Make sure!
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	KASSERT(segs_needed <= nfree_msgmaps);
	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
	KASSERT(free_msghdrs != NULL);

	/*
	 * Re-lock the msqid_ds in case we page-fault when copying in the
	 * message
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	msqptr->msg_perm.mode |= MSG_LOCKED;

	/*
	 * Allocate a message header
	 */

	msghdr = free_msghdrs;
	free_msghdrs = msghdr->msg_next;
	msghdr->msg_spot = -1;
	msghdr->msg_ts = msgsz;

	/*
	 * Allocate space for the message
	 */

	while (segs_needed > 0) {
		KASSERT(nfree_msgmaps > 0);
		KASSERT(free_msgmaps != -1);
		KASSERT(free_msgmaps < msginfo.msgseg);

		next = free_msgmaps;
		MSG_PRINTF(("allocating segment %d to message\n", next));
		free_msgmaps = msgmaps[next].next;
		nfree_msgmaps--;
		msgmaps[next].next = msghdr->msg_spot;
		msghdr->msg_spot = next;
		segs_needed--;
	}

	/*
	 * Copy in the message type
	 */
	mutex_exit(&msgmutex);
	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error %d copying the message type\n", error));
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Validate the message type
	 */

	if (msghdr->msg_type < 1) {
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
		error = EINVAL;
		goto unlock;
	}

	/*
	 * Copy in the message body
	 */

	next = msghdr->msg_spot;
	while (msgsz > 0) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz;
		mutex_exit(&msgmutex);
		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error %d copying in message segment\n",
			    error));
			msg_freehdr(msghdr);
			msqptr->msg_perm.mode &= ~MSG_LOCKED;
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		msgsz -= tlen;
		user_msgp += tlen;
		next = msgmaps[next].next;
	}
	KASSERT(next == -1);

	/*
	 * We've got the message.  Unlock the msqid_ds.
	 */

	msqptr->msg_perm.mode &= ~MSG_LOCKED;

	/*
	 * Make sure that the msqid_ds is still allocated.
	 */

	if (msqptr->msg_qbytes == 0) {
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		error = EIDRM;
		goto unlock;
	}

	/*
	 * Put the message into the queue
	 */

	if (msqptr->_msg_first == NULL) {
		msqptr->_msg_first = msghdr;
		msqptr->_msg_last = msghdr;
	} else {
		msqptr->_msg_last->msg_next = msghdr;
		msqptr->_msg_last = msghdr;
	}
	msqptr->_msg_last->msg_next = NULL;

	msqptr->_msg_cbytes += msghdr->msg_ts;
	msqptr->msg_qnum++;
	msqptr->msg_lspid = l->l_proc->p_pid;
	msqptr->msg_stime = time_second;

	cv_broadcast(&msq->msq_cv);

 unlock:
	mutex_exit(&msgmutex);
	return error;
}

int
sys_msgrcv(struct lwp *l, const struct sys_msgrcv_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) msqid;
		syscallarg(void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(long) msgtyp;
		syscallarg(int) msgflg;
	} */

	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
	    sizeof(long), copyout, retval);
}

int
msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
    int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
{
	size_t len;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	int error = 0, msqid;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgtyp, msgflg));

	if ((ssize_t)msgsz < 0)
		return EINVAL;

 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
		MSG_PRINTF(("requester doesn't have read access\n"));
		goto unlock;
	}

	msghdr = NULL;
	while (msghdr == NULL) {
		if (msgtyp == 0) {
			msghdr = msqptr->_msg_first;
			if (msghdr != NULL) {
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("first msg on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				if (msqptr->_msg_first == msqptr->_msg_last) {
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					msqptr->_msg_first = msghdr->msg_next;
					KASSERT(msqptr->_msg_first != NULL);
				}
			}
		} else {
			struct __msg *previous;
			struct __msg **prev;

			for (previous = NULL, prev = &msqptr->_msg_first;
			     (msghdr = *prev) != NULL;
			     previous = msghdr, prev = &msghdr->msg_next) {
				/*
				 * Is this message's type an exact match or is
				 * this message's type less than or equal to
				 * the absolute value of a negative msgtyp?
				 * Note that the second half of this test can
				 * NEVER be true if msgtyp is positive since
				 * msg_type is always positive!
				 */

				if (msgtyp != msghdr->msg_type &&
				    msghdr->msg_type > -msgtyp)
					continue;

				MSG_PRINTF(("found message type %ld, requested %ld\n",
				    msghdr->msg_type, msgtyp));
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("requested message on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				*prev = msghdr->msg_next;
				if (msghdr != msqptr->_msg_last)
					break;
				if (previous == NULL) {
					KASSERT(prev == &msqptr->_msg_first);
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					KASSERT(prev != &msqptr->_msg_first);
					msqptr->_msg_last = previous;
				}
				break;
			}
		}

		/*
		 * We've either extracted the msghdr for the appropriate
		 * message or there isn't one.
		 * If there is one then bail out of this loop.
		 */
		if (msghdr != NULL)
			break;

		/*
		 * Hmph!  No message found.  Does the user want to wait?
		 */

		if ((msgflg & IPC_NOWAIT) != 0) {
			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
			    msgtyp));
			error = ENOMSG;
			goto unlock;
		}

		/*
		 * Wait for something to happen
		 */

		msg_waiters++;
		MSG_PRINTF(("msgrcv: goodnight\n"));
		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
		msg_waiters--;

		/*
		 * If a reallocation is in progress, notify the
		 * reallocator and restart the call.
		 */
		if (msg_realloc_state) {
			cv_broadcast(&msg_realloc_cv);
			mutex_exit(&msgmutex);
			goto restart;
		}

		if (error != 0) {
			MSG_PRINTF(("msgrcv: interrupted system call\n"));
			error = EINTR;
			goto unlock;
		}

		/*
		 * Make sure that the msq queue still exists
		 */

		if (msqptr->msg_qbytes == 0 ||
		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
			MSG_PRINTF(("msqid deleted\n"));
			error = EIDRM;
			goto unlock;
		}
	}

	/*
	 * Return the message to the user.
	 *
	 * First, do the bookkeeping (before we risk being interrupted).
	 */

	msqptr->_msg_cbytes -= msghdr->msg_ts;
	msqptr->msg_qnum--;
	msqptr->msg_lrpid = l->l_proc->p_pid;
	msqptr->msg_rtime = time_second;

	/*
	 * Make msgsz the actual amount that we'll be returning.
	 * Note that this effectively truncates the message if it is too long
	 * (since msgsz is never increased).
	 */

	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
	    (long long)msgsz, msghdr->msg_ts));
	if (msgsz > msghdr->msg_ts)
		msgsz = msghdr->msg_ts;

	/*
	 * Return the type to the user.
	 */
	mutex_exit(&msgmutex);
	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error (%d) copying out message type\n", error));
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Return the segments to the user
	 */

	next = msghdr->msg_spot;
	for (len = 0; len < msgsz; len += msginfo.msgssz) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz - len > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz - len;
		mutex_exit(&msgmutex);
		error = copyout(&msgpool[next * msginfo.msgssz],
		    user_msgp, tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error (%d) copying out message segment\n",
			    error));
			msg_freehdr(msghdr);
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		user_msgp += tlen;
		next = msgmaps[next].next;
	}

	/*
	 * Done, return the actual number of bytes copied out.
	 */

	msg_freehdr(msghdr);
	cv_broadcast(&msq->msq_cv);
	*retval = msgsz;

 unlock:
	mutex_exit(&msgmutex);
	return error;
}

/*
 * Sysctl initialization and nodes.
 */

static int
sysctl_ipc_msgmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = msgrealloc(newsize, msginfo.msgseg);
	sysctl_relock();
	return error;
}

static int
sysctl_ipc_msgseg(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgseg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	sysctl_unlock();
	error = msgrealloc(msginfo.msgmni, newsize);
	sysctl_relock();
	return error;
}

SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ipc",
	    SYSCTL_DESCR("SysV IPC options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "msgmni",
	    SYSCTL_DESCR("Max number of message queue identifiers"),
	    sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "msgseg",
	    SYSCTL_DESCR("Max number of message segments"),
	    sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
	    CTL_CREATE, CTL_EOL);
}
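
/*
 * Illustrative userland view of the interface implemented above (a
 * minimal sketch, not part of the kernel source proper; error handling
 * omitted).  The -2 passed to msgrcv() asks for the first queued message
 * whose type is less than or equal to 2, per the matching rule in
 * msgrcv1():
 *
 *	struct mymsg { long mtype; char mtext[64]; } m = { 2, "hi" };
 *	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *	msgrcv(id, &m, sizeof(m.mtext), -2, 0);
 *	msgctl(id, IPC_RMID, NULL);
 */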