/*	$NetBSD: sysv_msg.c,v 1.53 2007/11/25 19:03:24 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID messages
 *
 * Author: Daniel Boulet
 *
 * Copyright 1993 Daniel Boulet and RTMX Inc.
 *
 * This system call was implemented by Daniel Boulet under contract from RTMX.
 *
 * Redistribution and use in source forms, with and without modification,
 * are permitted provided that this entire comment appears intact.
 *
 * Redistribution in binary form may occur without any restrictions.
 * Obviously, it would be nice if you gave credit where credit is due
 * but requiring it would be too onerous.
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_msg.c,v 1.53 2007/11/25 19:03:24 rmind Exp $");

#define SYSVMSG

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/msg.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#define MSG_DEBUG
#undef MSG_DEBUG_OK

#ifdef MSG_DEBUG_OK
#define MSG_PRINTF(a)	printf a
#else
#define MSG_PRINTF(a)
#endif

static int	nfree_msgmaps;		/* # of free map entries */
static short	free_msgmaps;		/* head of linked list of free map entries */
static struct __msg *free_msghdrs;	/* list of free msg headers */
static char	*msgpool;		/* MSGMAX byte long msg buffer pool */
static struct msgmap *msgmaps;		/* MSGSEG msgmap structures */
static struct __msg *msghdrs;		/* MSGTQL msg headers */

kmsq_t	*msqs;				/* MSGMNI msqid_ds struct's */
kmutex_t msgmutex;			/* subsystem lock */

static u_int	msg_waiters = 0;	/* total number of msgrcv waiters */
static bool	msg_realloc_state;
static kcondvar_t msg_realloc_cv;

static void msg_freehdr(struct __msg *);

void
msginit(void)
{
	int i, sz;
	vaddr_t v;

	/*
	 * msginfo.msgssz should be a power of two for efficiency reasons.
	 * It is also pretty silly if msginfo.msgssz is less than 8
	 * or greater than about 256 so ...
	 */

	i = 8;
	while (i < 1024 && i != msginfo.msgssz)
		i <<= 1;
	if (i != msginfo.msgssz) {
		panic("msginfo.msgssz = %d, not a small power of 2",
		    msginfo.msgssz);
	}

	if (msginfo.msgseg > 32767) {
		panic("msginfo.msgseg = %d > 32767", msginfo.msgseg);
	}

	/* Allocate the wired memory for our structures */
	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_msg: cannot allocate memory");
	msgpool = (void *)v;
	msgmaps = (void *)(ALIGN(msgpool) + msginfo.msgmax);
	msghdrs = (void *)(ALIGN(msgmaps) +
	    msginfo.msgseg * sizeof(struct msgmap));
	msqs = (void *)(ALIGN(msghdrs) +
	    msginfo.msgtql * sizeof(struct __msg));

	for (i = 0; i < (msginfo.msgseg - 1); i++)
		msgmaps[i].next = i + 1;
	msgmaps[msginfo.msgseg - 1].next = -1;

	free_msgmaps = 0;
	nfree_msgmaps = msginfo.msgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		msghdrs[i].msg_type = 0;
		msghdrs[i].msg_next = &msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	msghdrs[i].msg_type = 0;
	msghdrs[i].msg_next = NULL;
	free_msghdrs = &msghdrs[0];

	for (i = 0; i < msginfo.msgmni; i++) {
		cv_init(&msqs[i].msq_cv, "msgwait");
		/* Implies entry is available */
		msqs[i].msq_u.msg_qbytes = 0;
		/* Reset to a known value */
		msqs[i].msq_u.msg_perm._seq = 0;
	}

	mutex_init(&msgmutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&msg_realloc_cv, "msgrealc");
	msg_realloc_state = false;
}

static int
msgrealloc(int newmsgmni, int newmsgseg)
{
	struct msgmap *new_msgmaps;
	struct __msg *new_msghdrs, *new_free_msghdrs;
	char *old_msgpool, *new_msgpool;
	kmsq_t *new_msqs;
	vaddr_t v;
	int i, sz, msqid, newmsgmax, new_nfree_msgmaps;
	short new_free_msgmaps;

	if (newmsgmni < 1 || newmsgseg < 1)
		return EINVAL;

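	/*
	 * Reallocation proceeds in stages: allocate a fresh wired arena
	 * sized for the new limits, quiesce every msgsnd()/msgrcv() waiter,
	 * copy (and defragment) all live queues into the new arena, switch
	 * the global pointers over, and finally free the old arena.
	 */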
	/* Allocate the wired memory for our structures */
	newmsgmax = msginfo.msgssz * newmsgseg;
	sz = ALIGN(newmsgmax) +
	    ALIGN(newmsgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(newmsgmni * sizeof(kmsq_t));
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&msgmutex);
	if (msg_realloc_state) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	msg_realloc_state = true;
	if (msg_waiters) {
		/*
		 * Mark the reallocation state, wake up all waiters,
		 * and wait until they have all left.
		 */
		for (i = 0; i < msginfo.msgmni; i++)
			cv_broadcast(&msqs[i].msq_cv);
		while (msg_waiters)
			cv_wait(&msg_realloc_cv, &msgmutex);
	}
	old_msgpool = msgpool;

	/* We cannot reallocate less memory than we use */
	i = 0;
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct msqid_ds *mptr;
		kmsq_t *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;
		if (mptr->msg_qbytes || (mptr->msg_perm.mode & MSG_LOCKED))
			i = msqid;
	}
	if (i >= newmsgmni || (msginfo.msgseg - nfree_msgmaps) > newmsgseg) {
		mutex_exit(&msgmutex);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}

	new_msgpool = (void *)v;
	new_msgmaps = (void *)(ALIGN(new_msgpool) + newmsgmax);
	new_msghdrs = (void *)(ALIGN(new_msgmaps) +
	    newmsgseg * sizeof(struct msgmap));
	new_msqs = (void *)(ALIGN(new_msghdrs) +
	    msginfo.msgtql * sizeof(struct __msg));

	/* Initialize the structures */
	for (i = 0; i < (newmsgseg - 1); i++)
		new_msgmaps[i].next = i + 1;
	new_msgmaps[newmsgseg - 1].next = -1;
	new_free_msgmaps = 0;
	new_nfree_msgmaps = newmsgseg;

	for (i = 0; i < (msginfo.msgtql - 1); i++) {
		new_msghdrs[i].msg_type = 0;
		new_msghdrs[i].msg_next = &new_msghdrs[i + 1];
	}
	i = msginfo.msgtql - 1;
	new_msghdrs[i].msg_type = 0;
	new_msghdrs[i].msg_next = NULL;
	new_free_msghdrs = &new_msghdrs[0];

	for (i = 0; i < newmsgmni; i++) {
		new_msqs[i].msq_u.msg_qbytes = 0;
		new_msqs[i].msq_u.msg_perm._seq = 0;
		cv_init(&new_msqs[i].msq_cv, "msgwait");
	}

	/*
	 * Copy all message queue identifiers, message headers and buffer
	 * pools to the new memory location.
	 */
	for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
		struct __msg *nmsghdr, *msghdr, *pmsghdr;
		struct msqid_ds *nmptr, *mptr;
		kmsq_t *nmsq, *msq;

		msq = &msqs[msqid];
		mptr = &msq->msq_u;

		if (mptr->msg_qbytes == 0 &&
		    (mptr->msg_perm.mode & MSG_LOCKED) == 0)
			continue;

		nmsq = &new_msqs[msqid];
		nmptr = &nmsq->msq_u;
		memcpy(nmptr, mptr, sizeof(struct msqid_ds));

		/*
		 * Go through the message headers and copy each one,
		 * taking fresh headers from the new list and thus
		 * defragmenting as we go.
		 */
		nmsghdr = pmsghdr = NULL;
		msghdr = mptr->_msg_first;
		while (msghdr) {
			short nnext = 0, next;
			u_short msgsz, segcnt;

			/* Take an entry from the new list of free msghdrs */
			nmsghdr = new_free_msghdrs;
			KASSERT(nmsghdr != NULL);
			new_free_msghdrs = nmsghdr->msg_next;

			nmsghdr->msg_next = NULL;
			if (pmsghdr) {
				pmsghdr->msg_next = nmsghdr;
			} else {
				nmptr->_msg_first = nmsghdr;
			}
			pmsghdr = nmsghdr;
			nmsghdr->msg_ts = msghdr->msg_ts;
			nmsghdr->msg_spot = -1;

			/* Compute the number of segments and reserve them */
			msgsz = msghdr->msg_ts;
			segcnt = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
			if (segcnt == 0) {
				/* Zero-length message: nothing to copy */
				msghdr = msghdr->msg_next;
				continue;
			}
			while (segcnt--) {
				nnext = new_free_msgmaps;
				new_free_msgmaps = new_msgmaps[nnext].next;
				new_nfree_msgmaps--;
				new_msgmaps[nnext].next = nmsghdr->msg_spot;
				nmsghdr->msg_spot = nnext;
			}

			/* Copy all segments */
			KASSERT(nnext == nmsghdr->msg_spot);
			next = msghdr->msg_spot;
			while (msgsz > 0) {
				size_t tlen;

				if (msgsz >= msginfo.msgssz) {
					tlen = msginfo.msgssz;
					msgsz -= msginfo.msgssz;
				} else {
					tlen = msgsz;
					msgsz = 0;
				}

				/* Copy the message buffer */
				memcpy(&new_msgpool[nnext * msginfo.msgssz],
				    &msgpool[next * msginfo.msgssz], tlen);

				/* Next entry of the map */
				nnext = new_msgmaps[nnext].next;
				next = msgmaps[next].next;
			}

			/* Next message header */
			msghdr = msghdr->msg_next;
		}
		nmptr->_msg_last = nmsghdr;
	}
	KASSERT((msginfo.msgseg - nfree_msgmaps) ==
	    (newmsgseg - new_nfree_msgmaps));

	sz = ALIGN(msginfo.msgmax) +
	    ALIGN(msginfo.msgseg * sizeof(struct msgmap)) +
	    ALIGN(msginfo.msgtql * sizeof(struct __msg)) +
	    ALIGN(msginfo.msgmni * sizeof(kmsq_t));

	for (i = 0; i < msginfo.msgmni; i++)
		cv_destroy(&msqs[i].msq_cv);

	/* Set the pointers and update the new values */
	msgpool = new_msgpool;
	msgmaps = new_msgmaps;
	msghdrs = new_msghdrs;
	msqs = new_msqs;

	free_msghdrs = new_free_msghdrs;
	free_msgmaps = new_free_msgmaps;
	nfree_msgmaps = new_nfree_msgmaps;
	msginfo.msgmni = newmsgmni;
	msginfo.msgseg = newmsgseg;
	msginfo.msgmax = newmsgmax;

	/* Reallocation completed - notify all waiters, if any */
	msg_realloc_state = false;
	cv_broadcast(&msg_realloc_cv);
	mutex_exit(&msgmutex);

	uvm_km_free(kernel_map, (vaddr_t)old_msgpool, sz, UVM_KMF_WIRED);
	return 0;
}

static void
msg_freehdr(struct __msg *msghdr)
{

	KASSERT(mutex_owned(&msgmutex));

	while (msghdr->msg_ts > 0) {
		short next;
		KASSERT(msghdr->msg_spot >= 0);
		KASSERT(msghdr->msg_spot < msginfo.msgseg);

		next = msgmaps[msghdr->msg_spot].next;
		msgmaps[msghdr->msg_spot].next = free_msgmaps;
		free_msgmaps = msghdr->msg_spot;
		nfree_msgmaps++;
		msghdr->msg_spot = next;
		if (msghdr->msg_ts >= msginfo.msgssz)
			msghdr->msg_ts -= msginfo.msgssz;
		else
			msghdr->msg_ts = 0;
	}
	KASSERT(msghdr->msg_spot == -1);
	msghdr->msg_next = free_msghdrs;
	free_msghdrs = msghdr;
}

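/*
 * The msgctl(2) system call: for IPC_SET the caller's msqid_ds is copied
 * in from userspace first, the request itself is handled by msgctl1(),
 * and for IPC_STAT the result is copied back out afterwards.
 */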
int
sys___msgctl13(struct lwp *l, void *v, register_t *retval)
{
	struct sys___msgctl13_args /* {
		syscallarg(int) msqid;
		syscallarg(int) cmd;
		syscallarg(struct msqid_ds *) buf;
	} */ *uap = v;
	struct msqid_ds msqbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &msqbuf, sizeof(msqbuf));
		if (error)
			return (error);
	}

	error = msgctl1(l, SCARG(uap, msqid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &msqbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&msqbuf, SCARG(uap, buf), sizeof(msqbuf));

	return (error);
}

int
msgctl1(struct lwp *l, int msqid, int cmd, struct msqid_ds *msqbuf)
{
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	kmsq_t *msq;
	int error = 0, ix;

	MSG_PRINTF(("call to msgctl1(%d, %d)\n", msqid, cmd));

	ix = IPCID_TO_IX(msqid);

	mutex_enter(&msgmutex);

	if (ix < 0 || ix >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", ix,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[ix];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such msqid\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqid)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	switch (cmd) {
	case IPC_RMID:
	{
		struct __msg *msghdr;

		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)) != 0)
			break;
		/* Free the message headers */
		msghdr = msqptr->_msg_first;
		while (msghdr != NULL) {
			struct __msg *msghdr_tmp;

			/* Free the segments of each message */
			msqptr->_msg_cbytes -= msghdr->msg_ts;
			msqptr->msg_qnum--;
			msghdr_tmp = msghdr;
			msghdr = msghdr->msg_next;
			msg_freehdr(msghdr_tmp);
		}
		KASSERT(msqptr->_msg_cbytes == 0);
		KASSERT(msqptr->msg_qnum == 0);

		/* Mark it as free */
		msqptr->msg_qbytes = 0;
		cv_broadcast(&msq->msq_cv);
	}
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_M)))
			break;
		if (msqbuf->msg_qbytes > msqptr->msg_qbytes &&
		    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
		    NULL) != 0) {
			error = EPERM;
			break;
		}
		if (msqbuf->msg_qbytes > msginfo.msgmnb) {
			MSG_PRINTF(("can't increase msg_qbytes beyond %d "
			    "(truncating)\n", msginfo.msgmnb));
			/* silently restrict qbytes to system limit */
			msqbuf->msg_qbytes = msginfo.msgmnb;
		}
		if (msqbuf->msg_qbytes == 0) {
			MSG_PRINTF(("can't reduce msg_qbytes to 0\n"));
			error = EINVAL;		/* XXX non-standard errno! */
			break;
		}
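		/*
		 * All checks passed: apply the caller-supplied ownership,
		 * permission bits and queue size limit.
		 */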
		msqptr->msg_perm.uid = msqbuf->msg_perm.uid;
		msqptr->msg_perm.gid = msqbuf->msg_perm.gid;
		msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) |
		    (msqbuf->msg_perm.mode & 0777);
		msqptr->msg_qbytes = msqbuf->msg_qbytes;
		msqptr->msg_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
			MSG_PRINTF(("requester doesn't have read access\n"));
			break;
		}
		memcpy(msqbuf, msqptr, sizeof(struct msqid_ds));
		break;

	default:
		MSG_PRINTF(("invalid command %d\n", cmd));
		error = EINVAL;
		break;
	}

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgget(struct lwp *l, void *v, register_t *retval)
{
	struct sys_msgget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) msgflg;
	} */ *uap = v;
	int msqid, error = 0;
	int key = SCARG(uap, key);
	int msgflg = SCARG(uap, msgflg);
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr = NULL;
	kmsq_t *msq;

	mutex_enter(&msgmutex);

	MSG_PRINTF(("msgget(0x%x, 0%o)\n", key, msgflg));

	if (key != IPC_PRIVATE) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes != 0 &&
			    msqptr->msg_perm._key == key)
				break;
		}
		if (msqid < msginfo.msgmni) {
			MSG_PRINTF(("found public key\n"));
			if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) {
				MSG_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto unlock;
			}
			if ((error = ipcperm(cred, &msqptr->msg_perm,
			    msgflg & 0700))) {
				MSG_PRINTF(("requester doesn't have 0%o access\n",
				    msgflg & 0700));
				goto unlock;
			}
			goto found;
		}
	}

	MSG_PRINTF(("need to allocate the msqid_ds\n"));
	if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) {
		for (msqid = 0; msqid < msginfo.msgmni; msqid++) {
			/*
			 * Look for an unallocated and unlocked msqid_ds.
			 * msqid_ds's can be locked by msgsnd or msgrcv while
			 * they are copying the message in/out.  We can't
			 * re-use the entry until they release it.
			 */
			msq = &msqs[msqid];
			msqptr = &msq->msq_u;
			if (msqptr->msg_qbytes == 0 &&
			    (msqptr->msg_perm.mode & MSG_LOCKED) == 0)
				break;
		}
		if (msqid == msginfo.msgmni) {
			MSG_PRINTF(("no more msqid_ds's available\n"));
			error = ENOSPC;
			goto unlock;
		}
		MSG_PRINTF(("msqid %d is available\n", msqid));
		msqptr->msg_perm._key = key;
		msqptr->msg_perm.cuid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.uid = kauth_cred_geteuid(cred);
		msqptr->msg_perm.cgid = kauth_cred_getegid(cred);
		msqptr->msg_perm.gid = kauth_cred_getegid(cred);
		msqptr->msg_perm.mode = (msgflg & 0777);
		/* Make sure that the returned msqid is unique */
		msqptr->msg_perm._seq++;
		msqptr->_msg_first = NULL;
		msqptr->_msg_last = NULL;
		msqptr->_msg_cbytes = 0;
		msqptr->msg_qnum = 0;
		msqptr->msg_qbytes = msginfo.msgmnb;
		msqptr->msg_lspid = 0;
		msqptr->msg_lrpid = 0;
		msqptr->msg_stime = 0;
		msqptr->msg_rtime = 0;
		msqptr->msg_ctime = time_second;
	} else {
		MSG_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto unlock;
	}

 found:
	/* Construct the unique msqid */
	*retval = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);

 unlock:
	mutex_exit(&msgmutex);
	return (error);
}

int
sys_msgsnd(struct lwp *l, void *v, register_t *retval)
{
	struct sys_msgsnd_args /* {
		syscallarg(int) msqid;
		syscallarg(const void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(int) msgflg;
	} */ *uap = v;

	return msgsnd1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgflg), sizeof(long), copyin);
}

int
msgsnd1(struct lwp *l, int msqidr, const char *user_msgp, size_t msgsz,
    int msgflg, size_t typesz, copyin_t fetch_type)
{
	int segs_needed, error = 0, msqid;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgsnd(%d, %p, %lld, %d)\n", msqidr, user_msgp,
	    (long long)msgsz, msgflg));
 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_W))) {
		MSG_PRINTF(("requester doesn't have write access\n"));
		goto unlock;
	}

	segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
	MSG_PRINTF(("msgsz=%lld, msgssz=%d, segs_needed=%d\n",
	    (long long)msgsz, msginfo.msgssz, segs_needed));
	for (;;) {
		int need_more_resources = 0;

		/*
		 * check msgsz [cannot be negative since it is unsigned]
		 * (inside this loop in case msg_qbytes changes while we sleep)
		 */

		if (msgsz > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz > msqptr->msg_qbytes\n"));
			error = EINVAL;
			goto unlock;
		}

		if (msqptr->msg_perm.mode & MSG_LOCKED) {
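			/*
			 * Another sender is copying a message into this
			 * queue (it holds MSG_LOCKED, see below); treat it
			 * as a resource shortage and wait.
			 */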
			MSG_PRINTF(("msqid is locked\n"));
			need_more_resources = 1;
		}
		if (msgsz + msqptr->_msg_cbytes > msqptr->msg_qbytes) {
			MSG_PRINTF(("msgsz + msg_cbytes > msg_qbytes\n"));
			need_more_resources = 1;
		}
		if (segs_needed > nfree_msgmaps) {
			MSG_PRINTF(("segs_needed > nfree_msgmaps\n"));
			need_more_resources = 1;
		}
		if (free_msghdrs == NULL) {
			MSG_PRINTF(("no more msghdrs\n"));
			need_more_resources = 1;
		}

		if (need_more_resources) {
			int we_own_it;

			if ((msgflg & IPC_NOWAIT) != 0) {
				MSG_PRINTF(("need more resources but caller "
				    "doesn't want to wait\n"));
				error = EAGAIN;
				goto unlock;
			}

			if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) {
				MSG_PRINTF(("we don't own the msqid_ds\n"));
				we_own_it = 0;
			} else {
				/* Force later arrivals to wait for our
				   request */
				MSG_PRINTF(("we own the msqid_ds\n"));
				msqptr->msg_perm.mode |= MSG_LOCKED;
				we_own_it = 1;
			}

			msg_waiters++;
			MSG_PRINTF(("goodnight\n"));
			error = cv_wait_sig(&msq->msq_cv, &msgmutex);
			MSG_PRINTF(("good morning, error=%d\n", error));
			msg_waiters--;

			if (we_own_it)
				msqptr->msg_perm.mode &= ~MSG_LOCKED;

			/*
			 * If a reallocation is in progress, notify the
			 * reallocator and restart the call.
			 */
			if (msg_realloc_state) {
				cv_broadcast(&msg_realloc_cv);
				mutex_exit(&msgmutex);
				goto restart;
			}

			if (error != 0) {
				MSG_PRINTF(("msgsnd: interrupted system "
				    "call\n"));
				error = EINTR;
				goto unlock;
			}

			/*
			 * Make sure that the message queue still exists
			 */

			if (msqptr->msg_qbytes == 0) {
				MSG_PRINTF(("msqid deleted\n"));
				error = EIDRM;
				goto unlock;
			}
		} else {
			MSG_PRINTF(("got all the resources that we need\n"));
			break;
		}
	}

	/*
	 * We have the resources that we need.
	 * Make sure!
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	KASSERT(segs_needed <= nfree_msgmaps);
	KASSERT(msgsz + msqptr->_msg_cbytes <= msqptr->msg_qbytes);
	KASSERT(free_msghdrs != NULL);

	/*
	 * Re-lock the msqid_ds in case we page-fault when copying in the
	 * message
	 */

	KASSERT((msqptr->msg_perm.mode & MSG_LOCKED) == 0);
	msqptr->msg_perm.mode |= MSG_LOCKED;

	/*
	 * Allocate a message header
	 */

	msghdr = free_msghdrs;
	free_msghdrs = msghdr->msg_next;
	msghdr->msg_spot = -1;
	msghdr->msg_ts = msgsz;

	/*
	 * Allocate space for the message
	 */

	while (segs_needed > 0) {
		KASSERT(nfree_msgmaps > 0);
		KASSERT(free_msgmaps != -1);
		KASSERT(free_msgmaps < msginfo.msgseg);

		next = free_msgmaps;
		MSG_PRINTF(("allocating segment %d to message\n", next));
		free_msgmaps = msgmaps[next].next;
		nfree_msgmaps--;
		msgmaps[next].next = msghdr->msg_spot;
		msghdr->msg_spot = next;
		segs_needed--;
	}

	/*
	 * Copy in the message type
	 */
	mutex_exit(&msgmutex);
	error = (*fetch_type)(user_msgp, &msghdr->msg_type, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error %d copying the message type\n", error));
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Validate the message type
	 */

	if (msghdr->msg_type < 1) {
		msg_freehdr(msghdr);
		msqptr->msg_perm.mode &= ~MSG_LOCKED;
		cv_broadcast(&msq->msq_cv);
		MSG_PRINTF(("mtype (%ld) < 1\n", msghdr->msg_type));
		error = EINVAL;
		goto unlock;
	}

	/*
	 * Copy in the message body
	 */

	next = msghdr->msg_spot;
	while (msgsz > 0) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz;
		mutex_exit(&msgmutex);
		error = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error %d copying in message segment\n",
			    error));
			msg_freehdr(msghdr);
			msqptr->msg_perm.mode &= ~MSG_LOCKED;
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		msgsz -= tlen;
		user_msgp += tlen;
		next = msgmaps[next].next;
	}
	KASSERT(next == -1);

	/*
	 * We've got the message.  Unlock the msqid_ds.
	 */

	msqptr->msg_perm.mode &= ~MSG_LOCKED;

	/*
	 * Make sure that the msqid_ds is still allocated.
	 */

	if (msqptr->msg_qbytes == 0) {
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		error = EIDRM;
		goto unlock;
	}

	/*
	 * Put the message into the queue
	 */

	if (msqptr->_msg_first == NULL) {
		msqptr->_msg_first = msghdr;
		msqptr->_msg_last = msghdr;
	} else {
		msqptr->_msg_last->msg_next = msghdr;
		msqptr->_msg_last = msghdr;
	}
	msqptr->_msg_last->msg_next = NULL;

	msqptr->_msg_cbytes += msghdr->msg_ts;
	msqptr->msg_qnum++;
	msqptr->msg_lspid = l->l_proc->p_pid;
	msqptr->msg_stime = time_second;

	cv_broadcast(&msq->msq_cv);

 unlock:
	mutex_exit(&msgmutex);
	return error;
}

int
sys_msgrcv(struct lwp *l, void *v, register_t *retval)
{
	struct sys_msgrcv_args /* {
		syscallarg(int) msqid;
		syscallarg(void *) msgp;
		syscallarg(size_t) msgsz;
		syscallarg(long) msgtyp;
		syscallarg(int) msgflg;
	} */ *uap = v;

	return msgrcv1(l, SCARG(uap, msqid), SCARG(uap, msgp),
	    SCARG(uap, msgsz), SCARG(uap, msgtyp), SCARG(uap, msgflg),
	    sizeof(long), copyout, retval);
}

int
msgrcv1(struct lwp *l, int msqidr, char *user_msgp, size_t msgsz, long msgtyp,
    int msgflg, size_t typesz, copyout_t put_type, register_t *retval)
{
	size_t len;
	kauth_cred_t cred = l->l_cred;
	struct msqid_ds *msqptr;
	struct __msg *msghdr;
	int error = 0, msqid;
	kmsq_t *msq;
	short next;

	MSG_PRINTF(("call to msgrcv(%d, %p, %lld, %ld, %d)\n", msqidr,
	    user_msgp, (long long)msgsz, msgtyp, msgflg));
 restart:
	msqid = IPCID_TO_IX(msqidr);

	mutex_enter(&msgmutex);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(msg_realloc_state))
		cv_wait(&msg_realloc_cv, &msgmutex);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
		MSG_PRINTF(("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni));
		error = EINVAL;
		goto unlock;
	}

	msq = &msqs[msqid];
	msqptr = &msq->msq_u;

	if (msqptr->msg_qbytes == 0) {
		MSG_PRINTF(("no such message queue id\n"));
		error = EINVAL;
		goto unlock;
	}
	if (msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
		MSG_PRINTF(("wrong sequence number\n"));
		error = EINVAL;
		goto unlock;
	}

	if ((error = ipcperm(cred, &msqptr->msg_perm, IPC_R))) {
		MSG_PRINTF(("requester doesn't have read access\n"));
		goto unlock;
	}

	msghdr = NULL;
	while (msghdr == NULL) {
		if (msgtyp == 0) {
			msghdr = msqptr->_msg_first;
			if (msghdr != NULL) {
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("first msg on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				if (msqptr->_msg_first == msqptr->_msg_last) {
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					msqptr->_msg_first = msghdr->msg_next;
					KASSERT(msqptr->_msg_first != NULL);
				}
			}
		} else {
			struct __msg *previous;
			struct __msg **prev;

			for (previous = NULL, prev = &msqptr->_msg_first;
			    (msghdr = *prev) != NULL;
			    previous = msghdr, prev = &msghdr->msg_next) {
				/*
				 * Is this message's type an exact match or is
				 * this message's type less than or equal to
				 * the absolute value of a negative msgtyp?
				 * Note that the second half of this test can
				 * NEVER be true if msgtyp is positive since
				 * msg_type is always positive!
				 */

				if (msgtyp != msghdr->msg_type &&
				    msghdr->msg_type > -msgtyp)
					continue;

				MSG_PRINTF(("found message type %ld, requested %ld\n",
				    msghdr->msg_type, msgtyp));
				if (msgsz < msghdr->msg_ts &&
				    (msgflg & MSG_NOERROR) == 0) {
					MSG_PRINTF(("requested message on the queue "
					    "is too big (want %lld, got %d)\n",
					    (long long)msgsz, msghdr->msg_ts));
					error = E2BIG;
					goto unlock;
				}
				*prev = msghdr->msg_next;
				if (msghdr != msqptr->_msg_last)
					break;
				if (previous == NULL) {
					KASSERT(prev == &msqptr->_msg_first);
					msqptr->_msg_first = NULL;
					msqptr->_msg_last = NULL;
				} else {
					KASSERT(prev != &msqptr->_msg_first);
					msqptr->_msg_last = previous;
				}
				break;
			}
		}

		/*
		 * We've either extracted the msghdr for the appropriate
		 * message or there isn't one.
		 * If there is one then bail out of this loop.
		 */
		if (msghdr != NULL)
			break;

		/*
		 * Hmph!  No message found.  Does the user want to wait?
		 */

		if ((msgflg & IPC_NOWAIT) != 0) {
			MSG_PRINTF(("no appropriate message found (msgtyp=%ld)\n",
			    msgtyp));
			error = ENOMSG;
			goto unlock;
		}

		/*
		 * Wait for something to happen
		 */

		msg_waiters++;
		MSG_PRINTF(("msgrcv: goodnight\n"));
		error = cv_wait_sig(&msq->msq_cv, &msgmutex);
		MSG_PRINTF(("msgrcv: good morning (error=%d)\n", error));
		msg_waiters--;

		/*
		 * If a reallocation is in progress, notify the
		 * reallocator and restart the call.
		 */
		if (msg_realloc_state) {
			cv_broadcast(&msg_realloc_cv);
			mutex_exit(&msgmutex);
			goto restart;
		}

		if (error != 0) {
			MSG_PRINTF(("msgrcv: interrupted system call\n"));
			error = EINTR;
			goto unlock;
		}

		/*
		 * Make sure that the message queue still exists
		 */

		if (msqptr->msg_qbytes == 0 ||
		    msqptr->msg_perm._seq != IPCID_TO_SEQ(msqidr)) {
			MSG_PRINTF(("msqid deleted\n"));
			error = EIDRM;
			goto unlock;
		}
	}

	/*
	 * Return the message to the user.
	 *
	 * First, do the bookkeeping (before we risk being interrupted).
	 */

	msqptr->_msg_cbytes -= msghdr->msg_ts;
	msqptr->msg_qnum--;
	msqptr->msg_lrpid = l->l_proc->p_pid;
	msqptr->msg_rtime = time_second;

	/*
	 * Make msgsz the actual amount that we'll be returning.
	 * Note that this effectively truncates the message if it is too long
	 * (since msgsz is never increased).
	 */

	MSG_PRINTF(("found a message, msgsz=%lld, msg_ts=%d\n",
	    (long long)msgsz, msghdr->msg_ts));
	if (msgsz > msghdr->msg_ts)
		msgsz = msghdr->msg_ts;

	/*
	 * Return the type to the user.
	 */
	mutex_exit(&msgmutex);
	error = (*put_type)(&msghdr->msg_type, user_msgp, typesz);
	mutex_enter(&msgmutex);
	if (error != 0) {
		MSG_PRINTF(("error (%d) copying out message type\n", error));
		msg_freehdr(msghdr);
		cv_broadcast(&msq->msq_cv);
		goto unlock;
	}
	user_msgp += typesz;

	/*
	 * Return the segments to the user
	 */

	next = msghdr->msg_spot;
	for (len = 0; len < msgsz; len += msginfo.msgssz) {
		size_t tlen;
		KASSERT(next > -1);
		KASSERT(next < msginfo.msgseg);

		if (msgsz - len > msginfo.msgssz)
			tlen = msginfo.msgssz;
		else
			tlen = msgsz - len;
		mutex_exit(&msgmutex);
		error = (*put_type)(&msgpool[next * msginfo.msgssz],
		    user_msgp, tlen);
		mutex_enter(&msgmutex);
		if (error != 0) {
			MSG_PRINTF(("error (%d) copying out message segment\n",
			    error));
			msg_freehdr(msghdr);
			cv_broadcast(&msq->msq_cv);
			goto unlock;
		}
		user_msgp += tlen;
		next = msgmaps[next].next;
	}

	/*
	 * Done, return the actual number of bytes copied out.
	 */

	msg_freehdr(msghdr);
	cv_broadcast(&msq->msq_cv);
	*retval = msgsz;

 unlock:
	mutex_exit(&msgmutex);
	return error;
}

/*
 * Sysctl initialization and nodes.
 */

static int
sysctl_ipc_msgmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return msgrealloc(newsize, msginfo.msgseg);
}

static int
sysctl_ipc_msgseg(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = msginfo.msgseg;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return msgrealloc(msginfo.msgmni, newsize);
}

SYSCTL_SETUP(sysctl_ipc_msg_setup, "sysctl kern.ipc subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ipc",
		SYSCTL_DESCR("SysV IPC options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "msgmni",
		SYSCTL_DESCR("Max number of message queue identifiers"),
		sysctl_ipc_msgmni, 0, &msginfo.msgmni, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "msgseg",
		SYSCTL_DESCR("Max number of message segments"),
		sysctl_ipc_msgseg, 0, &msginfo.msgseg, 0,
		CTL_CREATE, CTL_EOL);
}