/*	$NetBSD: sysv_sem.c,v 1.77 2007/12/08 15:02:46 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID semaphores
 *
 * Author: Daniel Boulet
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_sem.c,v 1.77 2007/12/08 15:02:46 ad Exp $");

#define	SYSVSEM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sem.h>
#include <sys/sysctl.h>
#include <sys/kmem.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/kauth.h>

/*
 * Memory areas:
 *  1st: Pool of semaphore identifiers
 *  2nd: Semaphores
 *  3rd: Condition variables
 *  4th: Undo structures
 */
struct semid_ds *sema;
static struct __sem *sem;
static kcondvar_t *semcv;
static int *semu;

static kmutex_t semlock;
static struct sem_undo *semu_list;	/* list of active undo structures */
static u_int semtot = 0;		/* total number of semaphores */

static u_int sem_waiters = 0;		/* total number of semop waiters */
static bool sem_realloc_state;
static kcondvar_t sem_realloc_cv;

/* Macro to find a particular sem_undo vector */
#define SEMU(s, ix)	((struct sem_undo *)(((long)s) + ix * seminfo.semusz))

#ifdef SEM_DEBUG
#define SEM_PRINTF(a)	printf a
#else
#define SEM_PRINTF(a)
#endif

struct sem_undo *semu_alloc(struct proc *);
int semundo_adjust(struct proc *, struct sem_undo **, int, int, int);
void semundo_clear(int, int);

void
seminit(void)
{
	int i, sz;
	vaddr_t v;

	mutex_init(&semlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sem_realloc_cv, "semrealc");
	sem_realloc_state = false;

	/* Allocate the wired memory for our structures */
	sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)) +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
	    ALIGN(seminfo.semmnu * seminfo.semusz);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_sem: cannot allocate memory");
	sema = (void *)v;
	sem = (void *)(ALIGN(sema) +
	    seminfo.semmni * sizeof(struct semid_ds));
	semcv = (void *)(ALIGN(sem) +
	    seminfo.semmns * sizeof(struct __sem));
	semu = (void *)(ALIGN(semcv) +
	    seminfo.semmni * sizeof(kcondvar_t));

	for (i = 0; i < seminfo.semmni; i++) {
		sema[i]._sem_base = 0;
		sema[i].sem_perm.mode = 0;
		cv_init(&semcv[i], "semwait");
	}
	for (i = 0; i < seminfo.semmnu; i++) {
		struct sem_undo *suptr = SEMU(semu, i);
		suptr->un_proc = NULL;
	}
	semu_list = NULL;
	exithook_establish(semexit, NULL);
}
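
/*
 * semrealloc() resizes the single wired allocation that backs the four
 * areas above.  The protocol, in short: mark sem_realloc_state and wake
 * every sleeping semop() waiter so they drain out (and later restart);
 * copy the active identifiers, semaphores and undo structures into the
 * new allocation; swap the global pointers and limits; then free the old
 * allocation and wake anybody who blocked on sem_realloc_cv meanwhile.
 */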

static int
semrealloc(int newsemmni, int newsemmns, int newsemmnu)
{
	struct semid_ds *new_sema, *old_sema;
	struct __sem *new_sem;
	struct sem_undo *new_semu_list, *suptr, *nsuptr;
	int *new_semu;
	kcondvar_t *new_semcv;
	vaddr_t v;
	int i, j, lsemid, nmnus, sz;

	if (newsemmni < 1 || newsemmns < 1 || newsemmnu < 1)
		return EINVAL;

	/* Allocate the wired memory for our structures */
	sz = ALIGN(newsemmni * sizeof(struct semid_ds)) +
	    ALIGN(newsemmns * sizeof(struct __sem)) +
	    ALIGN(newsemmni * sizeof(kcondvar_t)) +
	    ALIGN(newsemmnu * seminfo.semusz);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		return ENOMEM;

	mutex_enter(&semlock);
	if (sem_realloc_state) {
		mutex_exit(&semlock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}
	sem_realloc_state = true;
	if (sem_waiters) {
		/*
		 * Mark reallocation state, wake up all waiters,
		 * and wait until they have all exited.
		 */
		for (i = 0; i < seminfo.semmni; i++)
			cv_broadcast(&semcv[i]);
		while (sem_waiters)
			cv_wait(&sem_realloc_cv, &semlock);
	}
	old_sema = sema;

	/* Find the last used semaphore identifier slot */
	lsemid = 0;
	for (i = 0; i < seminfo.semmni; i++)
		if (sema[i].sem_perm.mode & SEM_ALLOC)
			lsemid = i;

	/* Get the number of currently used undo structures */
	nmnus = 0;
	for (i = 0; i < seminfo.semmnu; i++) {
		suptr = SEMU(semu, i);
		if (suptr->un_proc == NULL)
			continue;
		nmnus++;
	}

	/* We cannot reallocate less memory than we use */
	if (lsemid >= newsemmni || semtot > newsemmns || nmnus > newsemmnu) {
		mutex_exit(&semlock);
		uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
		return EBUSY;
	}

	new_sema = (void *)v;
	new_sem = (void *)(ALIGN(new_sema) +
	    newsemmni * sizeof(struct semid_ds));
	new_semcv = (void *)(ALIGN(new_sem) +
	    newsemmns * sizeof(struct __sem));
	new_semu = (void *)(ALIGN(new_semcv) +
	    newsemmni * sizeof(kcondvar_t));

	/* Initialize all semaphore identifiers and condvars */
	for (i = 0; i < newsemmni; i++) {
		new_sema[i]._sem_base = 0;
		new_sema[i].sem_perm.mode = 0;
		cv_init(&new_semcv[i], "semwait");
	}
	for (i = 0; i < newsemmnu; i++) {
		nsuptr = SEMU(new_semu, i);
		nsuptr->un_proc = NULL;
	}

	/*
	 * Copy all identifiers, semaphores and list of the
	 * undo structures to the new memory allocation.
	 */
	j = 0;
	for (i = 0; i <= lsemid; i++) {
		if ((sema[i].sem_perm.mode & SEM_ALLOC) == 0)
			continue;
		memcpy(&new_sema[i], &sema[i], sizeof(struct semid_ds));
		new_sema[i]._sem_base = &new_sem[j];
		memcpy(new_sema[i]._sem_base, sema[i]._sem_base,
		    (sizeof(struct __sem) * sema[i].sem_nsems));
		j += sema[i].sem_nsems;
	}
	KASSERT(j == semtot);

	j = 0;
	new_semu_list = NULL;
	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
		KASSERT(j < newsemmnu);
		nsuptr = SEMU(new_semu, j);
		memcpy(nsuptr, suptr, SEMUSZ);
		nsuptr->un_next = new_semu_list;
		new_semu_list = nsuptr;
		j++;
	}

	for (i = 0; i < seminfo.semmni; i++) {
		KASSERT(cv_has_waiters(&semcv[i]) == false);
		cv_destroy(&semcv[i]);
	}

	sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)) +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
	    ALIGN(seminfo.semmnu * seminfo.semusz);

	/* Set the pointers and update the new values */
	sema = new_sema;
	sem = new_sem;
	semcv = new_semcv;
	semu = new_semu;
	semu_list = new_semu_list;

	seminfo.semmni = newsemmni;
	seminfo.semmns = newsemmns;
	seminfo.semmnu = newsemmnu;

	/* Reallocation completed - notify all waiters, if any */
	sem_realloc_state = false;
	cv_broadcast(&sem_realloc_cv);
	mutex_exit(&semlock);

	uvm_km_free(kernel_map, (vaddr_t)old_sema, sz, UVM_KMF_WIRED);
	return 0;
}

/*
 * Placebo.
 */

int
sys_semconfig(struct lwp *l, void *v, register_t *retval)
{

	*retval = 0;
	return 0;
}

/*
 * Allocate a new sem_undo structure for a process
 * (returns ptr to structure or NULL if no more room)
 */

struct sem_undo *
semu_alloc(struct proc *p)
{
	int i;
	struct sem_undo *suptr;
	struct sem_undo **supptr;
	int attempt;

	KASSERT(mutex_owned(&semlock));

	/*
	 * Try twice to allocate something.
	 * (we'll purge any empty structures after the first pass so
	 * two passes are always enough)
	 */

	for (attempt = 0; attempt < 2; attempt++) {
		/*
		 * Look for a free structure.
		 * Fill it in and return it if we find one.
		 */

		for (i = 0; i < seminfo.semmnu; i++) {
			suptr = SEMU(semu, i);
			if (suptr->un_proc == NULL) {
				suptr->un_next = semu_list;
				semu_list = suptr;
				suptr->un_cnt = 0;
				suptr->un_proc = p;
				return (suptr);
			}
		}

		/*
		 * We didn't find a free one, if this is the first attempt
		 * then try to free some structures.
		 */

		if (attempt == 0) {
			/* All the structures are in use - try to free some */
			int did_something = 0;

			supptr = &semu_list;
			while ((suptr = *supptr) != NULL) {
				if (suptr->un_cnt == 0) {
					suptr->un_proc = NULL;
					*supptr = suptr->un_next;
					did_something = 1;
				} else
					supptr = &suptr->un_next;
			}

			/* If we didn't free anything then just give-up */
			if (!did_something)
				return (NULL);
		} else {
			/*
			 * The second pass failed even though we freed
			 * something after the first pass!
			 * This is IMPOSSIBLE!
			 */
			panic("semu_alloc - second attempt failed");
		}
	}
	return NULL;
}
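
/*
 * Undo bookkeeping, by example: when a semop() with SEM_UNDO lowers a
 * semaphore by n, semundo_adjust() is called with adjval = +n, so the
 * per-process undo entry accumulates the adjustment needed to restore
 * the semaphore.  semexit() later applies the accumulated adjustments
 * if the process exits without undoing its operations itself.
 */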

/*
 * Adjust a particular entry for a particular proc
 */

int
semundo_adjust(struct proc *p, struct sem_undo **supptr, int semid, int semnum,
    int adjval)
{
	struct sem_undo *suptr;
	struct undo *sunptr;
	int i;

	KASSERT(mutex_owned(&semlock));

	/*
	 * Look for and remember the sem_undo if the caller doesn't
	 * provide it
	 */

	suptr = *supptr;
	if (suptr == NULL) {
		for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
			if (suptr->un_proc == p)
				break;

		if (suptr == NULL) {
			suptr = semu_alloc(p);
			if (suptr == NULL)
				return (ENOSPC);
		}
		*supptr = suptr;
	}

	/*
	 * Look for the requested entry and adjust it (delete if
	 * adjval becomes 0).
	 */
	sunptr = &suptr->un_ent[0];
	for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
			continue;
		sunptr->un_adjval += adjval;
		if (sunptr->un_adjval == 0) {
			suptr->un_cnt--;
			if (i < suptr->un_cnt)
				suptr->un_ent[i] =
				    suptr->un_ent[suptr->un_cnt];
		}
		return (0);
	}

	/* Didn't find the right entry - create it */
	if (suptr->un_cnt == SEMUME)
		return (EINVAL);

	sunptr = &suptr->un_ent[suptr->un_cnt];
	suptr->un_cnt++;
	sunptr->un_adjval = adjval;
	sunptr->un_id = semid;
	sunptr->un_num = semnum;
	return (0);
}

void
semundo_clear(int semid, int semnum)
{
	struct sem_undo *suptr;
	struct undo *sunptr, *sunend;

	KASSERT(mutex_owned(&semlock));

	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
		for (sunptr = &suptr->un_ent[0],
		    sunend = sunptr + suptr->un_cnt; sunptr < sunend;) {
			if (sunptr->un_id == semid) {
				if (semnum == -1 || sunptr->un_num == semnum) {
					suptr->un_cnt--;
					sunend--;
					if (sunptr != sunend)
						*sunptr = *sunend;
					if (semnum != -1)
						break;
					else
						continue;
				}
			}
			sunptr++;
		}
}

int
sys_____semctl13(struct lwp *l, void *v, register_t *retval)
{
	struct sys_____semctl13_args /* {
		syscallarg(int) semid;
		syscallarg(int) semnum;
		syscallarg(int) cmd;
		syscallarg(union __semun *) arg;
	} */ *uap = v;
	struct semid_ds sembuf;
	int cmd, error;
	void *pass_arg;
	union __semun karg;

	cmd = SCARG(uap, cmd);

	pass_arg = get_semctl_arg(cmd, &sembuf, &karg);

	if (pass_arg) {
		error = copyin(SCARG(uap, arg), &karg, sizeof(karg));
		if (error)
			return error;
		if (cmd == IPC_SET) {
			error = copyin(karg.buf, &sembuf, sizeof(sembuf));
			if (error)
				return (error);
		}
	}

	error = semctl1(l, SCARG(uap, semid), SCARG(uap, semnum), cmd,
	    pass_arg, retval);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&sembuf, karg.buf, sizeof(sembuf));

	return (error);
}

int
semctl1(struct lwp *l, int semid, int semnum, int cmd, void *v,
    register_t *retval)
{
	kauth_cred_t cred = l->l_cred;
	union __semun *arg = v;
	struct semid_ds *sembuf = v, *semaptr;
	int i, error, ix;

	SEM_PRINTF(("call to semctl(%d, %d, %d, %p)\n",
	    semid, semnum, cmd, v));

	mutex_enter(&semlock);

	ix = IPCID_TO_IX(semid);
	if (ix < 0 || ix >= seminfo.semmni) {
		mutex_exit(&semlock);
		return (EINVAL);
	}

	semaptr = &sema[ix];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != IPCID_TO_SEQ(semid)) {
		mutex_exit(&semlock);
		return (EINVAL);
	}

	switch (cmd) {
	case IPC_RMID:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)) != 0)
			break;
		semaptr->sem_perm.cuid = kauth_cred_geteuid(cred);
		semaptr->sem_perm.uid = kauth_cred_geteuid(cred);
		semtot -= semaptr->sem_nsems;
		for (i = semaptr->_sem_base - sem; i < semtot; i++)
			sem[i] = sem[i + semaptr->sem_nsems];
		for (i = 0; i < seminfo.semmni; i++) {
			if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
			    sema[i]._sem_base > semaptr->_sem_base)
				sema[i]._sem_base -= semaptr->sem_nsems;
		}
		semaptr->sem_perm.mode = 0;
		semundo_clear(ix, -1);
		cv_broadcast(&semcv[ix]);
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
			break;
		KASSERT(sembuf != NULL);
		semaptr->sem_perm.uid = sembuf->sem_perm.uid;
		semaptr->sem_perm.gid = sembuf->sem_perm.gid;
		semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
		    (sembuf->sem_perm.mode & 0777);
		semaptr->sem_ctime = time_second;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		KASSERT(sembuf != NULL);
		memcpy(sembuf, semaptr, sizeof(struct semid_ds));
		break;

	case GETNCNT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].semncnt;
		break;

	case GETPID:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].sempid;
		break;

	case GETVAL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].semval;
		break;

	case GETALL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		KASSERT(arg != NULL);
		for (i = 0; i < semaptr->sem_nsems; i++) {
			error = copyout(&semaptr->_sem_base[i].semval,
			    &arg->array[i], sizeof(arg->array[i]));
			if (error != 0)
				break;
		}
		break;

	case GETZCNT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		*retval = semaptr->_sem_base[semnum].semzcnt;
		break;

	case SETVAL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
			break;
		if (semnum < 0 || semnum >= semaptr->sem_nsems) {
			error = EINVAL;
			break;
		}
		KASSERT(arg != NULL);
		semaptr->_sem_base[semnum].semval = arg->val;
		semundo_clear(ix, semnum);
		cv_broadcast(&semcv[ix]);
		break;

	case SETALL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
			break;
		KASSERT(arg != NULL);
		for (i = 0; i < semaptr->sem_nsems; i++) {
			error = copyin(&arg->array[i],
			    &semaptr->_sem_base[i].semval,
			    sizeof(arg->array[i]));
			if (error != 0)
				break;
		}
		semundo_clear(ix, -1);
		cv_broadcast(&semcv[ix]);
		break;

	default:
		error = EINVAL;
		break;
	}

	mutex_exit(&semlock);
	return (error);
}
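
/*
 * For reference, the usual way these syscalls are driven from userland
 * (illustrative sketch only; error handling omitted, values arbitrary):
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf op = { 0, -1, SEM_UNDO };
 *	semop(id, &op, 1);		decrement, undone if the caller exits
 *	semctl(id, 0, IPC_RMID);	remove the identifier
 */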

int
sys_semget(struct lwp *l, void *v, register_t *retval)
{
	struct sys_semget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) nsems;
		syscallarg(int) semflg;
	} */ *uap = v;
	int semid, error = 0;
	int key = SCARG(uap, key);
	int nsems = SCARG(uap, nsems);
	int semflg = SCARG(uap, semflg);
	kauth_cred_t cred = l->l_cred;

	SEM_PRINTF(("semget(0x%x, %d, 0%o)\n", key, nsems, semflg));

	mutex_enter(&semlock);

	if (key != IPC_PRIVATE) {
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
			    sema[semid].sem_perm._key == key)
				break;
		}
		if (semid < seminfo.semmni) {
			SEM_PRINTF(("found public key\n"));
			if ((error = ipcperm(cred, &sema[semid].sem_perm,
			    semflg & 0700)))
				goto out;
			if (nsems > 0 && sema[semid].sem_nsems < nsems) {
				SEM_PRINTF(("too small\n"));
				error = EINVAL;
				goto out;
			}
			if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
				SEM_PRINTF(("not exclusive\n"));
				error = EEXIST;
				goto out;
			}
			goto found;
		}
	}

	SEM_PRINTF(("need to allocate the semid_ds\n"));
	if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
		if (nsems <= 0 || nsems > seminfo.semmsl) {
			SEM_PRINTF(("nsems out of range (0<%d<=%d)\n", nsems,
			    seminfo.semmsl));
			error = EINVAL;
			goto out;
		}
		if (nsems > seminfo.semmns - semtot) {
			SEM_PRINTF(("not enough semaphores left "
			    "(need %d, got %d)\n",
			    nsems, seminfo.semmns - semtot));
			error = ENOSPC;
			goto out;
		}
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
				break;
		}
		if (semid == seminfo.semmni) {
			SEM_PRINTF(("no more semid_ds's available\n"));
			error = ENOSPC;
			goto out;
		}
		SEM_PRINTF(("semid %d is available\n", semid));
		sema[semid].sem_perm._key = key;
		sema[semid].sem_perm.cuid = kauth_cred_geteuid(cred);
		sema[semid].sem_perm.uid = kauth_cred_geteuid(cred);
		sema[semid].sem_perm.cgid = kauth_cred_getegid(cred);
		sema[semid].sem_perm.gid = kauth_cred_getegid(cred);
		sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
		sema[semid].sem_perm._seq =
		    (sema[semid].sem_perm._seq + 1) & 0x7fff;
		sema[semid].sem_nsems = nsems;
		sema[semid].sem_otime = 0;
		sema[semid].sem_ctime = time_second;
		sema[semid]._sem_base = &sem[semtot];
		semtot += nsems;
		memset(sema[semid]._sem_base, 0,
		    sizeof(sema[semid]._sem_base[0]) * nsems);
		SEM_PRINTF(("sembase = %p, next = %p\n", sema[semid]._sem_base,
		    &sem[semtot]));
	} else {
		SEM_PRINTF(("didn't find it and wasn't asked to create it\n"));
		error = ENOENT;
		goto out;
	}

 found:
	*retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
 out:
	mutex_exit(&semlock);
	return (error);
}

#define SMALL_SOPS	8

int
sys_semop(struct lwp *l, void *v, register_t *retval)
{
	struct sys_semop_args /* {
		syscallarg(int) semid;
		syscallarg(struct sembuf *) sops;
		syscallarg(size_t) nsops;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int semid = SCARG(uap, semid), seq;
	size_t nsops = SCARG(uap, nsops);
	struct sembuf small_sops[SMALL_SOPS];
	struct sembuf *sops;
	struct semid_ds *semaptr;
	struct sembuf *sopptr = NULL;
	struct __sem *semptr = NULL;
	struct sem_undo *suptr = NULL;
	kauth_cred_t cred = l->l_cred;
	int i, error;
	int do_wakeup, do_undos;

	SEM_PRINTF(("call to semop(%d, %p, %zd)\n", semid,
	    SCARG(uap, sops), nsops));

 restart:
	if (nsops <= SMALL_SOPS) {
		sops = small_sops;
	} else if (nsops <= seminfo.semopm) {
		KERNEL_LOCK(1, l);		/* XXXSMP */
		sops = kmem_alloc(nsops * sizeof(*sops), KM_SLEEP);
		KERNEL_UNLOCK_ONE(l);		/* XXXSMP */
	} else {
		SEM_PRINTF(("too many sops (max=%d, nsops=%zd)\n",
		    seminfo.semopm, nsops));
		return (E2BIG);
	}

	error = copyin(SCARG(uap, sops), sops, nsops * sizeof(sops[0]));
	if (error) {
		SEM_PRINTF(("error = %d from copyin(%p, %p, %zd)\n", error,
		    SCARG(uap, sops), &sops, nsops * sizeof(sops[0])));
		if (sops != small_sops) {
			KERNEL_LOCK(1, l);	/* XXXSMP */
			kmem_free(sops, nsops * sizeof(*sops));
			KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
		}
		return error;
	}

	mutex_enter(&semlock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(sem_realloc_state))
		cv_wait(&sem_realloc_cv, &semlock);

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */
	if (semid < 0 || semid >= seminfo.semmni) {
		error = EINVAL;
		goto out;
	}

	semaptr = &sema[semid];
	seq = IPCID_TO_SEQ(SCARG(uap, semid));
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != seq) {
		error = EINVAL;
		goto out;
	}

	if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W))) {
		SEM_PRINTF(("error = %d from ipaccess\n", error));
		goto out;
	}

	for (i = 0; i < nsops; i++)
		if (sops[i].sem_num >= semaptr->sem_nsems) {
			error = EFBIG;
			goto out;
		}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
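	/*
	 * Concretely: given two decrements on semaphores 0 and 1, if the
	 * first can be applied but the second would have to wait, the
	 * first is backed out before we sleep, so no other process ever
	 * observes the set half-applied.
	 */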
	do_undos = 0;

	for (;;) {
		do_wakeup = 0;

		for (i = 0; i < nsops; i++) {
			sopptr = &sops[i];
			semptr = &semaptr->_sem_base[sopptr->sem_num];

			SEM_PRINTF(("semop: semaptr=%p, sem_base=%p, "
			    "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
			    semaptr, semaptr->_sem_base, semptr,
			    sopptr->sem_num, semptr->semval, sopptr->sem_op,
			    (sopptr->sem_flg & IPC_NOWAIT) ?
			    "nowait" : "wait"));

			if (sopptr->sem_op < 0) {
				if ((int)(semptr->semval +
				    sopptr->sem_op) < 0) {
					SEM_PRINTF(("semop: "
					    "can't do it now\n"));
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
					    semptr->semzcnt > 0)
						do_wakeup = 1;
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					SEM_PRINTF(("semop: not zero now\n"));
					break;
				}
			} else {
				if (semptr->semncnt > 0)
					do_wakeup = 1;
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			}
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= nsops)
			goto done;

		/*
		 * No ... rollback anything that we've already done
		 */
		SEM_PRINTF(("semop: rollback 0 through %d\n", i - 1));
		while (i-- > 0)
			semaptr->_sem_base[sops[i].sem_num].semval -=
			    sops[i].sem_op;

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			error = EAGAIN;
			goto out;
		}

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;

		sem_waiters++;
		SEM_PRINTF(("semop: good night!\n"));
		error = cv_wait_sig(&semcv[semid], &semlock);
		SEM_PRINTF(("semop: good morning (error=%d)!\n", error));
		sem_waiters--;

		/* Notify reallocator, if it is waiting */
		cv_broadcast(&sem_realloc_cv);

		/*
		 * Make sure that the semaphore still exists
		 */
		if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
		    semaptr->sem_perm._seq != seq) {
			error = EIDRM;
			goto out;
		}

		/*
		 * The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;

		/* In case of such state, restart the call */
		if (sem_realloc_state) {
			mutex_exit(&semlock);
			goto restart;
		}

		/* Is it really morning, or was our sleep interrupted? */
		if (error != 0) {
			error = EINTR;
			goto out;
		}
		SEM_PRINTF(("semop: good morning!\n"));
	}

 done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			error = semundo_adjust(p, &suptr, semid,
			    sops[i].sem_num, -adjval);
			if (error == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and downs so we can
			 * return with an error with all structures restored.
			 * We rollback the undo's in the exact reverse order
			 * that we applied them.  This guarantees that we
			 * won't run out of space as we roll things back out.
			 */
			while (i-- > 0) {
				if ((sops[i].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[i].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(p, &suptr, semid,
				    sops[i].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			for (i = 0; i < nsops; i++)
				semaptr->_sem_base[sops[i].sem_num].semval -=
				    sops[i].sem_op;

			SEM_PRINTF(("error = %d from semundo_adjust\n", error));
			goto out;
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		semptr->sempid = p->p_pid;
	}

	/* Update sem_otime */
	semaptr->sem_otime = time_second;

	/* Do a wakeup if any semaphore was up'd. */
	if (do_wakeup) {
		SEM_PRINTF(("semop: doing wakeup\n"));
		cv_broadcast(&semcv[semid]);
		SEM_PRINTF(("semop: back from wakeup\n"));
	}
	SEM_PRINTF(("semop: done\n"));
	*retval = 0;

 out:
	mutex_exit(&semlock);
	if (sops != small_sops) {
		KERNEL_LOCK(1, l);		/* XXXSMP */
		kmem_free(sops, nsops * sizeof(*sops));
		KERNEL_UNLOCK_ONE(l);		/* XXXSMP */
	}
	return error;
}

/*
 * Go through the undo structures for this process and apply the
 * adjustments to semaphores.
 */
/*ARGSUSED*/
void
semexit(struct proc *p, void *v)
{
	struct sem_undo *suptr;
	struct sem_undo **supptr;

	mutex_enter(&semlock);

	/*
	 * Go through the chain of undo vectors looking for one
	 * associated with this process.
	 */

	for (supptr = &semu_list; (suptr = *supptr) != NULL;
	    supptr = &suptr->un_next) {
		if (suptr->un_proc == p)
			break;
	}

	/*
	 * If there is no undo vector, skip to the end.
	 */

	if (suptr == NULL) {
		mutex_exit(&semlock);
		return;
	}

	/*
	 * We now have an undo vector for this process.
	 */

	SEM_PRINTF(("proc @%p has undo structure with %d entries\n", p,
	    suptr->un_cnt));

	/*
	 * If there are any active undo elements then process them.
	 */
	if (suptr->un_cnt > 0) {
		int ix;

		for (ix = 0; ix < suptr->un_cnt; ix++) {
			int semid = suptr->un_ent[ix].un_id;
			int semnum = suptr->un_ent[ix].un_num;
			int adjval = suptr->un_ent[ix].un_adjval;
			struct semid_ds *semaptr;

			semaptr = &sema[semid];
			if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
				panic("semexit - semid not allocated");
			if (semnum >= semaptr->sem_nsems)
				panic("semexit - semnum out of range");

			SEM_PRINTF(("semexit: %p id=%d num=%d(adj=%d) ; "
			    "sem=%d\n",
			    suptr->un_proc, suptr->un_ent[ix].un_id,
			    suptr->un_ent[ix].un_num,
			    suptr->un_ent[ix].un_adjval,
			    semaptr->_sem_base[semnum].semval));
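
			/*
			 * Apply the adjustment, but never let the semaphore
			 * value go negative: if the recorded adjustment is
			 * larger than the current value, clamp at zero.
			 */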
			if (adjval < 0 &&
			    semaptr->_sem_base[semnum].semval < -adjval)
				semaptr->_sem_base[semnum].semval = 0;
			else
				semaptr->_sem_base[semnum].semval += adjval;

			cv_broadcast(&semcv[semid]);
			SEM_PRINTF(("semexit: back from wakeup\n"));
		}
	}

	/*
	 * Deallocate the undo vector.
	 */
	SEM_PRINTF(("removing vector\n"));
	suptr->un_proc = NULL;
	*supptr = suptr->un_next;
	mutex_exit(&semlock);
}

/*
 * Sysctl initialization and nodes.
 */

static int
sysctl_ipc_semmni(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = seminfo.semmni;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return semrealloc(newsize, seminfo.semmns, seminfo.semmnu);
}

static int
sysctl_ipc_semmns(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = seminfo.semmns;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return semrealloc(seminfo.semmni, newsize, seminfo.semmnu);
}

static int
sysctl_ipc_semmnu(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;
	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = seminfo.semmnu;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return semrealloc(seminfo.semmni, seminfo.semmns, newsize);
}
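
/*
 * The nodes created below make these limits writable at run time; for
 * example, "sysctl -w kern.ipc.semmni=128" (value purely illustrative)
 * lands in sysctl_ipc_semmni() above and from there in semrealloc().
 */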

SYSCTL_SETUP(sysctl_ipc_sem_setup, "sysctl kern.ipc subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ipc",
	    SYSCTL_DESCR("SysV IPC options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_SYSVIPC, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "semmni",
	    SYSCTL_DESCR("Max number of semaphore identifiers"),
	    sysctl_ipc_semmni, 0, &seminfo.semmni, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "semmns",
	    SYSCTL_DESCR("Max number of semaphores in the system"),
	    sysctl_ipc_semmns, 0, &seminfo.semmns, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "semmnu",
	    SYSCTL_DESCR("Max number of undo structures in the system"),
	    sysctl_ipc_semmnu, 0, &seminfo.semmnu, 0,
	    CTL_CREATE, CTL_EOL);
}