/*	$NetBSD: sysv_sem.c,v 1.56 2005/04/01 11:59:37 yamt Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implementation of SVID semaphores
 *
 * Author:  Daniel Boulet
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_sem.c,v 1.56 2005/04/01 11:59:37 yamt Exp $");

#define SYSVSEM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sem.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>

static int	semtot = 0;
struct	semid_ds *sema;			/* semaphore id pool */
static struct	__sem *sem;		/* semaphore pool */
static struct	sem_undo *semu_list;	/* list of active undo structures */
static int	*semu;			/* undo structure pool */

#ifdef SEM_DEBUG
#define SEM_PRINTF(a) printf a
#else
#define SEM_PRINTF(a)
#endif

struct sem_undo *semu_alloc(struct proc *);
int semundo_adjust(struct proc *, struct sem_undo **, int, int, int);
void semundo_clear(int, int);

/*
 * XXXSMP Once we go MP, there needs to be a lock for the semaphore system.
 * Until then, we're saved by being a non-preemptive kernel.
 */

void
seminit()
{
	int i, sz;
	vaddr_t v;

	/* Allocate wired, zeroed memory for our structures */
	sz = seminfo.semmni * sizeof(struct semid_ds) +
	    seminfo.semmns * sizeof(struct __sem) +
	    seminfo.semmnu * seminfo.semusz;
	v = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_sem: cannot allocate memory");
	sema = (void *)v;
	sem = (void *)(sema + seminfo.semmni);
	semu = (void *)(sem + seminfo.semmns);

	for (i = 0; i < seminfo.semmni; i++) {
		sema[i]._sem_base = 0;
		sema[i].sem_perm.mode = 0;
	}
	for (i = 0; i < seminfo.semmnu; i++) {
		struct sem_undo *suptr = SEMU(i);
		suptr->un_proc = NULL;
	}
	semu_list = NULL;
	exithook_establish(semexit, NULL);
}
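
/*
 * Layout sketch (illustrative): seminit() carves one allocation into
 * three consecutive pools, roughly
 *
 *	sema[0 .. semmni-1]	struct semid_ds, one per semaphore set
 *	sem[0 .. semmns-1]	struct __sem, shared by all sets
 *	undo pool		semmnu records of seminfo.semusz bytes each
 *
 * The undo pool is declared as "int *semu" and is indexed through the
 * SEMU() macro from <sys/sem.h>, since the per-record size (semusz) is
 * only known as a byte count.
 */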

/*
 * Placebo.
 */

int
sys_semconfig(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{

	*retval = 0;
	return 0;
}

/*
 * Allocate a new sem_undo structure for a process
 * (returns ptr to structure or NULL if no more room)
 */

struct sem_undo *
semu_alloc(p)
	struct proc *p;
{
	int i;
	struct sem_undo *suptr;
	struct sem_undo **supptr;
	int attempt;

	/*
	 * Try twice to allocate something.
	 * (we'll purge any empty structures after the first pass so
	 * two passes are always enough)
	 */

	for (attempt = 0; attempt < 2; attempt++) {
		/*
		 * Look for a free structure.
		 * Fill it in and return it if we find one.
		 */

		for (i = 0; i < seminfo.semmnu; i++) {
			suptr = SEMU(i);
			if (suptr->un_proc == NULL) {
				suptr->un_next = semu_list;
				semu_list = suptr;
				suptr->un_cnt = 0;
				suptr->un_proc = p;
				return (suptr);
			}
		}

		/*
		 * We didn't find a free one, if this is the first attempt
		 * then try to free some structures.
		 */

		if (attempt == 0) {
			/* All the structures are in use - try to free some */
			int did_something = 0;

			supptr = &semu_list;
			while ((suptr = *supptr) != NULL) {
				if (suptr->un_cnt == 0) {
					suptr->un_proc = NULL;
					*supptr = suptr->un_next;
					did_something = 1;
				} else
					supptr = &suptr->un_next;
			}

			/* If we didn't free anything then just give-up */
			if (!did_something)
				return (NULL);
		} else {
			/*
			 * The second pass failed even though we freed
			 * something after the first pass!
			 * This is IMPOSSIBLE!
			 */
			panic("semu_alloc - second attempt failed");
		}
	}
	return NULL;
}

/*
 * Adjust a particular entry for a particular proc
 */

int
semundo_adjust(p, supptr, semid, semnum, adjval)
	struct proc *p;
	struct sem_undo **supptr;
	int semid, semnum;
	int adjval;
{
	struct sem_undo *suptr;
	struct undo *sunptr;
	int i;

	/*
	 * Look for and remember the sem_undo if the caller doesn't
	 * provide it
	 */

	suptr = *supptr;
	if (suptr == NULL) {
		for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
			if (suptr->un_proc == p)
				break;

		if (suptr == NULL) {
			suptr = semu_alloc(p);
			if (suptr == NULL)
				return (ENOSPC);
		}
		*supptr = suptr;
	}

	/*
	 * Look for the requested entry and adjust it (delete if
	 * adjval becomes 0).
	 */
	sunptr = &suptr->un_ent[0];
	for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
			continue;
		sunptr->un_adjval += adjval;
		if (sunptr->un_adjval == 0) {
			suptr->un_cnt--;
			if (i < suptr->un_cnt)
				suptr->un_ent[i] =
				    suptr->un_ent[suptr->un_cnt];
		}
		return (0);
	}

	/* Didn't find the right entry - create it */
	if (suptr->un_cnt == SEMUME)
		return (EINVAL);

	sunptr = &suptr->un_ent[suptr->un_cnt];
	suptr->un_cnt++;
	sunptr->un_adjval = adjval;
	sunptr->un_id = semid;
	sunptr->un_num = semnum;
	return (0);
}

void
semundo_clear(semid, semnum)
	int semid, semnum;
{
	struct sem_undo *suptr;
	struct undo *sunptr, *sunend;

	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
		for (sunptr = &suptr->un_ent[0],
		    sunend = sunptr + suptr->un_cnt; sunptr < sunend;) {
			if (sunptr->un_id == semid) {
				if (semnum == -1 || sunptr->un_num == semnum) {
					suptr->un_cnt--;
					sunend--;
					if (sunptr != sunend)
						*sunptr = *sunend;
					if (semnum != -1)
						break;
					else
						continue;
				}
			}
			sunptr++;
		}
}
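
/*
 * Illustration of the undo bookkeeping above: a semop() with sem_op = -2
 * and SEM_UNDO set records an adjustment of +2 through semundo_adjust(),
 * so semexit() can add it back if the process exits without releasing the
 * semaphore.  Adjustments for the same (semid, semnum) pair accumulate in
 * a single struct undo entry, and the entry is dropped once the net
 * adjustment returns to zero.
 */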

int
sys_____semctl13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_____semctl13_args /* {
		syscallarg(int) semid;
		syscallarg(int) semnum;
		syscallarg(int) cmd;
		syscallarg(union __semun *) arg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct semid_ds sembuf;
	int cmd, error;
	void *pass_arg;
	union __semun karg;

	cmd = SCARG(uap, cmd);

	switch (cmd) {
	case IPC_SET:
	case IPC_STAT:
		pass_arg = &sembuf;
		break;

	case GETALL:
	case SETVAL:
	case SETALL:
		pass_arg = &karg;
		break;
	default:
		pass_arg = NULL;
		break;
	}

	if (pass_arg) {
		error = copyin(SCARG(uap, arg), &karg, sizeof(karg));
		if (error)
			return error;
		if (cmd == IPC_SET) {
			error = copyin(karg.buf, &sembuf, sizeof(sembuf));
			if (error)
				return (error);
		}
	}

	error = semctl1(p, SCARG(uap, semid), SCARG(uap, semnum), cmd,
	    pass_arg, retval);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&sembuf, karg.buf, sizeof(sembuf));

	return (error);
}

int
semctl1(p, semid, semnum, cmd, v, retval)
	struct proc *p;
	int semid, semnum, cmd;
	void *v;
	register_t *retval;
{
	struct ucred *cred = p->p_ucred;
	union __semun *arg = v;
	struct semid_ds *sembuf = v, *semaptr;
	int i, error, ix;

	SEM_PRINTF(("call to semctl(%d, %d, %d, %p)\n",
	    semid, semnum, cmd, v));

	ix = IPCID_TO_IX(semid);
	if (ix < 0 || ix >= seminfo.semmni)
		return (EINVAL);

	semaptr = &sema[ix];
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != IPCID_TO_SEQ(semid))
		return (EINVAL);

	switch (cmd) {
	case IPC_RMID:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)) != 0)
			return (error);
		semaptr->sem_perm.cuid = cred->cr_uid;
		semaptr->sem_perm.uid = cred->cr_uid;
		semtot -= semaptr->sem_nsems;
		for (i = semaptr->_sem_base - sem; i < semtot; i++)
			sem[i] = sem[i + semaptr->sem_nsems];
		for (i = 0; i < seminfo.semmni; i++) {
			if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
			    sema[i]._sem_base > semaptr->_sem_base)
				sema[i]._sem_base -= semaptr->sem_nsems;
		}
		semaptr->sem_perm.mode = 0;
		semundo_clear(ix, -1);
		wakeup(semaptr);
		break;

	case IPC_SET:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_M)))
			return (error);
		semaptr->sem_perm.uid = sembuf->sem_perm.uid;
		semaptr->sem_perm.gid = sembuf->sem_perm.gid;
		semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
		    (sembuf->sem_perm.mode & 0777);
		semaptr->sem_ctime = time.tv_sec;
		break;

	case IPC_STAT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			return (error);
		memcpy(sembuf, semaptr, sizeof(struct semid_ds));
		break;

	case GETNCNT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			return (error);
		if (semnum < 0 || semnum >= semaptr->sem_nsems)
			return (EINVAL);
		*retval = semaptr->_sem_base[semnum].semncnt;
		break;

	case GETPID:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			return (error);
		if (semnum < 0 || semnum >= semaptr->sem_nsems)
			return (EINVAL);
		*retval = semaptr->_sem_base[semnum].sempid;
		break;

	case GETVAL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			return (error);
		if (semnum < 0 || semnum >= semaptr->sem_nsems)
			return (EINVAL);
		*retval = semaptr->_sem_base[semnum].semval;
		break;

	case GETALL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			return (error);
		for (i = 0; i < semaptr->sem_nsems; i++) {
			error = copyout(&semaptr->_sem_base[i].semval,
			    &arg->array[i], sizeof(arg->array[i]));
			if (error != 0)
				break;
		}
		break;

	case GETZCNT:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_R)))
			return (error);
		if (semnum < 0 || semnum >= semaptr->sem_nsems)
			return (EINVAL);
		*retval = semaptr->_sem_base[semnum].semzcnt;
		break;

	case SETVAL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
			return (error);
		if (semnum < 0 || semnum >= semaptr->sem_nsems)
			return (EINVAL);
		semaptr->_sem_base[semnum].semval = arg->val;
		semundo_clear(ix, semnum);
		wakeup(semaptr);
		break;

	case SETALL:
		if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W)))
			return (error);
		for (i = 0; i < semaptr->sem_nsems; i++) {
			error = copyin(&arg->array[i],
			    &semaptr->_sem_base[i].semval,
			    sizeof(arg->array[i]));
			if (error != 0)
				break;
		}
		semundo_clear(ix, -1);
		wakeup(semaptr);
		break;

	default:
		return (EINVAL);
	}

	return (error);
}
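
/*
 * Example (userland, illustrative only): setting and reading a semaphore
 * value ends up in semctl1() above via sys_____semctl13().  The union is
 * shown with the name used in this file; user code may need to declare an
 * equivalent "union semun" itself.
 *
 *	#include <sys/sem.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	union __semun arg;
 *
 *	arg.val = 1;
 *	if (semctl(semid, 0, SETVAL, arg) == -1)
 *		err(1, "SETVAL");
 *	printf("value = %d\n", semctl(semid, 0, GETVAL));
 */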

int
sys_semget(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_semget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) nsems;
		syscallarg(int) semflg;
	} */ *uap = v;
	int semid, eval;
	int key = SCARG(uap, key);
	int nsems = SCARG(uap, nsems);
	int semflg = SCARG(uap, semflg);
	struct ucred *cred = l->l_proc->p_ucred;

	SEM_PRINTF(("semget(0x%x, %d, 0%o)\n", key, nsems, semflg));

	if (key != IPC_PRIVATE) {
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
			    sema[semid].sem_perm._key == key)
				break;
		}
		if (semid < seminfo.semmni) {
			SEM_PRINTF(("found public key\n"));
			if ((eval = ipcperm(cred, &sema[semid].sem_perm,
			    semflg & 0700)))
				return (eval);
			if (nsems > 0 && sema[semid].sem_nsems < nsems) {
				SEM_PRINTF(("too small\n"));
				return (EINVAL);
			}
			if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
				SEM_PRINTF(("not exclusive\n"));
				return (EEXIST);
			}
			goto found;
		}
	}

	SEM_PRINTF(("need to allocate the semid_ds\n"));
	if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
		if (nsems <= 0 || nsems > seminfo.semmsl) {
			SEM_PRINTF(("nsems out of range (0<%d<=%d)\n", nsems,
			    seminfo.semmsl));
			return (EINVAL);
		}
		if (nsems > seminfo.semmns - semtot) {
			SEM_PRINTF(("not enough semaphores left "
			    "(need %d, got %d)\n",
			    nsems, seminfo.semmns - semtot));
			return (ENOSPC);
		}
		for (semid = 0; semid < seminfo.semmni; semid++) {
			if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
				break;
		}
		if (semid == seminfo.semmni) {
			SEM_PRINTF(("no more semid_ds's available\n"));
			return (ENOSPC);
		}
		SEM_PRINTF(("semid %d is available\n", semid));
		sema[semid].sem_perm._key = key;
		sema[semid].sem_perm.cuid = cred->cr_uid;
		sema[semid].sem_perm.uid = cred->cr_uid;
		sema[semid].sem_perm.cgid = cred->cr_gid;
		sema[semid].sem_perm.gid = cred->cr_gid;
		sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
		sema[semid].sem_perm._seq =
		    (sema[semid].sem_perm._seq + 1) & 0x7fff;
		sema[semid].sem_nsems = nsems;
		sema[semid].sem_otime = 0;
		sema[semid].sem_ctime = time.tv_sec;
		sema[semid]._sem_base = &sem[semtot];
		semtot += nsems;
		memset(sema[semid]._sem_base, 0,
		    sizeof(sema[semid]._sem_base[0]) * nsems);
		SEM_PRINTF(("sembase = %p, next = %p\n", sema[semid]._sem_base,
		    &sem[semtot]));
	} else {
		SEM_PRINTF(("didn't find it and wasn't asked to create it\n"));
		return (ENOENT);
	}

 found:
	*retval = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
	return (0);
}
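
/*
 * Example (userland, illustrative only): creating a private set of two
 * semaphores exercises the IPC_PRIVATE path of sys_semget() above.
 *
 *	#include <sys/sem.h>
 *	#include <err.h>
 *
 *	int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *
 *	if (semid == -1)
 *		err(1, "semget");
 */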

int
sys_semop(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_semop_args /* {
		syscallarg(int) semid;
		syscallarg(struct sembuf *) sops;
		syscallarg(size_t) nsops;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int semid = SCARG(uap, semid), seq;
	size_t nsops = SCARG(uap, nsops);
	struct sembuf sops[MAX_SOPS];
	struct semid_ds *semaptr;
	struct sembuf *sopptr = NULL;
	struct __sem *semptr = NULL;
	struct sem_undo *suptr = NULL;
	struct ucred *cred = p->p_ucred;
	int i, eval;
	int do_wakeup, do_undos;

	SEM_PRINTF(("call to semop(%d, %p, %lld)\n", semid, sops,
	    (long long)nsops));

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */
	if (semid < 0 || semid >= seminfo.semmni)
		return (EINVAL);

	semaptr = &sema[semid];
	seq = IPCID_TO_SEQ(SCARG(uap, semid));
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != seq)
		return (EINVAL);

	if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W))) {
		SEM_PRINTF(("eval = %d from ipaccess\n", eval));
		return (eval);
	}

	if (nsops > MAX_SOPS) {
		SEM_PRINTF(("too many sops (max=%d, nsops=%lld)\n", MAX_SOPS,
		    (long long)nsops));
		return (E2BIG);
	}

	if ((eval = copyin(SCARG(uap, sops),
	    sops, nsops * sizeof(sops[0]))) != 0) {
		SEM_PRINTF(("eval = %d from copyin(%p, %p, %lld)\n", eval,
		    SCARG(uap, sops), &sops,
		    (long long)(nsops * sizeof(sops[0]))));
		return (eval);
	}

	for (i = 0; i < nsops; i++)
		if (sops[i].sem_num >= semaptr->sem_nsems)
			return (EFBIG);

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	do_undos = 0;

	for (;;) {
		do_wakeup = 0;

		for (i = 0; i < nsops; i++) {
			sopptr = &sops[i];
			semptr = &semaptr->_sem_base[sopptr->sem_num];

			SEM_PRINTF(("semop: semaptr=%p, sem_base=%p, "
			    "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
			    semaptr, semaptr->_sem_base, semptr,
			    sopptr->sem_num, semptr->semval, sopptr->sem_op,
			    (sopptr->sem_flg & IPC_NOWAIT) ?
			    "nowait" : "wait"));

			if (sopptr->sem_op < 0) {
				if ((int)(semptr->semval +
				    sopptr->sem_op) < 0) {
					SEM_PRINTF(("semop: "
					    "can't do it now\n"));
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
					    semptr->semzcnt > 0)
						do_wakeup = 1;
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					SEM_PRINTF(("semop: not zero now\n"));
					break;
				}
			} else {
				if (semptr->semncnt > 0)
					do_wakeup = 1;
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			}
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= nsops)
			goto done;

		/*
		 * No ... rollback anything that we've already done
		 */
		SEM_PRINTF(("semop: rollback 0 through %d\n", i - 1));
		while (i-- > 0)
			semaptr->_sem_base[sops[i].sem_num].semval -=
			    sops[i].sem_op;

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT)
			return (EAGAIN);

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;

		SEM_PRINTF(("semop: good night!\n"));
		eval = tsleep((caddr_t)semaptr, (PZERO - 4) | PCATCH,
		    "semwait", 0);
		SEM_PRINTF(("semop: good morning (eval=%d)!\n", eval));

		/*
		 * Make sure that the semaphore still exists
		 */
		if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
		    semaptr->sem_perm._seq != seq) {
			/* The man page says to return EIDRM. */
			/* Unfortunately, BSD doesn't define that code! */
#ifdef EIDRM
			return (EIDRM);
#else
			return (EINVAL);
#endif
		}

		/*
		 * The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;
		/*
		 * Is it really morning, or was our sleep interrupted?
		 * (Delayed check of tsleep() return code because we
		 * need to decrement sem[nz]cnt either way.)
		 */
		if (eval != 0)
			return (EINTR);
		SEM_PRINTF(("semop: good morning!\n"));
	}

 done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			eval = semundo_adjust(p, &suptr, semid,
			    sops[i].sem_num, -adjval);
			if (eval == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			while (i-- > 0) {
				if ((sops[i].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[i].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(p, &suptr, semid,
				    sops[i].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			for (i = 0; i < nsops; i++)
				semaptr->_sem_base[sops[i].sem_num].semval -=
				    sops[i].sem_op;

			SEM_PRINTF(("eval = %d from semundo_adjust\n", eval));
			return (eval);
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		semptr->sempid = p->p_pid;
	}

	/* Update sem_otime */
	semaptr->sem_otime = time.tv_sec;

	/* Do a wakeup if any semaphore was up'd. */
	if (do_wakeup) {
		SEM_PRINTF(("semop: doing wakeup\n"));
#ifdef SEM_WAKEUP
		sem_wakeup((caddr_t)semaptr);
#else
		wakeup((caddr_t)semaptr);
#endif
		SEM_PRINTF(("semop: back from wakeup\n"));
	}
	SEM_PRINTF(("semop: done\n"));
	*retval = 0;
	return (0);
}
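
/*
 * Example (userland, illustrative only): a classic P/V pair against
 * semaphore 0 of a set, which is what the retry loop in sys_semop() above
 * services.  SEM_UNDO asks the kernel to roll the operation back at
 * process exit via semexit() below.
 *
 *	struct sembuf p_op = { 0, -1, SEM_UNDO };
 *	struct sembuf v_op = { 0,  1, SEM_UNDO };
 *
 *	if (semop(semid, &p_op, 1) == -1)
 *		err(1, "semop P");
 *	... critical section ...
 *	if (semop(semid, &v_op, 1) == -1)
 *		err(1, "semop V");
 */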

/*
 * Go through the undo structures for this process and apply the
 * adjustments to semaphores.
 */
/*ARGSUSED*/
void
semexit(p, v)
	struct proc *p;
	void *v;
{
	struct sem_undo *suptr;
	struct sem_undo **supptr;

	/*
	 * Go through the chain of undo vectors looking for one
	 * associated with this process.
	 */

	for (supptr = &semu_list; (suptr = *supptr) != NULL;
	    supptr = &suptr->un_next) {
		if (suptr->un_proc == p)
			break;
	}

	/*
	 * If there is no undo vector, skip to the end.
	 */

	if (suptr == NULL)
		return;

	/*
	 * We now have an undo vector for this process.
	 */

	SEM_PRINTF(("proc @%p has undo structure with %d entries\n", p,
	    suptr->un_cnt));

	/*
	 * If there are any active undo elements then process them.
	 */
	if (suptr->un_cnt > 0) {
		int ix;

		for (ix = 0; ix < suptr->un_cnt; ix++) {
			int semid = suptr->un_ent[ix].un_id;
			int semnum = suptr->un_ent[ix].un_num;
			int adjval = suptr->un_ent[ix].un_adjval;
			struct semid_ds *semaptr;

			semaptr = &sema[semid];
			if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
				panic("semexit - semid not allocated");
			if (semnum >= semaptr->sem_nsems)
				panic("semexit - semnum out of range");

			SEM_PRINTF(("semexit: %p id=%d num=%d(adj=%d) ; "
			    "sem=%d\n",
			    suptr->un_proc, suptr->un_ent[ix].un_id,
			    suptr->un_ent[ix].un_num,
			    suptr->un_ent[ix].un_adjval,
			    semaptr->_sem_base[semnum].semval));

			if (adjval < 0 &&
			    semaptr->_sem_base[semnum].semval < -adjval)
				semaptr->_sem_base[semnum].semval = 0;
			else
				semaptr->_sem_base[semnum].semval += adjval;

#ifdef SEM_WAKEUP
			sem_wakeup((caddr_t)semaptr);
#else
			wakeup((caddr_t)semaptr);
#endif
			SEM_PRINTF(("semexit: back from wakeup\n"));
		}
	}

	/*
	 * Deallocate the undo vector.
	 */
	SEM_PRINTF(("removing vector\n"));
	suptr->un_proc = NULL;
	*supptr = suptr->un_next;
}
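
/*
 * Note: semexit() runs from the exit hook registered by seminit() via
 * exithook_establish(), so an exiting process has its outstanding
 * SEM_UNDO adjustments applied even if it never calls semop() again.
 */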