/*	$NetBSD: vfs_lockf.c,v 1.22 2003/02/01 06:23:45 thorpej Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.22 2003/02/01 06:23:45 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
 * interlock should be sufficient; (b) requires a change to the API
 * because the vnode isn't visible here.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */
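
/*
 * Usage sketch (not part of this file; names are illustrative): a
 * filesystem exports advisory locking by having its VOP_ADVLOCK entry
 * point hand lf_advlock() the per-inode lock list head and the current
 * file size, roughly along the lines of a ufs-style layer:
 *
 *	int
 *	xxx_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */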

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args *ap;
	struct lockf **head;
	off_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part.  This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
		lock->lf_id = (caddr_t) curlwp;
	} else {
		lock->lf_id = ap->a_id;	/* Not a proc at all, but a file * */
	}

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked.  Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
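		/*
		 * Chain-walk below: the owner of the blocking lock is
		 * itself asleep in lf_setlock() whenever its wait
		 * message is "lockf"; in that case its wait channel is
		 * the struct lockf it sleeps on, whose lf_next points
		 * at the lock blocking *it*.  We follow owner ->
		 * blocking lock -> owner until the chain ends or leads
		 * back to us; the latter means granting this lock
		 * would complete a cycle, so we fail with EDEADLK.
		 */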
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wlwp = (struct lwp *)block->lf_id;
			while (wlwp->l_wchan &&
			    (wlwp->l_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = (struct lwp *)waitblock->lf_id;
				if (wlwp == (struct lwp *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
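			/*
			 * Three sub-cases: if the existing lock already
			 * has the requested type, the request is
			 * redundant and the new lock is discarded; if
			 * the two share a starting offset, the overlap
			 * is trimmed to begin just past the new lock,
			 * which is linked in ahead of it; otherwise the
			 * overlap is split around the new lock by
			 * lf_split().
			 */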
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct lwp *)(block->lf_id))->l_proc->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
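/*
 * How each non-zero return value relates "lock" (the range being
 * looked up) to "overlap" (the existing entry found); widths are
 * illustrative only, and an end offset of -1 means "through EOF":
 *
 *      lock:            |=======|
 *      1: overlap       |=======|        overlap == lock
 *      2: overlap    |=============|     overlap contains lock
 *      3: overlap         |===|          lock contains overlap
 *      4: overlap    |======|            overlap starts before lock
 *      5: overlap           |======|     overlap ends after lock
 *
 * *overlap is left pointing at the entry found and *prev at the
 * pointer that links to it; both are advanced past entries that do
 * not match the SELF/OTHERS filter or do not overlap at all.
 */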
int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
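	/*
	 * lock1 is an existing lock that wholly contains the new
	 * region lock2.  If the two share a start or an end, shrinking
	 * lock1 to the uncovered part and chaining lock2 next to it is
	 * enough; otherwise a third lock must be created for the tail
	 * of lock1 that lies beyond lock2.
	 */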
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%p", lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%p", lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%p", blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */