/*	$OpenBSD: vfs_lockf.c,v 1.4 2001/07/28 17:03:50 gluk Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#else
#define	DPRINTF(args, level)
#endif
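
/*
 * Usage sketch for the macro above (illustrative only): the doubled
 * parentheses pass a complete printf() argument list, and the message
 * is emitted only when the matching bit is set in lockf_debug, e.g.
 *
 *	DPRINTF(("lf_setlock: lock %p blocked\n", lock), DEBUG_SETLOCK);
 */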

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
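
/*
 * A typical caller is a filesystem's VOP_ADVLOCK routine, which owns
 * the per-inode head of the lock list.  A minimal sketch, modelled
 * loosely on ufs_advlock() (field and member names here are
 * illustrative, not authoritative):
 *
 *	int
 *	ufs_advlock(v)
 *		void *v;
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(&ip->i_lockf, ip->i_ffs_size,
 *		    ap->a_id, ap->a_op, ap->a_fl, ap->a_flags));
 *	}
 */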

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					FREE(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_SETLOCK) {
			lf_print("lf_setlock", lock);
			lf_print("lf_setlock: blocking on", block);
		}
#endif /* LOCKF_DEBUG */
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
#if 0
		if (error) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			FREE(lock, M_LOCKF);
			return (error);
		}
#else
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			FREE(lock, M_LOCKF);
			return (error);
		}
#endif
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				FREE(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK) {
		lf_print("lf_setlock: got the lock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
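
/*
 * How the deadlock walk above chases pointers (a descriptive sketch,
 * not executable code): a process sleeping in lf_setlock() has p_wchan
 * pointing at its own struct lockf, whose lf_next names the lock that
 * blocks it; lf_id of that blocking lock is its owning process, which
 * may itself be asleep on yet another lockf:
 *
 *	proc A --p_wchan--> lockf A --lf_next--> lockf B --lf_id--> proc B
 *	proc B --p_wchan--> lockf B'--lf_next--> lockf A'--lf_id--> proc A
 *
 * Reaching our own process along this chain means a cycle, hence
 * EDEADLK; maxlockdepth bounds the walk.
 */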

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(lock)
	register struct lockf *lock;
{
	struct lockf **head = lock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, lock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			overlap->lf_next = lock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_GETLOCK)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
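
/*
 * From userland this surfaces through fcntl(2); a short illustration
 * of the F_GETLK round trip (standard interface, not specific to this
 * file):
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;		// lock we would like to set
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;			// zero length: to end of file
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("blocked by pid %ld\n", (long)fl.l_pid);
 */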

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf;

	prev = lock->lf_head;
	lf = *prev;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lf && lockf_debug & DEBUG_FINDOVR)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_FINDOVR)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 ||
		    (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
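
/*
 * The non-zero return values of lf_findoverlap(), drawn over a byte
 * range ("lock" is the incoming range, "lf" the list entry; an end of
 * -1 means end-of-file):
 *
 *	case 1:  lf    |=====|		identical range
 *	         lock  |=====|
 *
 *	case 2:  lf   |=========|	lf contains lock
 *	         lock    |===|
 *
 *	case 3:  lf      |===|		lock contains lf
 *	         lock |=========|
 *
 *	case 4:  lf   |=====|		lf starts before lock
 *	         lock    |=====|
 *
 *	case 5:  lf      |=====|	lf ends after lock
 *	         lock |=====|
 *
 * Case 0 (no overlap) keeps the walk moving; any other case stops it
 * at the first overlap found.
 */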

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SPLIT) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof (*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(lock)
	struct lockf *lock;
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{
	struct lockf *block;

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(" block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id %p", lf->lf_id);
		printf(" %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */