1 /* $OpenBSD: vfs_lockf.c,v 1.21 2015/12/22 21:39:34 mmcc Exp $ */ 2 /* $NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * Scooter Morris at Genentech Inc. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 36 */ 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/kernel.h> 41 #include <sys/proc.h> 42 #include <sys/vnode.h> 43 #include <sys/pool.h> 44 #include <sys/fcntl.h> 45 #include <sys/lockf.h> 46 #include <sys/unistd.h> 47 48 struct pool lockfpool; 49 50 /* 51 * This variable controls the maximum number of processes that will 52 * be checked in doing deadlock detection. 53 */ 54 int maxlockdepth = MAXDEPTH; 55 56 #define SELF 0x1 57 #define OTHERS 0x2 58 59 #ifdef LOCKF_DEBUG 60 61 #define DEBUG_SETLOCK 0x01 62 #define DEBUG_CLEARLOCK 0x02 63 #define DEBUG_GETLOCK 0x04 64 #define DEBUG_FINDOVR 0x08 65 #define DEBUG_SPLIT 0x10 66 #define DEBUG_WAKELOCK 0x20 67 68 int lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK; 69 70 #define DPRINTF(args, level) if (lockf_debug & (level)) printf args 71 #else 72 #define DPRINTF(args, level) 73 #endif 74 75 void 76 lf_init(void) 77 { 78 pool_init(&lockfpool, sizeof(struct lockf), 0, 0, PR_WAITOK, 79 "lockfpl", NULL); 80 } 81 82 struct lockf *lf_alloc(uid_t, int); 83 void lf_free(struct lockf *); 84 85 /* 86 * We enforce a limit on locks by uid, so that a single user cannot 87 * run the kernel out of memory. For now, the limit is pretty coarse. 88 * There is no limit on root. 89 * 90 * Splitting a lock will always succeed, regardless of current allocations. 91 * If you're slightly above the limit, we still have to permit an allocation 92 * so that the unlock can succeed. If the unlocking causes too many splits, 93 * however, you're totally cutoff. 94 */ 95 int maxlocksperuid = 1024; 96 97 /* 98 * 3 options for allowfail. 99 * 0 - always allocate. 1 - cutoff at limit. 2 - cutoff at double limit. 100 */ 101 struct lockf * 102 lf_alloc(uid_t uid, int allowfail) 103 { 104 struct uidinfo *uip; 105 struct lockf *lock; 106 107 uip = uid_find(uid); 108 if (uid && allowfail && uip->ui_lockcnt > 109 (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) 110 return (NULL); 111 uip->ui_lockcnt++; 112 lock = pool_get(&lockfpool, PR_WAITOK); 113 lock->lf_uid = uid; 114 return (lock); 115 } 116 117 void 118 lf_free(struct lockf *lock) 119 { 120 struct uidinfo *uip; 121 122 uip = uid_find(lock->lf_uid); 123 uip->ui_lockcnt--; 124 pool_put(&lockfpool, lock); 125 } 126 127 128 /* 129 * Do an advisory lock operation. 130 */ 131 int 132 lf_advlock(struct lockf **head, off_t size, caddr_t id, int op, 133 struct flock *fl, int flags) 134 { 135 struct proc *p = curproc; 136 struct lockf *lock; 137 off_t start, end; 138 int error; 139 140 /* 141 * Convert the flock structure into a start and end. 142 */ 143 switch (fl->l_whence) { 144 case SEEK_SET: 145 case SEEK_CUR: 146 /* 147 * Caller is responsible for adding any necessary offset 148 * when SEEK_CUR is used. 149 */ 150 start = fl->l_start; 151 break; 152 case SEEK_END: 153 start = size + fl->l_start; 154 break; 155 default: 156 return (EINVAL); 157 } 158 if (start < 0) 159 return (EINVAL); 160 if (fl->l_len == 0) { 161 end = -1; 162 } else { 163 end = start + fl->l_len - 1; 164 if (end < start) 165 return (EINVAL); 166 } 167 168 /* 169 * Avoid the common case of unlocking when inode has no locks. 170 */ 171 if (*head == NULL) { 172 if (op != F_SETLK) { 173 fl->l_type = F_UNLCK; 174 return (0); 175 } 176 } 177 178 lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2); 179 if (!lock) 180 return (ENOLCK); 181 lock->lf_start = start; 182 lock->lf_end = end; 183 lock->lf_id = id; 184 lock->lf_head = head; 185 lock->lf_type = fl->l_type; 186 lock->lf_next = NULL; 187 TAILQ_INIT(&lock->lf_blkhd); 188 lock->lf_flags = flags; 189 lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1; 190 191 switch (op) { 192 case F_SETLK: 193 return (lf_setlock(lock)); 194 case F_UNLCK: 195 error = lf_clearlock(lock); 196 lf_free(lock); 197 return (error); 198 case F_GETLK: 199 error = lf_getlock(lock, fl); 200 lf_free(lock); 201 return (error); 202 default: 203 lf_free(lock); 204 return (EINVAL); 205 } 206 /* NOTREACHED */ 207 } 208 209 /* 210 * Set a byte-range lock. 211 */ 212 int 213 lf_setlock(struct lockf *lock) 214 { 215 struct lockf *block; 216 struct lockf **head = lock->lf_head; 217 struct lockf **prev, *overlap, *ltmp; 218 static char lockstr[] = "lockf"; 219 int ovcase, priority, needtolink, error; 220 221 #ifdef LOCKF_DEBUG 222 if (lockf_debug & DEBUG_SETLOCK) 223 lf_print("lf_setlock", lock); 224 #endif /* LOCKF_DEBUG */ 225 226 priority = PLOCK; 227 if (lock->lf_type == F_WRLCK) 228 priority += 4; 229 priority |= PCATCH; 230 /* 231 * Scan lock list for this file looking for locks that would block us. 232 */ 233 while ((block = lf_getblock(lock)) != NULL) { 234 if ((lock->lf_flags & F_WAIT) == 0) { 235 lf_free(lock); 236 return (EAGAIN); 237 } 238 /* 239 * We are blocked. Since flock style locks cover 240 * the whole file, there is no chance for deadlock. 241 * For byte-range locks we must check for deadlock. 242 * 243 * Deadlock detection is done by looking through the 244 * wait channels to see if there are any cycles that 245 * involve us. MAXDEPTH is set just to make sure we 246 * do not go off into neverland. 247 */ 248 if ((lock->lf_flags & F_POSIX) && 249 (block->lf_flags & F_POSIX)) { 250 struct proc *wproc; 251 struct lockf *waitblock; 252 int i = 0; 253 254 /* The block is waiting on something */ 255 wproc = (struct proc *)block->lf_id; 256 while (wproc->p_wchan && 257 (wproc->p_wmesg == lockstr) && 258 (i++ < maxlockdepth)) { 259 waitblock = (struct lockf *)wproc->p_wchan; 260 /* Get the owner of the blocking lock */ 261 waitblock = waitblock->lf_next; 262 if ((waitblock->lf_flags & F_POSIX) == 0) 263 break; 264 wproc = (struct proc *)waitblock->lf_id; 265 if (wproc == (struct proc *)lock->lf_id) { 266 lf_free(lock); 267 return (EDEADLK); 268 } 269 } 270 } 271 /* 272 * For flock type locks, we must first remove 273 * any shared locks that we hold before we sleep 274 * waiting for an exclusive lock. 275 */ 276 if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) { 277 lock->lf_type = F_UNLCK; 278 (void)lf_clearlock(lock); 279 lock->lf_type = F_WRLCK; 280 } 281 /* 282 * Add our lock to the blocked list and sleep until we're free. 283 * Remember who blocked us (for deadlock detection). 284 */ 285 lock->lf_next = block; 286 #ifdef LOCKF_DEBUG 287 if (lockf_debug & DEBUG_SETLOCK) { 288 lf_print("lf_setlock", lock); 289 lf_print("lf_setlock: blocking on", block); 290 } 291 #endif /* LOCKF_DEBUG */ 292 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block); 293 error = tsleep(lock, priority, lockstr, 0); 294 if (lock->lf_next != NULL) { 295 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block); 296 lock->lf_next = NULL; 297 } 298 if (error) { 299 lf_free(lock); 300 return (error); 301 } 302 } 303 /* 304 * No blocks!! Add the lock. Note that we will 305 * downgrade or upgrade any overlapping locks this 306 * process already owns. 307 * 308 * Skip over locks owned by other processes. 309 * Handle any locks that overlap and are owned by ourselves. 310 */ 311 prev = head; 312 block = *head; 313 needtolink = 1; 314 for (;;) { 315 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap); 316 if (ovcase) 317 block = overlap->lf_next; 318 /* 319 * Six cases: 320 * 0) no overlap 321 * 1) overlap == lock 322 * 2) overlap contains lock 323 * 3) lock contains overlap 324 * 4) overlap starts before lock 325 * 5) overlap ends after lock 326 */ 327 switch (ovcase) { 328 case 0: /* no overlap */ 329 if (needtolink) { 330 *prev = lock; 331 lock->lf_next = overlap; 332 } 333 break; 334 case 1: /* overlap == lock */ 335 /* 336 * If downgrading lock, others may be 337 * able to acquire it. 338 */ 339 if (lock->lf_type == F_RDLCK && 340 overlap->lf_type == F_WRLCK) 341 lf_wakelock(overlap); 342 overlap->lf_type = lock->lf_type; 343 lf_free(lock); 344 lock = overlap; /* for debug output below */ 345 break; 346 case 2: /* overlap contains lock */ 347 /* 348 * Check for common starting point and different types. 349 */ 350 if (overlap->lf_type == lock->lf_type) { 351 lf_free(lock); 352 lock = overlap; /* for debug output below */ 353 break; 354 } 355 if (overlap->lf_start == lock->lf_start) { 356 *prev = lock; 357 lock->lf_next = overlap; 358 overlap->lf_start = lock->lf_end + 1; 359 } else 360 lf_split(overlap, lock); 361 lf_wakelock(overlap); 362 break; 363 case 3: /* lock contains overlap */ 364 /* 365 * If downgrading lock, others may be able to 366 * acquire it, otherwise take the list. 367 */ 368 if (lock->lf_type == F_RDLCK && 369 overlap->lf_type == F_WRLCK) { 370 lf_wakelock(overlap); 371 } else { 372 while ((ltmp = 373 TAILQ_FIRST(&overlap->lf_blkhd))) { 374 TAILQ_REMOVE(&overlap->lf_blkhd, ltmp, 375 lf_block); 376 ltmp->lf_next = lock; 377 TAILQ_INSERT_TAIL(&lock->lf_blkhd, 378 ltmp, lf_block); 379 } 380 } 381 /* 382 * Add the new lock if necessary and delete the overlap. 383 */ 384 if (needtolink) { 385 *prev = lock; 386 lock->lf_next = overlap->lf_next; 387 prev = &lock->lf_next; 388 needtolink = 0; 389 } else 390 *prev = overlap->lf_next; 391 lf_free(overlap); 392 continue; 393 case 4: /* overlap starts before lock */ 394 /* 395 * Add lock after overlap on the list. 396 */ 397 lock->lf_next = overlap->lf_next; 398 overlap->lf_next = lock; 399 overlap->lf_end = lock->lf_start - 1; 400 prev = &lock->lf_next; 401 lf_wakelock(overlap); 402 needtolink = 0; 403 continue; 404 case 5: /* overlap ends after lock */ 405 /* 406 * Add the new lock before overlap. 407 */ 408 if (needtolink) { 409 *prev = lock; 410 lock->lf_next = overlap; 411 } 412 overlap->lf_start = lock->lf_end + 1; 413 lf_wakelock(overlap); 414 break; 415 } 416 break; 417 } 418 #ifdef LOCKF_DEBUG 419 if (lockf_debug & DEBUG_SETLOCK) { 420 lf_print("lf_setlock: got the lock", lock); 421 } 422 #endif /* LOCKF_DEBUG */ 423 return (0); 424 } 425 426 /* 427 * Remove a byte-range lock on an inode. 428 * 429 * Generally, find the lock (or an overlap to that lock) 430 * and remove it (or shrink it), then wakeup anyone we can. 431 */ 432 int 433 lf_clearlock(struct lockf *lock) 434 { 435 struct lockf **head = lock->lf_head; 436 struct lockf *lf = *head; 437 struct lockf *overlap, **prev; 438 int ovcase; 439 440 if (lf == NULL) 441 return (0); 442 #ifdef LOCKF_DEBUG 443 if (lockf_debug & DEBUG_CLEARLOCK) 444 lf_print("lf_clearlock", lock); 445 #endif /* LOCKF_DEBUG */ 446 prev = head; 447 while ((ovcase = lf_findoverlap(lf, lock, SELF, &prev, &overlap))) { 448 lf_wakelock(overlap); 449 450 switch (ovcase) { 451 case 1: /* overlap == lock */ 452 *prev = overlap->lf_next; 453 lf_free(overlap); 454 break; 455 case 2: /* overlap contains lock: split it */ 456 if (overlap->lf_start == lock->lf_start) { 457 overlap->lf_start = lock->lf_end + 1; 458 break; 459 } 460 lf_split(overlap, lock); 461 overlap->lf_next = lock->lf_next; 462 break; 463 case 3: /* lock contains overlap */ 464 *prev = overlap->lf_next; 465 lf = overlap->lf_next; 466 lf_free(overlap); 467 continue; 468 case 4: /* overlap starts before lock */ 469 overlap->lf_end = lock->lf_start - 1; 470 prev = &overlap->lf_next; 471 lf = overlap->lf_next; 472 continue; 473 case 5: /* overlap ends after lock */ 474 overlap->lf_start = lock->lf_end + 1; 475 break; 476 } 477 break; 478 } 479 return (0); 480 } 481 482 /* 483 * Check whether there is a blocking lock, 484 * and if so return its process identifier. 485 */ 486 int 487 lf_getlock(struct lockf *lock, struct flock *fl) 488 { 489 struct lockf *block; 490 491 #ifdef LOCKF_DEBUG 492 if (lockf_debug & DEBUG_CLEARLOCK) 493 lf_print("lf_getlock", lock); 494 #endif /* LOCKF_DEBUG */ 495 496 if ((block = lf_getblock(lock)) != NULL) { 497 fl->l_type = block->lf_type; 498 fl->l_whence = SEEK_SET; 499 fl->l_start = block->lf_start; 500 if (block->lf_end == -1) 501 fl->l_len = 0; 502 else 503 fl->l_len = block->lf_end - block->lf_start + 1; 504 fl->l_pid = block->lf_pid; 505 } else { 506 fl->l_type = F_UNLCK; 507 } 508 return (0); 509 } 510 511 /* 512 * Walk the list of locks for an inode and 513 * return the first blocking lock. 514 */ 515 struct lockf * 516 lf_getblock(struct lockf *lock) 517 { 518 struct lockf **prev, *overlap, *lf; 519 520 prev = lock->lf_head; 521 lf = *prev; 522 while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) { 523 /* 524 * We've found an overlap, see if it blocks us 525 */ 526 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) 527 return (overlap); 528 /* 529 * Nope, point to the next one on the list and 530 * see if it blocks us 531 */ 532 lf = overlap->lf_next; 533 } 534 return (NULL); 535 } 536 537 /* 538 * Walk the list of locks for an inode to 539 * find an overlapping lock (if any). 540 * 541 * NOTE: this returns only the FIRST overlapping lock. There 542 * may be more than one. 543 */ 544 int 545 lf_findoverlap(struct lockf *lf, struct lockf *lock, int type, 546 struct lockf ***prev, struct lockf **overlap) 547 { 548 off_t start, end; 549 550 #ifdef LOCKF_DEBUG 551 if (lf && lockf_debug & DEBUG_FINDOVR) 552 lf_print("lf_findoverlap: looking for overlap in", lock); 553 #endif /* LOCKF_DEBUG */ 554 555 *overlap = lf; 556 start = lock->lf_start; 557 end = lock->lf_end; 558 while (lf != NULL) { 559 if (((type & SELF) && lf->lf_id != lock->lf_id) || 560 ((type & OTHERS) && lf->lf_id == lock->lf_id)) { 561 *prev = &lf->lf_next; 562 *overlap = lf = lf->lf_next; 563 continue; 564 } 565 #ifdef LOCKF_DEBUG 566 if (lockf_debug & DEBUG_FINDOVR) 567 lf_print("\tchecking", lf); 568 #endif /* LOCKF_DEBUG */ 569 /* 570 * OK, check for overlap 571 * 572 * Six cases: 573 * 0) no overlap 574 * 1) overlap == lock 575 * 2) overlap contains lock 576 * 3) lock contains overlap 577 * 4) overlap starts before lock 578 * 5) overlap ends after lock 579 */ 580 581 /* Case 0 */ 582 if ((lf->lf_end != -1 && start > lf->lf_end) || 583 (end != -1 && lf->lf_start > end)) { 584 DPRINTF(("no overlap\n"), DEBUG_FINDOVR); 585 if ((type & SELF) && end != -1 && lf->lf_start > end) 586 return (0); 587 *prev = &lf->lf_next; 588 *overlap = lf = lf->lf_next; 589 continue; 590 } 591 /* Case 1 */ 592 if ((lf->lf_start == start) && (lf->lf_end == end)) { 593 DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR); 594 return (1); 595 } 596 /* Case 2 */ 597 if ((lf->lf_start <= start) && 598 (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) { 599 DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR); 600 return (2); 601 } 602 /* Case 3 */ 603 if (start <= lf->lf_start && 604 (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) { 605 DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR); 606 return (3); 607 } 608 /* Case 4 */ 609 if ((lf->lf_start < start) && 610 ((lf->lf_end >= start) || (lf->lf_end == -1))) { 611 DPRINTF(("overlap starts before lock\n"), 612 DEBUG_FINDOVR); 613 return (4); 614 } 615 /* Case 5 */ 616 if ((lf->lf_start > start) && (end != -1) && 617 ((lf->lf_end > end) || (lf->lf_end == -1))) { 618 DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR); 619 return (5); 620 } 621 panic("lf_findoverlap: default"); 622 } 623 return (0); 624 } 625 626 /* 627 * Split a lock and a contained region into 628 * two or three locks as necessary. 629 */ 630 void 631 lf_split(struct lockf *lock1, struct lockf *lock2) 632 { 633 struct lockf *splitlock; 634 635 #ifdef LOCKF_DEBUG 636 if (lockf_debug & DEBUG_SPLIT) { 637 lf_print("lf_split", lock1); 638 lf_print("splitting from", lock2); 639 } 640 #endif /* LOCKF_DEBUG */ 641 /* 642 * Check to see if splitting into only two pieces. 643 */ 644 if (lock1->lf_start == lock2->lf_start) { 645 lock1->lf_start = lock2->lf_end + 1; 646 lock2->lf_next = lock1; 647 return; 648 } 649 if (lock1->lf_end == lock2->lf_end) { 650 lock1->lf_end = lock2->lf_start - 1; 651 lock2->lf_next = lock1->lf_next; 652 lock1->lf_next = lock2; 653 return; 654 } 655 /* 656 * Make a new lock consisting of the last part of 657 * the encompassing lock 658 */ 659 splitlock = lf_alloc(lock1->lf_uid, 0); 660 memcpy(splitlock, lock1, sizeof(*splitlock)); 661 splitlock->lf_start = lock2->lf_end + 1; 662 splitlock->lf_block.tqe_next = NULL; 663 TAILQ_INIT(&splitlock->lf_blkhd); 664 lock1->lf_end = lock2->lf_start - 1; 665 666 lock2->lf_next = splitlock; 667 lock1->lf_next = lock2; 668 } 669 670 /* 671 * Wakeup a blocklist 672 */ 673 void 674 lf_wakelock(struct lockf *lock) 675 { 676 struct lockf *wakelock; 677 678 while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) { 679 TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block); 680 wakelock->lf_next = NULL; 681 wakeup_one(wakelock); 682 } 683 } 684 685 #ifdef LOCKF_DEBUG 686 /* 687 * Print out a lock. 688 */ 689 void 690 lf_print(char *tag, struct lockf *lock) 691 { 692 struct lockf *block; 693 694 printf("%s: lock %p for ", tag, lock); 695 if (lock->lf_flags & F_POSIX) 696 printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid); 697 else 698 printf("id %p", lock->lf_id); 699 printf(" %s, start %llx, end %llx", 700 lock->lf_type == F_RDLCK ? "shared" : 701 lock->lf_type == F_WRLCK ? "exclusive" : 702 lock->lf_type == F_UNLCK ? "unlock" : 703 "unknown", lock->lf_start, lock->lf_end); 704 block = TAILQ_FIRST(&lock->lf_blkhd); 705 if (block) 706 printf(" block"); 707 TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block) 708 printf(" %p,", block); 709 printf("\n"); 710 711 } 712 713 void 714 lf_printlist(char *tag, struct lockf *lock) 715 { 716 struct lockf *lf; 717 718 printf("%s: Lock list:\n", tag); 719 for (lf = *lock->lf_head; lf; lf = lf->lf_next) { 720 printf("\tlock %p for ", lf); 721 if (lf->lf_flags & F_POSIX) 722 printf("proc %d", ((struct proc*)(lf->lf_id))->p_pid); 723 else 724 printf("id %p", lf->lf_id); 725 printf(" %s, start %llx, end %llx", 726 lf->lf_type == F_RDLCK ? "shared" : 727 lf->lf_type == F_WRLCK ? "exclusive" : 728 lf->lf_type == F_UNLCK ? "unlock" : 729 "unknown", lf->lf_start, lf->lf_end); 730 printf("\n"); 731 } 732 } 733 #endif /* LOCKF_DEBUG */ 734