/*	$OpenBSD: vfs_lockf.c,v 1.7 2003/07/21 22:44:50 tedu Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

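/*
 * Owner-match flags for lf_findoverlap(): SELF restricts the scan to
 * locks whose lf_id matches the request, OTHERS to locks belonging to
 * anyone else.
 */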
#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;
/*
 * The do/while wrapper keeps DPRINTF() from swallowing the "else" of
 * an enclosing if/else.
 */
#define	DPRINTF(args, level)	do {					\
	if (lockf_debug & (level))					\
		printf args;						\
} while (0)
#else
#define	DPRINTF(args, level)
#endif

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
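	/*
	 * A zero l_len means "lock to end of file"; internally that is
	 * represented by an end offset of -1.
	 */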
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	/*
	 * Create the lockf structure.
	 */
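	/* M_WAITOK: the allocation may sleep, but will not fail. */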
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
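	/*
	 * Writers sleep at a slightly lower priority than readers;
	 * PCATCH lets a signal interrupt the sleep so a blocked lock
	 * request can be aborted.
	 */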
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

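			/*
			 * Each sleeper in lf_setlock() uses its lockf
			 * struct as the wait channel (see the tsleep()
			 * call below), and that struct's lf_next points
			 * at the lock blocking it.  Following wchan ->
			 * lf_next -> owner therefore walks the chain of
			 * waiters; if it leads back to us, granting
			 * this lock would create a cycle.
			 */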
			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					FREE(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_SETLOCK) {
			lf_print("lf_setlock", lock);
			lf_print("lf_setlock: blocking on", block);
		}
#endif /* LOCKF_DEBUG */
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep(lock, priority, lockstr, 0);
#if 0
		if (error) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			FREE(lock, M_LOCKF);
			return (error);
		}
#else
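		/*
		 * lf_wakelock() clears lf_next when it dequeues us, so a
		 * non-NULL lf_next here means the sleep ended some other
		 * way (e.g. a signal) and we must unlink ourselves from
		 * the blocked list.
		 */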
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			FREE(lock, M_LOCKF);
			return (error);
		}
#endif
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
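		/*
		 * Cases 3 and 4 "continue" because more of our own locks
		 * may still overlap the new range; the other cases consume
		 * at most one overlap and break out of the loop.
		 */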
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				FREE(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK) {
		lf_print("lf_setlock: got the lock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(lock)
	register struct lockf *lock;
{
	struct lockf **head = lock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, lock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			overlap->lf_next = lock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_GETLOCK)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf;

	prev = lock->lf_head;
	lf = *prev;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
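		/*
		 * Two overlapping locks conflict only if at least one of
		 * them is exclusive; overlapping shared locks coexist.
		 */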
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lf && lockf_debug & DEBUG_FINDOVR)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_FINDOVR)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
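		/*
		 * Pictorially (lock = requested range, lf = existing lock):
		 *
		 *	0:  lock |---|             1:  lock |---|
		 *	    lf         |---|           lf   |---|
		 *
		 *	2:  lock   |-|             3:  lock |-----|
		 *	    lf   |-----|               lf     |-|
		 *
		 *	4:  lock   |-----|         5:  lock |-----|
		 *	    lf   |-----|               lf     |-----|
		 */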

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 ||
		    (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SPLIT) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
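	/*
	 * lock2 lies entirely within lock1 (findoverlap case 2): trim
	 * lock1 to the region in front of lock2 and, when lock2 does
	 * not reach lock1's end, allocate a third lock for the region
	 * behind it.
	 */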
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	memcpy(splitlock, lock1, sizeof (*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wake up everyone on a lock's block list.
 */
void
lf_wakelock(lock)
	struct lockf *lock;
{
	struct lockf *wakelock;

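	/*
	 * Clearing lf_next tells the sleeper in lf_setlock() that it
	 * has already been removed from the blocked list.
	 */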
	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{
	struct lockf	*block;

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(" block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id %p", lf->lf_id);
		printf(" %s, start %llx, end %llx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */