/*	$OpenBSD: vfs_lockf.c,v 1.25 2018/02/26 13:43:51 mpi Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/unistd.h>

struct pool lockfpool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#else
#define	DPRINTF(args, level)
#endif

void
lf_init(void)
{
	pool_init(&lockfpool, sizeof(struct lockf), 0, IPL_NONE, PR_WAITOK,
	    "lockfpl", NULL);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're cut off completely.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at the limit.  2 - cut off at double the limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}
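
/*
 * Illustrative example: with the default maxlocksperuid of 1024, a
 * non-root uid that already owns 1500 lockf structures can still
 * allocate when allowfail is 0 (used by lf_split) or 2 (the unlock
 * and getlock paths, cut off at 2048), but a new allocation with
 * allowfail 1 (F_SETLK) fails and the caller returns ENOLCK.
 */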

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockfpool, lock);
}


/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0) {
		end = -1;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
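
	/*
	 * Worked example of the conversion above: l_whence = SEEK_SET,
	 * l_start = 100 and l_len = 50 yield the byte range [100, 149];
	 * with l_len = 0 the lock runs to end-of-file, encoded here as
	 * end = -1.
	 */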

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;

	switch (op) {
	case F_SETLK:
		return (lf_setlock(lock));
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		return (error);
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		return (error);
	default:
		lf_free(lock);
		return (EINVAL);
	}
	/* NOTREACHED */
}
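
/*
 * A rough sketch of how this entry point is reached from userland
 * (illustrative only; the actual path from the system call to this
 * function goes through fcntl(2) and the file system's VOP_ADVLOCK()):
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;		exclusive (write) lock
 *	fl.l_whence = SEEK_SET;		l_start is an absolute offset
 *	fl.l_start = 100;
 *	fl.l_len = 50;			bytes 100..149
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		err(1, "fcntl");	EAGAIN if a conflicting lock
 *					is held by someone else
 *
 * Such a request arrives here as op = F_SETLK with the same struct
 * flock, and the byte range is converted to [start, end] as above.
 */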

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					lf_free(lock);
					return (EDEADLK);
				}
			}
		}
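		/*
		 * A concrete two-process example of the walk above: the
		 * current process A is about to sleep on "block", which
		 * is owned by process B.  If B is itself asleep in
		 * lf_setlock, B's p_wchan points at B's own pending
		 * lockf, and that lock's lf_next is the lock blocking B.
		 * When that blocking lock turns out to be owned by A,
		 * the loop detects the cycle after one step and returns
		 * EDEADLK instead of sleeping forever.
		 */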
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_SETLOCK) {
			lf_print("lf_setlock", lock);
			lf_print("lf_setlock: blocking on", block);
		}
#endif /* LOCKF_DEBUG */
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep(lock, priority, lockstr, 0);
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK) {
		lf_print("lf_setlock: got the lock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf **head = lock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &prev, &overlap))) {
		lf_wakelock(overlap);

		switch (ovcase) {
		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			overlap->lf_next = lock->lf_next;
			break;
		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf;

	prev = lock->lf_head;
	lf = *prev;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}
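
/*
 * For example, two shared (F_RDLCK) locks over the same range never
 * block each other: an overlap only counts as blocking when at least
 * one of the two locks is exclusive (F_WRLCK).
 */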

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lf && lockf_debug & DEBUG_FINDOVR)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_FINDOVR)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
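		/*
		 * Pictorially (rough sketch), with "lock" as the range
		 * being asked about and "lf" as the existing lock under
		 * consideration:
		 *
		 *	lock:            |=========|
		 *	case 0:  |--|                   |--|
		 *	case 1:          |---------|
		 *	case 2:        |--------------|
		 *	case 3:             |-----|
		 *	case 4:      |--------|
		 *	case 5:                |----------|
		 */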

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SPLIT) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;

	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
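
/*
 * Example: if lock1 covers bytes [0, 99] and lock2 covers [40, 59],
 * lock1 is trimmed to [0, 39], lock2 keeps [40, 59] and the new
 * splitlock takes the remainder [60, 99], giving three pieces.  When
 * the two ranges share a start or an end, the early returns above
 * leave only two pieces.
 */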

/*
 * Wake up all locks blocked on the given lock.
 */
void
lf_wakelock(struct lockf *lock)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
	struct lockf	*block;

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("thread %d", ((struct proc *)(lock->lf_id))->p_tid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(" block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("thread %d", ((struct proc*)(lf->lf_id))->p_tid);
		else
			printf("id %p", lf->lf_id);
		printf(" %s, start %llx, end %llx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */