/*	$OpenBSD: vfs_lockf.c,v 1.32 2019/01/21 18:09:21 anton Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/unistd.h>

struct pool lockf_state_pool;
struct pool lockf_pool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF	0x1
#define OTHERS	0x2
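
/*
 * Flags for lf_findoverlap(): SELF restricts the search to locks with
 * the same owner (lf_id) as the given lock, OTHERS to locks held by
 * any other owner.
 */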

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20
#define	DEBUG_LINK		0x40

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#define	LFPRINT(args, level)	if (lockf_debug & (level)) lf_print args
#else
#define	DPRINTF(args, level)
#define	LFPRINT(args, level)
#endif

void ls_ref(struct lockf_state *);
void ls_rele(struct lockf_state *);

void
lf_init(void)
{
	pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE,
	    PR_WAITOK, "lockfspl", NULL);
	pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE,
	    PR_WAITOK, "lockfpl", NULL);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

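/*
 * Reference counting for the per-file lock state.  The last reference
 * dropped in ls_rele() detaches the state from its owner and frees it.
 */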
void
ls_ref(struct lockf_state *ls)
{
	ls->ls_refs++;
}

void
ls_rele(struct lockf_state *ls)
{
	if (--ls->ls_refs > 0)
		return;

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(ls->ls_head == NULL);
	KASSERT(ls->ls_pending == 0);
#endif

	*ls->ls_owner = NULL;
	pool_put(&lockf_state_pool, ls);
}

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at the limit.  2 - cut off at
 * double the limit.
 */
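/*
 * For example, with the default maxlocksperuid of 1024, an F_SETLK
 * request (allowfail == 1) is refused with ENOLCK once the uid already
 * holds more than 1024 locks, while an allocation needed to service an
 * unlock (allowfail == 2) is refused only beyond 2048 locks.
 */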
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockf_pool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	LFPRINT(("lf_free", lock), DEBUG_LINK);

	if (lock->lf_state->ls_head == lock) {
		LFPRINT(("lf_free: head", lock->lf_next), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
		KASSERT(lock->lf_prev == NULL);
#endif /* LOCKF_DIAGNOSTIC */

		lock->lf_state->ls_head = lock->lf_next;
	}

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));
#endif /* LOCKF_DIAGNOSTIC */

	lf_unlink(lock);
	ls_rele(lock->lf_state);

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockf_pool, lock);
}

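/*
 * Link lock2 into the per-file lock list immediately after lock1,
 * splicing in any existing neighbours and updating the list head when
 * lock1 becomes the first lock in the list.
 */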
void
lf_link(struct lockf *lock1, struct lockf *lock2)
{
	LFPRINT(("lf_link: lock1", lock1), DEBUG_LINK);
	LFPRINT(("lf_link: lock2", lock2), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(lock1 != NULL && lock2 != NULL);
	KASSERT(lock1 != lock2);
	if (lock1->lf_next != NULL)
		KASSERT(lock2->lf_next == NULL);
	if (lock2->lf_prev != NULL)
		KASSERT(lock1->lf_prev == NULL);
#endif /* LOCKF_DIAGNOSTIC */

	if (lock1->lf_next != NULL) {
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next->lf_prev = lock2;
	}
	lock1->lf_next = lock2;

	if (lock2->lf_prev != NULL) {
		lock1->lf_prev = lock2->lf_prev;
		lock2->lf_prev->lf_next = lock1;
	}
	lock2->lf_prev = lock1;

	if (lock1->lf_state->ls_head == NULL) {
		LFPRINT(("lf_link: head", lock1), DEBUG_LINK);

		lock1->lf_state->ls_head = lock1;
	} else if (lock1->lf_state->ls_head == lock2) {
		LFPRINT(("lf_link: swap head", lock1), DEBUG_LINK);

		lock1->lf_state->ls_head = lock1;
	}
}

void
lf_unlink(struct lockf *lock)
{
	LFPRINT(("lf_unlink", lock), DEBUG_LINK);

	if (lock->lf_prev != NULL)
		lock->lf_prev->lf_next = lock->lf_next;
	if (lock->lf_next != NULL)
		lock->lf_next->lf_prev = lock->lf_prev;
	lock->lf_prev = lock->lf_next = NULL;
}

/*
 * Do an advisory lock operation.
 */
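/*
 * A typical caller is a file system's VOP_ADVLOCK() implementation,
 * passing the per-file lock state and the current file size; a sketch
 * modeled on the ufs implementation:
 *
 *	return (lf_advlock(&ip->i_lockf, DIP(ip, size), ap->a_id,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 */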
int
lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf_state *ls;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len > 0) {
		if (fl->l_len - 1 > LLONG_MAX - start)
			return (EOVERFLOW);
		end = start + (fl->l_len - 1);
	} else if (fl->l_len < 0) {
		/* A negative length is relative to the computed start. */
		if (start + fl->l_len < 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
	} else {
		end = -1;
	}
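	/*
	 * For example, l_whence == SEEK_SET, l_start == 100 and
	 * l_len == 50 yield the byte range [100, 149]; l_len == 0 yields
	 * end == -1, which is treated as "to end of file" throughout.
	 */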

	if (*state == NULL) {
		ls = pool_get(&lockf_state_pool, PR_WAITOK | PR_ZERO);
		/* pool_get() can sleep, ensure the race was won. */
		if (*state == NULL) {
			*state = ls;
			ls->ls_owner = state;
		} else {
			pool_put(&lockf_state_pool, ls);
		}
	}
	ls = *state;
	ls_ref(ls);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (ls->ls_head == NULL) {
		if (op != F_SETLK) {
			ls_rele(ls);
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock) {
		ls_rele(ls);
		return (ENOLCK);
	}
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_state = ls;
	lock->lf_type = fl->l_type;
	lock->lf_prev = NULL;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;

	switch (op) {
	case F_SETLK:
		return (lf_setlock(lock));
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		return (error);
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		return (error);
	default:
		lf_free(lock);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf *prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

	LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
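		/*
		 * For example, if process A would sleep here waiting for
		 * B's lock while B is already asleep waiting for a lock
		 * that A holds, the walk below follows B's wait channel
		 * back to A and the request fails with EDEADLK.
		 */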
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					lf_free(lock);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 * Since lock is not yet part of any list, it's safe to let the
		 * lf_next field refer to the blocking lock.
		 */
		lock->lf_next = block;
		LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
		LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		lock->lf_state->ls_pending++;
		error = tsleep(lock, priority, lockstr, 0);
		lock->lf_state->ls_pending--;
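		/* Wake up a thread in lf_purgelocks() draining pending locks. */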
		wakeup_one(lock->lf_state);
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
		if (lock->lf_flags & F_INTR) {
			lf_free(lock);
			return (EINTR);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	block = lock->lf_state->ls_head;
	prev = NULL;
	overlap = NULL;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				if (overlap)	/* insert before overlap */
					lf_link(lock, overlap);
				else if (prev)	/* last lock in list */
					lf_link(prev, lock);
				else {		/* first lock in list */
#ifdef LOCKF_DIAGNOSTIC
					KASSERT(lock->lf_state->ls_head == NULL);
#endif
					lock->lf_state->ls_head = lock;
				}
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap, 0);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				if (!needtolink)
					lf_unlink(lock);
				lf_link(lock, overlap);
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap, 0);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, 0);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				lf_link(lock, overlap);
				needtolink = 0;
			}
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			if (!needtolink)
				lf_unlink(lock);
			lf_link(overlap, lock);
			overlap->lf_end = lock->lf_start - 1;
			lf_wakelock(overlap, 0);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink)
				lf_link(lock, overlap);
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, 0);
			break;
		}
		break;
	}
	LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf *lf = lock->lf_state->ls_head;
	struct lockf *overlap, *prev;
	int ovcase;

	if (lf == NULL)
		return (0);
	LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &prev, &overlap))) {
		lf_wakelock(overlap, 0);

		switch (ovcase) {
		case 1: /* overlap == lock */
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			break;
		case 3: /* lock contains overlap */
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			lf = overlap->lf_next;
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

	LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf *prev, *overlap, *lf;

	lf = lock->lf_state->ls_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf **prev, struct lockf **overlap)
{
	off_t start, end;

	LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = lf;
			*overlap = lf = lf->lf_next;
			continue;
		}
		LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
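		/*
		 * A sketch of the cases, where "lock" is the incoming
		 * range and "lf" the existing lock being tested:
		 *
		 *	0: lock ----            3: lock  --------
		 *	   lf          ----        lf      ----
		 *	1: lock   ----          4: lock      ----
		 *	   lf     ----             lf     ----
		 *	2: lock    --           5: lock  ----
		 *	   lf     ------           lf       ----
		 */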

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = lf;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Purge all locks associated with the given lock state.
 */
void
lf_purgelocks(struct lockf_state *ls)
{
	struct lockf *lock;

	if (ls == NULL)
		return;

	ls_ref(ls);

	/* Interrupt blocked locks and wait for all of them to finish. */
	for (lock = ls->ls_head; lock != NULL; lock = lock->lf_next) {
		LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK);
		lf_wakelock(lock, F_INTR);
	}
	while (ls->ls_pending > 0) {
		tsleep(ls, PLOCK, "lockfp", 0);
	}

	/*
	 * Any remaining locks cannot block other locks at this point and can
	 * safely be removed.
	 */
	while ((lock = ls->ls_head) != NULL) {
		lf_free(lock);
	}

	/* This is the last expected thread to hold a lock state reference. */
#ifdef LOCKF_DIAGNOSTIC
	KASSERT(ls->ls_refs == 1);
#endif
	ls_rele(ls);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
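/*
 * For example, splitting an existing lock on [0, 99] (lock1) around a
 * contained region [40, 59] (lock2) shrinks lock1 to [0, 39], links
 * lock2 in after it and allocates a third lock covering [60, 99].
 */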
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

	LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
	LFPRINT(("splitting from", lock2), DEBUG_SPLIT);

	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lf_link(lock2, lock1);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lf_link(lock1, lock2);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	ls_ref(splitlock->lf_state);
	splitlock->lf_prev = NULL;
	splitlock->lf_next = NULL;
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;

	lf_link(lock1, lock2);
	lf_link(lock2, splitlock);
}

/*
 * Wake up all locks blocked on the given lock.
 */
void
lf_wakelock(struct lockf *lock, int flags)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakelock->lf_flags |= flags;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
	struct lockf	*block;

	if (tag)
		printf("%s: ", tag);
	printf("lock %p", lock);
	if (lock == NULL) {
		printf("\n");
		return;
	}
	printf(" for ");
	if (lock->lf_flags & F_POSIX)
		printf("thread %d", ((struct proc *)(lock->lf_id))->p_tid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	printf(", prev %p, next %p, state %p",
	    lock->lf_prev, lock->lf_next, lock->lf_state);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(", block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf;
#ifdef LOCKF_DIAGNOSTIC
	struct lockf *prev = NULL;
#endif /* LOCKF_DIAGNOSTIC */

	printf("%s: Lock list:\n", tag);
	for (lf = lock->lf_state->ls_head; lf; lf = lf->lf_next) {
		if (lock == lf)
			printf(" * ");
		else
			printf("   ");
		lf_print(NULL, lf);

#ifdef LOCKF_DIAGNOSTIC
		KASSERT(lf->lf_prev == prev);
		prev = lf;
#endif /* LOCKF_DIAGNOSTIC */
	}
}
#endif /* LOCKF_DEBUG */