/*	$OpenBSD: vfs_lockf.c,v 1.39 2019/04/20 14:13:11 anton Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/rwlock.h>
#include <sys/unistd.h>

/*
 * The lockf structure describes a single byte-range lock.  The lockf
 * structures for a file hang off that file's inode (via a lockf_state)
 * and are kept sorted by the starting byte of each lock so that
 * overlapping locks can be found efficiently.
 */
TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	caddr_t	lf_id;		 /* The id of the resource holding the lock */
	struct	lockf_state *lf_state;	/* State associated with the lock */
	TAILQ_ENTRY(lockf) lf_entry;
	struct	lockf *lf_blk;	 /* The lock that blocks us */
	struct	locklist lf_blkhd;	/* The list of blocked locks */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		/* User ID responsible */
	pid_t	lf_pid;		/* POSIX - owner pid */
};

struct lockf_state {
	TAILQ_HEAD(, lockf)	  ls_locks;	/* list of locks */
	struct lockf_state	**ls_owner;	/* owner */
	int			  ls_refs;	/* reference counter */
	int			  ls_pending;	/* pending lock operations */
};

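/*
 * Illustration: with three regions of a file locked, ls_locks is kept
 * sorted by lf_start:
 *
 *	ls_locks: [0,9] -> [20,29] -> [40,EOF]
 *
 * A conflicting request is queued on the blocking lock's lf_blkhd list
 * and records its blocker in lf_blk; the deadlock detection in
 * lf_setlock() follows these lf_blk pointers.
 */
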
struct pool lockf_state_pool;
struct pool lockf_pool;

/*
 * This variable bounds the number of processes that will be examined
 * when doing deadlock detection.
 */
int maxlockdepth = 50;

#define SELF	0x1	/* match only locks owned by the caller */
#define OTHERS	0x2	/* match only locks owned by other processes */

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20
#define	DEBUG_LINK		0x40

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

void	lf_print(const char *, struct lockf *);
void	lf_printlist(const char *, struct lockf *);

/* The do/while wrappers let these macros nest safely in if/else bodies. */
#define	DPRINTF(args, level)	do {				\
	if (lockf_debug & (level))				\
		printf args;					\
} while (0)
#define	LFPRINT(args, level)	do {				\
	if (lockf_debug & (level))				\
		lf_print args;					\
} while (0)
#else
#define	DPRINTF(args, level)	do { } while (0)
#define	LFPRINT(args, level)	do { } while (0)
#endif

int lf_clearlock(struct lockf *);
int lf_findoverlap(struct lockf *, struct lockf *, int, struct lockf **);
struct lockf *lf_getblock(struct lockf *);
int lf_getlock(struct lockf *, struct flock *);
int lf_setlock(struct lockf *);
void lf_split(struct lockf *, struct lockf *);
void lf_wakelock(struct lockf *, int);
void ls_ref(struct lockf_state *);
void ls_rele(struct lockf_state *);

/*
 * Serializes access to each instance of struct lockf and struct lockf_state
 * and each pointer from a vnode to struct lockf_state.
 */
struct rwlock lockf_lock = RWLOCK_INITIALIZER("lockflk");

void
lf_init(void)
{
	pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfspl", NULL);
	pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfpl", NULL);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

void
ls_ref(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	ls->ls_refs++;
}

void
ls_rele(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	if (--ls->ls_refs > 0)
		return;

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(TAILQ_EMPTY(&ls->ls_locks));
	KASSERT(ls->ls_pending == 0);
#endif

	*ls->ls_owner = NULL;
	pool_put(&lockf_state_pool, ls);
}

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate, 1 - cut off at the limit, 2 - cut off at twice
 * the limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockf_pool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_free", lock), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));
#endif /* LOCKF_DIAGNOSTIC */

	ls_rele(lock->lf_state);

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockf_pool, lock);
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf_state *ls;
	struct lockf *lock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len > 0) {
		if (fl->l_len - 1 > LLONG_MAX - start)
			return (EOVERFLOW);
		end = start + (fl->l_len - 1);
	} else if (fl->l_len < 0) {
		/*
		 * Use the computed start so that SEEK_END requests are
		 * handled correctly, too.
		 */
		if (start + fl->l_len < 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
	} else {
		end = -1;
	}
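	/*
	 * Example: with l_whence = SEEK_SET, l_start = 100 and
	 * l_len = -10, the range [90,99] is locked; with l_len = 0,
	 * everything from byte 100 through end of file is locked
	 * (represented by lf_end = -1).
	 */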

	rw_enter_write(&lockf_lock);
	ls = *state;

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (ls == NULL && op != F_SETLK) {
		fl->l_type = F_UNLCK;
		goto out;
	}

	if (ls == NULL) {
		ls = pool_get(&lockf_state_pool, PR_WAITOK | PR_ZERO);
		ls->ls_owner = state;
		TAILQ_INIT(&ls->ls_locks);
		*state = ls;
	}
	ls_ref(ls);

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (lock == NULL) {
		ls_rele(ls);
		error = ENOLCK;
		goto out;
	}
	lock->lf_flags = flags;
	lock->lf_type = fl->l_type;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_state = ls;
	lock->lf_blk = NULL;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;
	TAILQ_INIT(&lock->lf_blkhd);

	switch (op) {
	case F_SETLK:
		error = lf_setlock(lock);
		break;
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		break;
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		break;
	default:
		lf_free(lock);
		error = EINVAL;
		break;
	}

out:
	rw_exit_write(&lockf_lock);
	return (error);
}
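
/*
 * Sketch of a typical caller (an assumption for illustration, not part
 * of this file): a file system's VOP_ADVLOCK() routine forwards to
 * lf_advlock() with its per-inode lock state, roughly as ufs does:
 *
 *	struct vop_advlock_args *ap = v;
 *	struct inode *ip = VTOI(ap->a_vp);
 *
 *	return (lf_advlock(&ip->i_lockf, DIP(ip, size), ap->a_id,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 */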

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);

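	/*
	 * A blocked writer sleeps at a slightly worse priority than a
	 * blocked reader (PLOCK + 4); PCATCH makes the sleep
	 * interruptible by signals.
	 */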
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  maxlockdepth bounds the walk to make
		 * sure we do not go off into neverland.
		 */
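		/*
		 * Example: process A holds lock La and is asleep
		 * waiting for B's lock Lb.  If B now requests a range
		 * covered by La, the walk below goes La -> owner A ->
		 * A's wait channel -> Lb -> owner B, arriving back at
		 * the requesting process, so EDEADLK is returned.
		 */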
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			KERNEL_LOCK();
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_blk;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					KERNEL_UNLOCK();
					lf_free(lock);
					return (EDEADLK);
				}
			}
			KERNEL_UNLOCK();
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_blk = block;
		LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
		LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		lock->lf_state->ls_pending++;
		KERNEL_LOCK();
		error = rwsleep(lock, &lockf_lock, priority, lockstr, 0);
		KERNEL_UNLOCK();
		lock->lf_state->ls_pending--;
		/*
		 * Wake up lf_purgelocks(), which sleeps on the lock
		 * state while ls_pending is non-zero.
		 */
		wakeup_one(lock->lf_state);
		if (lock->lf_blk != NULL) {
			TAILQ_REMOVE(&lock->lf_blk->lf_blkhd, lock, lf_block);
			lock->lf_blk = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
		if (lock->lf_flags & F_INTR) {
			lf_free(lock);
			return (EINTR);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	block = TAILQ_FIRST(&lock->lf_state->ls_locks);
	overlap = NULL;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &overlap);
		if (ovcase)
			block = TAILQ_NEXT(overlap, lf_entry);
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				if (overlap)	/* insert before overlap */
					TAILQ_INSERT_BEFORE(overlap, lock,
					    lf_entry);
				else		/* first or last lock in list */
					TAILQ_INSERT_TAIL(&lock->lf_state->ls_locks,
					    lock, lf_entry);
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap, 0);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap, 0);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, 0);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_blk = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				needtolink = 0;
			}
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			if (!needtolink)
				TAILQ_REMOVE(&lock->lf_state->ls_locks, lock,
				    lf_entry);
			TAILQ_INSERT_AFTER(&lock->lf_state->ls_locks, overlap,
			    lock, lf_entry);
			overlap->lf_end = lock->lf_start - 1;
			lf_wakelock(overlap, 0);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink)
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, 0);
			break;
		}
		break;
	}
	LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf *lf, *overlap;
	int ovcase;

	rw_assert_wrlock(&lockf_lock);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if (lf == NULL)
		return (0);

	LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &overlap))) {
		lf_wakelock(overlap, 0);

		switch (ovcase) {
		case 1: /* overlap == lock */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			/*
			 * lf_split() has inserted lock into the list.
			 * Since our caller will free lock, take it back
			 * out so that it remains detached from the list.
			 */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, lf_entry);
			break;
		case 3: /* lock contains overlap */
			lf = TAILQ_NEXT(overlap, lf_entry);
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			lf = TAILQ_NEXT(overlap, lf_entry);
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock, and if so
 * describe it in fl (type, range and owning pid).
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf *overlap, *lf;

	rw_assert_wrlock(&lockf_lock);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	while (lf_findoverlap(lf, lock, OTHERS, &overlap) != 0) {
		/*
		 * We've found an overlap; it only blocks us if at
		 * least one of the two locks is exclusive (two
		 * shared locks may coexist).
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = TAILQ_NEXT(overlap, lf_entry);
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf **overlap)
{
	off_t start, end;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
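		/*
		 * Pictorially (lf = existing lock, lock = request):
		 *
		 *	1: lf:   |-----|      2: lf:   |---------|
		 *	   lock: |-----|         lock:   |-----|
		 *
		 *	3: lf:     |---|      4: lf:   |-----|
		 *	   lock: |-------|       lock:    |-----|
		 *
		 *	5: lf:       |-----|
		 *	   lock: |-----|
		 */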

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Purge all locks associated with the given lock state.
 */
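/*
 * Callers are expected to prevent further lock requests on the file by
 * the time this runs (presumably because the vnode is being torn down);
 * pending sleepers are interrupted and drained before the remaining
 * locks are freed.
 */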
void
lf_purgelocks(struct lockf_state **state)
{
	struct lockf_state *ls;
	struct lockf *lock;

	rw_enter_write(&lockf_lock);

	ls = *state;
	if (ls == NULL)
		goto out;

	ls_ref(ls);

	/* Interrupt blocked locks and wait for all of them to finish. */
	TAILQ_FOREACH(lock, &ls->ls_locks, lf_entry) {
		LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK);
		lf_wakelock(lock, F_INTR);
	}
	while (ls->ls_pending > 0) {
		KERNEL_LOCK();
		rwsleep(ls, &lockf_lock, PLOCK, "lockfp", 0);
		KERNEL_UNLOCK();
	}

	/*
	 * Any remaining locks cannot block other locks at this point and can
	 * safely be removed.
	 */
	while ((lock = TAILQ_FIRST(&ls->ls_locks))) {
		TAILQ_REMOVE(&ls->ls_locks, lock, lf_entry);
		lf_free(lock);
	}

	/* This is the last expected thread to hold a lock state reference. */
#ifdef LOCKF_DIAGNOSTIC
	KASSERT(ls->ls_refs == 1);
#endif
	ls_rele(ls);

out:
	rw_exit_write(&lockf_lock);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
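/*
 * Example: if lock1 covers [0,99] and lock2 covers [40,59], lock1 is
 * trimmed to [0,39], lock2 is linked in after it, and a new lock for
 * [60,99] is allocated and linked in after lock2.
 */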
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
	LFPRINT(("splitting from", lock2), DEBUG_SPLIT);

	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		TAILQ_INSERT_BEFORE(lock1, lock2, lf_entry);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2,
		    lf_entry);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	splitlock->lf_flags = lock1->lf_flags;
	splitlock->lf_type = lock1->lf_type;
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_end = lock1->lf_end;
	splitlock->lf_id = lock1->lf_id;
	splitlock->lf_state = lock1->lf_state;
	splitlock->lf_blk = NULL;
	splitlock->lf_pid = lock1->lf_pid;
	TAILQ_INIT(&splitlock->lf_blkhd);
	ls_ref(splitlock->lf_state);
	lock1->lf_end = lock2->lf_start - 1;

	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, lf_entry);
	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock2, splitlock,
	    lf_entry);
}

/*
 * Wake up all locks blocked on the given lock.
 */
void
lf_wakelock(struct lockf *lock, int flags)
{
	struct lockf *wakelock;

	rw_assert_wrlock(&lockf_lock);

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_blk = NULL;
		wakelock->lf_flags |= flags;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(const char *tag, struct lockf *lock)
{
	struct lockf	*block;

	if (tag)
		printf("%s: ", tag);
	printf("lock %p", lock);
	if (lock == NULL) {
		printf("\n");
		return;
	}
	printf(", %s %p %s, start %lld, end %lld",
		lock->lf_flags & F_POSIX ? "posix" : "flock",
		lock->lf_id,
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	printf(", next %p, state %p",
	    TAILQ_NEXT(lock, lf_entry), lock->lf_state);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(", block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	TAILQ_FOREACH(lf, &lock->lf_state->ls_locks, lf_entry) {
		if (lock == lf)
			printf(" * ");
		else
			printf("   ");
		lf_print(NULL, lf);
	}
}
#endif /* LOCKF_DEBUG */