/*	$OpenBSD: vfs_lockf.c,v 1.49 2022/06/02 05:32:28 anton Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/rwlock.h>
#include <sys/unistd.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the inode structure. Locks are sorted by the starting byte of the lock for
 * efficiency.
 */
TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	caddr_t	lf_id;		 /* The id of the resource holding the lock */
	struct	lockf_state *lf_state;	/* State associated with the lock */
	TAILQ_ENTRY(lockf) lf_entry;
	struct	lockf *lf_blk;	 /* The lock that blocks us */
	struct	locklist lf_blkhd;	/* The list of blocked locks */
	TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
	uid_t	lf_uid;		/* User ID responsible */
	pid_t	lf_pid;		/* POSIX - owner pid */
};

struct lockf_state {
	TAILQ_HEAD(, lockf)	  ls_locks;	/* list of active locks */
	TAILQ_HEAD(, lockf)	  ls_pending;	/* list of pending locks */
	struct lockf_state	**ls_owner;	/* owner */
	int			  ls_refs;	/* reference counter */
};
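
/*
 * Layout notes (derived from the code below): ls_locks holds granted
 * locks, kept sorted by lf_start; ls_pending holds requests sleeping
 * in lf_setlock().  ls_owner points back at the vnode's lock state
 * pointer so that ls_rele() can clear it when the last reference is
 * dropped.
 */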

struct pool lockf_state_pool;
struct pool lockf_pool;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20
#define	DEBUG_LINK		0x40

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

void	lf_print(const char *, struct lockf *);
void	lf_printlist(const char *, struct lockf *);

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#define	LFPRINT(args, level)	if (lockf_debug & (level)) lf_print args
#else
#define	DPRINTF(args, level)
#define	LFPRINT(args, level)
#endif

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);
int lf_clearlock(struct lockf *);
int lf_findoverlap(struct lockf *, struct lockf *, int, struct lockf **);
struct lockf *lf_getblock(struct lockf *, struct lockf *);
int lf_getlock(struct lockf *, struct flock *);
int lf_setlock(struct lockf *);
void lf_split(struct lockf *, struct lockf *);
void lf_wakelock(struct lockf *, int);
int lf_deadlock(struct lockf *);
void ls_ref(struct lockf_state *);
void ls_rele(struct lockf_state *);

/*
 * Serializes access to each instance of struct lockf and struct lockf_state
 * and each pointer from a vnode to struct lockf_state.
 */
struct rwlock lockf_lock = RWLOCK_INITIALIZER("lockflk");

void
lf_init(void)
{
	pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfspl", NULL);
	pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "lockfpl", NULL);
}

void
ls_ref(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	ls->ls_refs++;
}

void
ls_rele(struct lockf_state *ls)
{
	rw_assert_wrlock(&lockf_lock);

	if (--ls->ls_refs > 0)
		return;

	KASSERT(TAILQ_EMPTY(&ls->ls_locks));
	KASSERT(TAILQ_EMPTY(&ls->ls_pending));

	*ls->ls_owner = NULL;
	pool_put(&lockf_state_pool, ls);
}

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're cut off entirely.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at the limit.  2 - cut off at double
 * the limit.
 */
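/*
 * As used below: lf_advlock() passes allowfail == 1 for F_SETLK
 * requests and 2 for unlock/getlock requests, while lf_split() passes
 * 0 so that splitting an existing lock never fails.
 */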
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockf_pool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_free", lock), DEBUG_LINK);

	KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));

	ls_rele(lock->lf_state);

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockf_pool, lock);
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf_state *ls;
	struct lockf *lock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len > 0) {
		if (fl->l_len - 1 > LLONG_MAX - start)
			return (EOVERFLOW);
		end = start + (fl->l_len - 1);
		/* Avoid ambiguity at the end of the range. */
		if (end == LLONG_MAX)
			end = -1;
	} else if (fl->l_len < 0) {
		if (start + fl->l_len < 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
	} else {
		end = -1;
	}
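
	/*
	 * Worked examples (illustrative): with l_whence == SEEK_SET,
	 * l_start == 100 and l_len == 50 lock bytes 100-149, giving
	 * start == 100 and end == 149.  l_len == 0 locks from l_start
	 * to EOF, represented by end == -1.  A negative l_len locks
	 * the bytes preceding l_start: l_start == 100 and l_len == -50
	 * give start == 50 and end == 99.
	 */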

	rw_enter_write(&lockf_lock);
	ls = *state;

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (ls == NULL && op != F_SETLK) {
		fl->l_type = F_UNLCK;
		goto out;
	}

	if (ls == NULL) {
		ls = pool_get(&lockf_state_pool, PR_WAITOK | PR_ZERO);
		ls->ls_owner = state;
		TAILQ_INIT(&ls->ls_locks);
		TAILQ_INIT(&ls->ls_pending);
		*state = ls;
	}
	ls_ref(ls);

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (lock == NULL) {
		ls_rele(ls);
		error = ENOLCK;
		goto out;
	}
	lock->lf_flags = flags;
	lock->lf_type = fl->l_type;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_state = ls;
	lock->lf_blk = NULL;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;
	TAILQ_INIT(&lock->lf_blkhd);

	switch (op) {
	case F_SETLK:
		error = lf_setlock(lock);
		break;
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		break;
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		break;
	default:
		lf_free(lock);
		error = EINVAL;
		break;
	}

out:
	rw_exit_write(&lockf_lock);
	return (error);
}
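
/*
 * Illustrative userland view (not part of this file): a request such as
 *
 *	struct flock fl = { .l_start = 0, .l_len = 0,
 *	    .l_whence = SEEK_SET, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * is expected to reach lf_advlock() via VOP_ADVLOCK() with op == F_SETLK
 * and F_WAIT | F_POSIX set in flags, requesting an exclusive lock over
 * the whole file.
 */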

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf *overlap, *ltmp;
	int ovcase, priority, needtolink, error;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan the lock list for this file, looking for locks that would
	 * block us.
	 */
	for (;;) {
		block = lf_getblock(TAILQ_FIRST(&lock->lf_state->ls_locks),
		    lock);
		if (block == NULL)
			break;

		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}

		/*
		 * The lock is blocked; check for deadlock before proceeding.
		 * Note: flock-style locks cover the whole file, so there is
		 * no chance of deadlock.
		 */
		if ((lock->lf_flags & F_POSIX) && lf_deadlock(lock)) {
			lf_free(lock);
			return (EDEADLK);
		}

		/*
		 * For flock-type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_blk = block;
		LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
		LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		TAILQ_INSERT_TAIL(&lock->lf_state->ls_pending, lock, lf_entry);
		error = rwsleep_nsec(lock, &lockf_lock, priority, "lockf",
		    INFSLP);
		TAILQ_REMOVE(&lock->lf_state->ls_pending, lock, lf_entry);
		wakeup_one(lock->lf_state);
		if (lock->lf_blk != NULL) {
			TAILQ_REMOVE(&lock->lf_blk->lf_blkhd, lock, lf_block);
			lock->lf_blk = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
		if (lock->lf_flags & F_INTR) {
			lf_free(lock);
			return (EINTR);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	block = TAILQ_FIRST(&lock->lf_state->ls_locks);
	overlap = NULL;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &overlap);
		if (ovcase)
			block = TAILQ_NEXT(overlap, lf_entry);
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				if (overlap)	/* insert before overlap */
					TAILQ_INSERT_BEFORE(overlap, lock,
					    lf_entry);
				else		/* first or last lock in list */
					TAILQ_INSERT_TAIL(&lock->lf_state->ls_locks,
					    lock, lf_entry);
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading the lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap, 0);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				if (!needtolink)
					TAILQ_REMOVE(&lock->lf_state->ls_locks,
					    lock, lf_entry);
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap, 0);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading the lock, others may be able to
			 * acquire it; otherwise take over the list of
			 * locks blocked on the overlap.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, 0);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_blk = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
				needtolink = 0;
			}
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			if (!needtolink)
				TAILQ_REMOVE(&lock->lf_state->ls_locks, lock,
				    lf_entry);
			TAILQ_INSERT_AFTER(&lock->lf_state->ls_locks, overlap,
			    lock, lf_entry);
			overlap->lf_end = lock->lf_start - 1;
			lf_wakelock(overlap, 0);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink)
				TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, 0);
			break;
		}
		break;
	}
	LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
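/*
 * Example (illustrative): clearing bytes 100-199 out of an existing
 * lock covering 0-999 is overlap case 2; lf_split() turns the single
 * lock into two locks covering 0-99 and 200-999.
 */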
int
lf_clearlock(struct lockf *lock)
{
	struct lockf *lf, *overlap;
	int ovcase;

	rw_assert_wrlock(&lockf_lock);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if (lf == NULL)
		return (0);

	LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &overlap))) {
		lf_wakelock(overlap, 0);

		switch (ovcase) {
		case 1: /* overlap == lock */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			/*
			 * lf_split() has inserted the unlock request into
			 * the list; lf_clearlock() must keep the request
			 * detached, so take it out again.
			 */
			TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, lf_entry);
			break;
		case 3: /* lock contains overlap */
			lf = TAILQ_NEXT(overlap, lf_entry);
			TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
			    lf_entry);
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			lf = TAILQ_NEXT(overlap, lf_entry);
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock and, if so, describe it in
 * the given flock structure (type, range and owner pid).
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block, *lf;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	if ((block = lf_getblock(lf, lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lf, struct lockf *lock)
{
	struct lockf *overlap;

	rw_assert_wrlock(&lockf_lock);

	while (lf_findoverlap(lf, lock, OTHERS, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = TAILQ_NEXT(overlap, lf_entry);
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
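/*
 * Concrete offsets for the six cases (illustrative): for a request
 * covering bytes 10-19, an existing lock covering 10-19 is case 1,
 * 5-25 is case 2, 12-15 is case 3, 5-12 is case 4, 15-25 is case 5,
 * and 30-40 is case 0.
 */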
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf **overlap)
{
	off_t start, end;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*overlap = lf = TAILQ_NEXT(lf, lf_entry);
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Purge all locks associated with the given lock state.
 */
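/*
 * This is expected to be called when the underlying object is going
 * away, e.g. on vnode reclaim, so no new locks can show up while the
 * pending list is drained.
 */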
void
lf_purgelocks(struct lockf_state **state)
{
	struct lockf_state *ls;
	struct lockf *lock;

	rw_enter_write(&lockf_lock);

	ls = *state;
	if (ls == NULL)
		goto out;

	ls_ref(ls);

	/* Interrupt blocked locks and wait for all of them to finish. */
	TAILQ_FOREACH(lock, &ls->ls_locks, lf_entry) {
		LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK);
		lf_wakelock(lock, F_INTR);
	}
	while (!TAILQ_EMPTY(&ls->ls_pending))
		rwsleep_nsec(ls, &lockf_lock, PLOCK, "lockfp", INFSLP);

	/*
	 * Any remaining locks cannot block other locks at this point and can
	 * safely be removed.
	 */
	while ((lock = TAILQ_FIRST(&ls->ls_locks))) {
		TAILQ_REMOVE(&ls->ls_locks, lock, lf_entry);
		lf_free(lock);
	}

	/* This is the last expected thread to hold a lock state reference. */
	KASSERT(ls->ls_refs == 1);
	ls_rele(ls);

out:
	rw_exit_write(&lockf_lock);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
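/*
 * Example (illustrative): with lock1 covering 0-999 and lock2 covering
 * 100-199, lock1 is trimmed to 0-99, lock2 is linked in after it, and
 * a newly allocated lock covers the remainder 200-999.  If lock2 shares
 * a start or end offset with lock1, only two pieces result and no
 * allocation is needed.
 */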
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

	rw_assert_wrlock(&lockf_lock);

	LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
	LFPRINT(("splitting from", lock2), DEBUG_SPLIT);

	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		TAILQ_INSERT_BEFORE(lock1, lock2, lf_entry);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2,
		    lf_entry);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	splitlock->lf_flags = lock1->lf_flags;
	splitlock->lf_type = lock1->lf_type;
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_end = lock1->lf_end;
	splitlock->lf_id = lock1->lf_id;
	splitlock->lf_state = lock1->lf_state;
	splitlock->lf_blk = NULL;
	splitlock->lf_pid = lock1->lf_pid;
	TAILQ_INIT(&splitlock->lf_blkhd);
	ls_ref(splitlock->lf_state);
	lock1->lf_end = lock2->lf_start - 1;

	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, lf_entry);
	TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock2, splitlock,
	    lf_entry);
}

/*
 * Wake up all locks blocked on the given lock.
 */
void
lf_wakelock(struct lockf *lock, int flags)
{
	struct lockf *wakelock;

	rw_assert_wrlock(&lockf_lock);

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_blk = NULL;
		wakelock->lf_flags |= flags;
		wakeup_one(wakelock);
	}
}

/*
 * Returns non-zero if the given lock would cause a deadlock.
 */
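/*
 * The check is deliberately simple: the new lock deadlocks if some
 * process blocking us is itself sleeping on a pending lock that is
 * blocked by our process, i.e. a cycle between two POSIX lock owners
 * keyed on their pids.
 */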
int
lf_deadlock(struct lockf *lock)
{
	struct lockf *block, *lf, *pending;

	lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
	for (; (block = lf_getblock(lf, lock)) != NULL;
	    lf = TAILQ_NEXT(block, lf_entry)) {
		if ((block->lf_flags & F_POSIX) == 0)
			continue;

		TAILQ_FOREACH(pending, &lock->lf_state->ls_pending, lf_entry) {
			if (pending->lf_blk == NULL)
				continue; /* lock already unblocked */

			if (pending->lf_pid == block->lf_pid &&
			    pending->lf_blk->lf_pid == lock->lf_pid)
				return (1);
		}
	}

	return (0);
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(const char *tag, struct lockf *lock)
{
	struct lockf	*block;

	if (tag)
		printf("%s: ", tag);
	printf("lock %p", lock);
	if (lock == NULL) {
		printf("\n");
		return;
	}
	printf(", %s %p %s, start %lld, end %lld",
		lock->lf_flags & F_POSIX ? "posix" : "flock",
		lock->lf_id,
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	printf(", next %p, state %p",
	    TAILQ_NEXT(lock, lf_entry), lock->lf_state);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(", block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	TAILQ_FOREACH(lf, &lock->lf_state->ls_locks, lf_entry) {
		if (lock == lf)
			printf(" * ");
		else
			printf("   ");
		lf_print(NULL, lf);
	}
}
#endif /* LOCKF_DEBUG */