/*	$OpenBSD: vfs_lockf.c,v 1.31 2018/11/10 21:21:15 anton Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/unistd.h>

struct pool lockfpool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20
#define	DEBUG_LINK		0x40

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#define	LFPRINT(args, level)	if (lockf_debug & (level)) lf_print args
#else
#define	DPRINTF(args, level)
#define	LFPRINT(args, level)
#endif

void
lf_init(void)
{
	pool_init(&lockfpool, sizeof(struct lockf), 0, IPL_NONE, PR_WAITOK,
	    "lockfpl", NULL);
}
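
/*
 * lf_init() must run before the first lf_alloc() call, since pool_get()
 * requires an initialized pool.  It is presumably invoked once during
 * kernel startup by the VFS initialization code; the call site is
 * outside this file.
 */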

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're cut off entirely.
 */
int maxlocksperuid = 1024;

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at the limit.
 * 2 - cut off at double the limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}
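
/*
 * Within this file, lf_advlock() calls lf_alloc() with allowfail set to 1
 * for F_SETLK and 2 for all other operations, while lf_split() passes 0 so
 * that splitting an existing lock never fails for lack of per-uid quota.
 */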

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	LFPRINT(("lf_free", lock), DEBUG_LINK);

	if (*lock->lf_head == lock) {
		LFPRINT(("lf_free: head", lock->lf_next), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
		KASSERT(lock->lf_prev == NULL);
#endif /* LOCKF_DIAGNOSTIC */

		*lock->lf_head = lock->lf_next;
	}

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));
#endif /* LOCKF_DIAGNOSTIC */

	lf_unlink(lock);

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockfpool, lock);
}

void
lf_link(struct lockf *lock1, struct lockf *lock2)
{
	LFPRINT(("lf_link: lock1", lock1), DEBUG_LINK);
	LFPRINT(("lf_link: lock2", lock2), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
	KASSERT(lock1 != NULL && lock2 != NULL);
	KASSERT(lock1 != lock2);
	if (lock1->lf_next != NULL)
		KASSERT(lock2->lf_next == NULL);
	if (lock2->lf_prev != NULL)
		KASSERT(lock1->lf_prev == NULL);
#endif /* LOCKF_DIAGNOSTIC */

	if (lock1->lf_next != NULL) {
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next->lf_prev = lock2;
	}
	lock1->lf_next = lock2;

	if (lock2->lf_prev != NULL) {
		lock1->lf_prev = lock2->lf_prev;
		lock2->lf_prev->lf_next = lock1;
	}
	lock2->lf_prev = lock1;

	if (*lock1->lf_head == NULL) {
		LFPRINT(("lf_link: head", lock1), DEBUG_LINK);

#ifdef LOCKF_DIAGNOSTIC
		KASSERT(*lock2->lf_head == NULL);
#endif /* LOCKF_DIAGNOSTIC */

		*lock1->lf_head = lock1;
	} else if (*lock2->lf_head == lock2) {
		LFPRINT(("lf_link: swap head", lock1), DEBUG_LINK);

		*lock1->lf_head = lock1;
	}
}

void
lf_unlink(struct lockf *lock)
{
	LFPRINT(("lf_unlink", lock), DEBUG_LINK);

	if (lock->lf_prev != NULL)
		lock->lf_prev->lf_next = lock->lf_next;
	if (lock->lf_next != NULL)
		lock->lf_next->lf_prev = lock->lf_prev;
	lock->lf_prev = lock->lf_next = NULL;
}
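
/*
 * All advisory locks for a file hang off a single list rooted at *lf_head.
 * lf_link() and lf_unlink() maintain the doubly linked lf_prev/lf_next
 * pointers and keep *lf_head pointing at the first lock on the list.
 * A lock that is not yet on any list may temporarily reuse lf_next to
 * refer to the lock blocking it; see lf_setlock() below.
 */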

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len > 0) {
		if (fl->l_len - 1 > LLONG_MAX - start)
			return (EOVERFLOW);
		end = start + (fl->l_len - 1);
	} else if (fl->l_len < 0) {
		if (fl->l_start + fl->l_len < 0)
			return (EINVAL);
		end = fl->l_start - 1;
		start += fl->l_len;
	} else {
		end = -1;
	}
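
	/*
	 * At this point [start, end] holds the requested byte range,
	 * inclusive at both ends.  For example, l_whence = SEEK_SET,
	 * l_start = 100 and l_len = 50 yields start = 100, end = 149;
	 * l_len = 0 yields end = -1, which stands for "through end of
	 * file".  A negative l_len selects bytes before l_start.
	 */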

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_prev = NULL;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;
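
	/*
	 * For F_POSIX locks the id is expected to identify the locking
	 * process (the deadlock check in lf_setlock() casts it to a
	 * struct proc *); for F_FLOCK locks the caller passes an opaque
	 * identifier, conventionally the struct file.  Only POSIX locks
	 * record a pid.
	 */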

	switch (op) {
	case F_SETLK:
		return (lf_setlock(lock));
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		return (error);
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		return (error);
	default:
		lf_free(lock);
		return (EINVAL);
	}
	/* NOTREACHED */
}
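
/*
 * lf_advlock() is typically reached through a filesystem's VOP_ADVLOCK()
 * implementation, which passes a pointer to the per-inode lock list head,
 * the current file size (needed for SEEK_END), the owner identifier, the
 * fcntl-style operation (F_SETLK, F_UNLCK or F_GETLK) and either F_POSIX
 * or F_FLOCK in flags, optionally together with F_WAIT for a blocking
 * request.  The exact caller-side details live outside this file.
 */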

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf *prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

	LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
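		/*
		 * The walk below relies on two facts: a process sleeping
		 * in lf_setlock() sleeps on its own struct lockf (see the
		 * tsleep() call further down), and a blocked lock's
		 * lf_next points at the lock blocking it.  Repeatedly
		 * following wchan -> blocking lock -> owner therefore
		 * traces the wait-for chain; finding ourselves on it
		 * means a deadlock.
		 */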
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					lf_free(lock);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 * Since lock is not yet part of any list, it's safe to let the
		 * lf_next field refer to the blocking lock.
		 */
		lock->lf_next = block;
		LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
		LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep(lock, priority, lockstr, 0);
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	block = *head;
	prev = NULL;
	overlap = NULL;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				if (overlap)	/* insert before overlap */
					lf_link(lock, overlap);
				else if (prev)	/* last lock in list */
					lf_link(prev, lock);
				else		/* first lock in list */
					*head = lock;
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				if (!needtolink)
					lf_unlink(lock);
				lf_link(lock, overlap);
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				lf_link(lock, overlap);
				needtolink = 0;
			}
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			if (!needtolink)
				lf_unlink(lock);
			lf_link(overlap, lock);
			overlap->lf_end = lock->lf_start - 1;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink)
				lf_link(lock, overlap);
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
	LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf **head = lock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, *prev;
	int ovcase;

	if (lf == NULL)
		return (0);
	LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
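	/*
	 * Overlap cases 3 and 4 continue the scan, since locks further
	 * down the list may still overlap the range being cleared;
	 * cases 1, 2 and 5 cannot be followed by another overlap, so
	 * the loop terminates.
	 */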
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &prev, &overlap))) {
		lf_wakelock(overlap);

		switch (ovcase) {
		case 1: /* overlap == lock */
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			break;
		case 3: /* lock contains overlap */
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			lf = overlap->lf_next;
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

	LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf *prev, *overlap, *lf;

	lf = *lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf **prev, struct lockf **overlap)
{
	off_t start, end;

	LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = lf;
			*overlap = lf = lf->lf_next;
			continue;
		}
		LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
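		/*
		 * Roughly, with lock drawn above each candidate overlap
		 * (ranges are inclusive byte offsets):
		 *
		 *	1: |-----|   2:  |---|     3: |-------|
		 *	   |-----|      |-------|       |---|
		 *
		 *	4:    |----| 5: |----|
		 *	   |----|          |----|
		 */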

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = lf;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

	LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
	LFPRINT(("splitting from", lock2), DEBUG_SPLIT);

	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lf_link(lock2, lock1);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lf_link(lock1, lock2);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_prev = NULL;
	splitlock->lf_next = NULL;
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;

	lf_link(lock1, lock2);
	lf_link(lock2, splitlock);
}
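
/*
 * Example: if lock1 covers bytes [0, 99] and lock2 covers [40, 59],
 * lf_split() shrinks lock1 to [0, 39], links lock2 after it and creates
 * a third lock for [60, 99], so the original range ends up as three
 * adjacent pieces.
 */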

/*
 * Wake up all locks blocked on this lock.
 */
void
lf_wakelock(struct lockf *lock)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}
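
/*
 * Clearing lf_next before wakeup_one() matters: lf_setlock() checks
 * lf_next after tsleep() and only removes itself from a block list when
 * lf_next is still set, i.e. when it was woken by a signal or some other
 * event rather than by lf_wakelock().
 */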

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
	struct lockf	*block;

	if (tag)
		printf("%s: ", tag);
	printf("lock %p", lock);
	if (lock == NULL) {
		printf("\n");
		return;
	}
	printf(" for ");
	if (lock->lf_flags & F_POSIX)
		printf("thread %d", ((struct proc *)(lock->lf_id))->p_tid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	printf(", prev %p, next %p", lock->lf_prev, lock->lf_next);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(", block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf;
#ifdef LOCKF_DIAGNOSTIC
	struct lockf *prev = NULL;
#endif /* LOCKF_DIAGNOSTIC */

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		if (lock == lf)
			printf(" * ");
		else
			printf("   ");
		lf_print(NULL, lf);

#ifdef LOCKF_DIAGNOSTIC
		KASSERT(lf->lf_prev == prev);
		prev = lf;
#endif /* LOCKF_DIAGNOSTIC */
	}
}
#endif /* LOCKF_DEBUG */