/*	$NetBSD: vfs_lockf.c,v 1.32 2003/06/25 14:34:55 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.32 2003/06/25 14:34:55 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock(struct lockf *, struct lockf **);
static int lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *, struct lockf **, struct simplelock *);
static void lf_split(struct lockf *, struct lockf *, struct lockf **);
static void lf_wakelock(struct lockf *);

#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
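	/*
	 * For example (illustrative values only): SEEK_END with
	 * l_start == -10 on a 100-byte file gives start == 90, i.e.
	 * the last ten bytes; an l_len of 0 means "through end of
	 * file" and becomes end == -1 below.
	 */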
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;

	/*
	 * Allocate locks before acquiring the interlock.
	 * We need two locks in the worst case.
	 */
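	/*
	 * (The spare lock covers the worst case: a request over the
	 * middle of an existing lock, for which lf_split() needs a
	 * second struct lockf to hold the tail piece.)
	 */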
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for F_UNLCK case, we can re-use lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * byte-range lock might need one more lock.
			 */
			MALLOC(sparelock, struct lockf *, sizeof(*lock),
			    M_LOCKF, M_WAITOK);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;
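	/*
	 * lf_lwp records the would-be sleeper for the deadlock walk
	 * in lf_setlock(); it is cleared to 0 there once the lock has
	 * been granted.
	 */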

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed or linked it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		FREE(lock, M_LOCKF);
	if (sparelock)
		FREE(sparelock, M_LOCKF);

	return error;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
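		/*
		 * Example (illustrative): if process A is about to
		 * sleep on B's lock while B is already sleeping on one
		 * of A's locks, the walk below follows B's wait channel
		 * back to a lock owned by our own LWP and we return
		 * EDEADLK instead of sleeping.
		 */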
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					free(lock, M_LOCKF);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
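		/*
		 * (The case numbers come from lf_findoverlap(); see
		 * the diagram there.)
		 */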
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
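/*
 * E.g. (illustrative byte ranges): clearing 10-19 out of a single
 * 0-99 lock is lf_findoverlap() case 2; lf_split() leaves 0-9 in
 * place and uses the preallocated spare lock for the 20-99 tail.
 */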
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NOLOCKF;
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
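/*
 * On return, *overlap points at the overlapping lock (if any) and
 * *prev at the lf_next link through which it is reached, so the
 * caller can splice the list without rescanning it.
 */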
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
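		/*
		 * Pictorially (each candidate "lf" drawn against the
		 * requested lock; an end of -1 means "to EOF"):
		 *
		 *	lock:          |=========|
		 *	case 0:  |--|               |--|
		 *	case 1:        |---------|
		 *	case 2:      |-------------|
		 *	case 3:          |-----|
		 *	case 4:     |------|
		 *	case 5:            |----------|
		 */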
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if ((start <= lf->lf_start) &&
		    ((end == -1) ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
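/*
 * E.g. (illustrative ranges): splitting lock1 == 0-99 around
 * lock2 == 10-19 shrinks lock1 to 0-9, links lock2 in after it, and
 * turns *sparelock into the 20-99 remainder.  The three-piece case
 * relies on the spare lock preallocated in lf_advlock().
 */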
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
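		/*
		 * Clearing lf_next tells the sleeper in lf_setlock()
		 * that it was woken by a lock release rather than
		 * only by a signal.
		 */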
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
862