/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.15 2004/05/11 20:37:21 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */

#ifdef INVARIANTS
int lf_global_counter = 0;
#endif
#ifdef LOCKF_DEBUG
int lf_print_ranges = 0;

static void	lf_print_lock(const struct lockf *);
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

static void	lf_wakeup(struct lockf *, off_t, off_t);
static int	lf_overlap(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_left(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_right(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_left2(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_right2(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_embedded(const struct lockf_range *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t, int);
static void	lf_destroy_range(struct lockf_range *, int);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
			   off_t, off_t);
static int	lf_clearlock(struct lockf *, struct proc *, int, int,
			     off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
			   int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);

/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
	struct uidinfo *uip;

	KKASSERT(p != NULL);

	uip = p->p_ucred->cr_uidinfo;

	if (increase)
		uip->ui_posixlocks += p->p_numposixlocks;
	else
		uip->ui_posixlocks -= p->p_numposixlocks;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by %s user: %d.",
		 increase ? "new" : "old", uip->ui_posixlocks));
}

static int
lf_count_change(struct proc *owner, int diff)
{
	struct uidinfo *uip;
	int max;

	/* we might actually not have a process context */
	if (owner == NULL)
		return(0);

	uip = owner->p_ucred->cr_uidinfo;

	max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
		  maxposixlocksperuid);
	if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
	    uip->ui_posixlocks >= max)
		return(1);

	uip->ui_posixlocks += diff;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by user: %d.",
		 uip->ui_posixlocks));

	return(0);
}

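/*
 * Example (illustrative, not from the original sources): if
 * maxposixlocksperuid were 128 and the owner had lowered
 * RLIMIT_POSIXLOCKS to 64, the effective limit would be 64.
 * Root (cr_uid == 0) and an effective limit of -1 bypass the
 * check entirely.
 */
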
/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
	struct flock *fl = ap->a_fl;
	struct proc *owner;
	off_t start, end;
	int type, flags, error;
	lwkt_tokref ilock;

	if (lock->init_done == 0) {
		TAILQ_INIT(&lock->lf_range);
		TAILQ_INIT(&lock->lf_blocked);
		lock->init_done = 1;
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return(EINVAL);
	}
	if (start < 0)
		return(EINVAL);

	flags = ap->a_flags;
	type = fl->l_type;
	if (fl->l_len == 0) {
		flags |= F_NOEND;
		end = LLONG_MAX;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return(EINVAL);
	}
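
	/*
	 * Example (illustrative): l_whence = SEEK_SET, l_start = 100 and
	 * l_len = 100 yield the byte range [100, 199]; l_len = 0 yields
	 * [l_start, LLONG_MAX] with F_NOEND set, i.e. a lock extending
	 * to any future end of file.
	 */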
	/*
	 * This isn't really correct for flock-style locks,
	 * but the current handling is somewhat broken anyway.
	 */
	owner = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock, owner, type, flags, start, end);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, owner, type, flags, start, end);
		break;

	case F_GETLK:
		error = lf_getlock(fl, lock, owner, type, flags, start, end);
		break;

	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&ilock);
	return(error);
}
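
/*
 * Example (userland, illustrative): a blocking exclusive lock on the
 * first 100 bytes of an open file reaches this function roughly as
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * which enters the kernel through VOP_ADVLOCK() with a_op F_SETLK and
 * F_WAIT | F_POSIX set in a_flags.
 */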

static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
	   off_t start, off_t end)
{
	struct lockf_range *range, *first_match, *insert_point;
	int wakeup_needed, lock_needed;
	/* pre-allocation to avoid blocking in the middle of the algorithm */
	struct lockf_range *new_range1 = NULL, *new_range2 = NULL;
	int error = 0;

	/* for restoration in case we hit the POSIX lock limit below */
	struct lockf_range *orig_first_match = NULL;
	off_t orig_end = -1;
	int orig_flags = 0;

restart:
	if (new_range1 == NULL)
		new_range1 = lf_alloc_range();
	if (new_range2 == NULL)
		new_range2 = lf_alloc_range();
	first_match = NULL;
	insert_point = NULL;
	wakeup_needed = 0;

#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		lf_print_lock(lock);
#endif

	TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
		if (insert_point == NULL && range->lf_start >= start)
			insert_point = range;
		if (lf_overlap(range, start, end) == 0)
			continue;
		if (range->lf_owner == owner) {
			if (first_match == NULL)
				first_match = range;
			continue;
		}
		if (type == F_WRLCK || range->lf_type == F_WRLCK)
			break;
	}

	if (range != NULL) {
		struct lockf_range *brange;

		if ((flags & F_WAIT) == 0) {
			error = EAGAIN;
			goto do_cleanup;
		}

		/*
		 * We are blocked. For POSIX locks we have to check
		 * for deadlocks and return with EDEADLK. This is done
		 * by checking whether range->lf_owner is already
		 * blocked.
		 *
		 * Since flock-style locks cover the whole file, a
		 * deadlock between those is nearly impossible.
		 * This can only occur if a process tries to lock the
		 * same inode exclusively while holding a shared lock
		 * with another descriptor.
		 * XXX How can we cleanly detect this?
		 * XXX The current mixing of flock & fcntl/lockf is evil.
		 *
		 * Handle existing locks of flock-style like POSIX locks.
		 */
		if (flags & F_POSIX) {
			TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
				if (brange->lf_owner == range->lf_owner) {
					error = EDEADLK;
					goto do_cleanup;
				}
		}

		/*
		 * For flock-style locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((flags & F_FLOCK) && type == F_WRLCK)
			lf_clearlock(lock, owner, type, flags, start, end);

		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, 0, start, end, 0);
		TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
		error = tsleep(brange, PCATCH, "lockf", 0);

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which case we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing/downgrading a lock (in which case
		 * we have already been removed from the blocked list
		 * and our lf_flags field is 1).
		 */
		if (brange->lf_flags == 0)
			TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
		lf_destroy_range(brange, 0);

		if (error)
			goto do_cleanup;
		goto restart;
	}

	if (first_match == NULL) {
		if (flags & F_POSIX) {
			if (lf_count_change(owner, 1)) {
				error = ENOLCK;
				goto do_cleanup;
			}
		}
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end, 1);
		if (insert_point != NULL)
			TAILQ_INSERT_BEFORE(insert_point, range, lf_link);
		else
			TAILQ_INSERT_TAIL(&lock->lf_range, range, lf_link);
		goto do_wakeup;
	}

	lock_needed = 1;

	if (lf_overlap_left(first_match, start, end)) {
		KKASSERT((flags & F_POSIX) != 0);
		if (first_match->lf_end > end) {
			if (first_match->lf_type == type)
				goto do_wakeup;
			if (lf_count_change(owner, 2)) {
				error = ENOLCK;
				goto do_cleanup;
			}
			range = new_range1;
			new_range1 = NULL;
			lf_create_range(range, owner, type, flags,
					start, end, 1);
			if (insert_point != NULL)
				TAILQ_INSERT_BEFORE(insert_point, range,
						    lf_link);
			else
				TAILQ_INSERT_TAIL(&lock->lf_range, range,
						  lf_link);
			insert_point = range;
			range = new_range2;
			new_range2 = NULL;
			lf_create_range(range, owner, first_match->lf_type,
					first_match->lf_flags, end + 1,
					first_match->lf_end, 1);
			TAILQ_INSERT_AFTER(&lock->lf_range, insert_point,
					   range, lf_link);
			first_match->lf_flags &= ~F_NOEND;
			first_match->lf_end = start - 1;
			if (type == F_RDLCK)
				wakeup_needed = 1;
			goto do_wakeup;
		}
		/*
		 * Left match, but not right match:
		 *
		 * handle the lf_type != type case directly,
		 * merge the other case with the !lock_needed path.
		 */
		if (first_match->lf_type != type) {
			/*
			 * This is needed if the lockf acquisition below fails.
			 */
			orig_first_match = first_match;
			orig_end = first_match->lf_end;
			orig_flags = first_match->lf_flags;
			first_match->lf_end = start - 1;
			first_match->lf_flags &= ~F_NOEND;
			if (type == F_RDLCK)
				wakeup_needed = 1;
			/* Try to find the next matching range */
			range = TAILQ_NEXT(first_match, lf_link);
			while (range != NULL) {
				if (range->lf_owner == owner &&
				    lf_overlap(range, start, end))
					break;
				range = TAILQ_NEXT(range, lf_link);
			}
			if (range == NULL)
				goto do_wakeup;
			first_match = range;
			/* fall through to !left_match behaviour */
		} else {
			first_match->lf_end = end;
			first_match->lf_flags |= flags & F_NOEND;
			lock_needed = 0;
		}
	}

	if (lf_overlap_embedded(first_match, start, end)) {
		if (first_match != insert_point) {
			TAILQ_REMOVE(&lock->lf_range, first_match, lf_link);
			TAILQ_INSERT_BEFORE(insert_point, first_match, lf_link);
		}
		first_match->lf_start = start;
		first_match->lf_end = end;
		first_match->lf_flags |= flags & F_NOEND;
		first_match->lf_type = type;
		lock_needed = 0;
	}

	if (lock_needed == 0) {
		struct lockf_range *nrange;

		range = TAILQ_NEXT(first_match, lf_link);
		while (range != NULL) {
			if (range->lf_owner != owner) {
				range = TAILQ_NEXT(range, lf_link);
				continue;
			}
			if (lf_overlap_embedded(range, start, end)) {
				nrange = TAILQ_NEXT(range, lf_link);
				TAILQ_REMOVE(&lock->lf_range, range,
					     lf_link);
				lf_count_change(owner, -1);
				lf_destroy_range(range, 1);
				range = nrange;
				continue;
			}
			if (lf_overlap_right(range, start, end) == 0) {
				range = TAILQ_NEXT(range, lf_link);
				continue;
			}
			if (range->lf_type != type) {
				range->lf_start = end + 1;
				nrange = TAILQ_NEXT(range, lf_link);
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				while (nrange != NULL) {
					if (nrange->lf_start >= end + 1)
						break;
					nrange = TAILQ_NEXT(nrange, lf_link);
				}
				if (nrange != NULL)
					TAILQ_INSERT_BEFORE(nrange, range,
							    lf_link);
				else
					TAILQ_INSERT_TAIL(&lock->lf_range,
							  range, lf_link);
				break;
			}
			first_match->lf_end = range->lf_end;
			first_match->lf_flags |=
			    range->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			lf_count_change(owner, -1);
			lf_destroy_range(range, 1);
			break;
		}
		goto do_wakeup;
	}

	if (lf_overlap_right(first_match, start, end)) {
		KKASSERT((flags & F_POSIX) != 0);
		if (first_match->lf_type == type) {
			first_match->lf_start = start;
			if (first_match != insert_point) {
				TAILQ_REMOVE(&lock->lf_range, first_match,
					     lf_link);
				TAILQ_INSERT_BEFORE(insert_point, first_match,
						    lf_link);
			}
			goto do_wakeup;
		}
		if (lf_count_change(owner, 1)) {
			if (orig_first_match != NULL) {
				orig_first_match->lf_end = orig_end;
				orig_first_match->lf_flags = orig_flags;
			}
			error = ENOLCK;
			goto do_cleanup;
		}
		first_match->lf_start = end + 1;
		KKASSERT(new_range1 != NULL);
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end, 1);
		TAILQ_INSERT_BEFORE(insert_point, range, lf_link);
		range = TAILQ_NEXT(first_match, lf_link);
		TAILQ_REMOVE(&lock->lf_range, first_match, lf_link);
		while (range != NULL) {
			if (range->lf_start >= first_match->lf_start)
				break;
			range = TAILQ_NEXT(range, lf_link);
		}
		if (range != NULL)
			TAILQ_INSERT_BEFORE(range, first_match, lf_link);
		else
			TAILQ_INSERT_TAIL(&lock->lf_range, first_match, lf_link);
		goto do_wakeup;
	}

do_wakeup:
#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		lf_print_lock(lock);
#endif
	if (wakeup_needed)
		lf_wakeup(lock, start, end);
	error = 0;
do_cleanup:
	if (new_range1 != NULL)
		lf_destroy_range(new_range1, 0);
	if (new_range2 != NULL)
		lf_destroy_range(new_range2, 0);
	return(error);
}
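
/*
 * Example (illustrative): if the owner holds a shared POSIX lock on
 * [0, 99] and requests an exclusive lock on [40, 59], the overlap-left
 * path above splits the original range into [0, 39] (shared), a new
 * [40, 59] (exclusive) and a new [60, 99] (shared).  This consumes
 * both pre-allocated ranges and charges two additional locks to the
 * owner, which is why lf_count_change(owner, 2) is checked first.
 */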

static int
lf_clearlock(struct lockf *lock, struct proc *owner, int type, int flags,
	     off_t start, off_t end)
{
	struct lockf_range *range, *trange;
	struct lockf_range *new_range;
	int error = 0;

	new_range = lf_alloc_range();

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_range, lf_link, trange) {
		if (range->lf_end < start)
			continue;
		if (range->lf_start > end)
			break;
		if (range->lf_owner != owner)
			continue;
		if (lf_overlap_embedded(range, start, end)) {
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			/* only POSIX locks are accounted for */
			if (range->lf_flags & F_POSIX)
				lf_count_change(owner, -1);
			lf_destroy_range(range, 1);
			continue;
		}
		if (lf_overlap_left2(range, start, end)) {
			KKASSERT(range->lf_flags & F_POSIX);
			if (lf_overlap_right2(range, start, end)) {
				struct lockf_range *nrange;

				if (lf_count_change(owner, 1)) {
					error = ENOLCK;
					goto do_cleanup;
				}
				nrange = new_range;
				new_range = NULL;
				lf_create_range(nrange, range->lf_owner,
				    range->lf_type, range->lf_flags,
				    end + 1, range->lf_end, 1);
				range->lf_end = start - 1;
				range->lf_flags &= ~F_NOEND;
				for (; range != NULL;
				     range = TAILQ_NEXT(range, lf_link))
					if (range->lf_start >= nrange->lf_start)
						break;
				if (range != NULL)
					TAILQ_INSERT_BEFORE(range, nrange,
							    lf_link);
				else
					TAILQ_INSERT_TAIL(&lock->lf_range,
							  nrange, lf_link);
				break;
			}
			range->lf_end = start - 1;
			range->lf_flags &= ~F_NOEND;
			continue;
		}
		if (lf_overlap_right2(range, start, end)) {
			struct lockf_range *nrange = range;

			KKASSERT(range->lf_flags & F_POSIX);

			range = TAILQ_NEXT(range, lf_link);
			TAILQ_REMOVE(&lock->lf_range, nrange, lf_link);
			nrange->lf_start = end + 1;
			for (; range != NULL;
			     range = TAILQ_NEXT(range, lf_link))
				if (range->lf_start >= nrange->lf_start)
					break;
			if (range != NULL)
				TAILQ_INSERT_BEFORE(range, nrange, lf_link);
			else
				TAILQ_INSERT_TAIL(&lock->lf_range, nrange,
						  lf_link);
			break;
		}
	}
609 	lf_wakeup(lock, start, end);
610 	error = 0;
611 
612 do_cleanup:
613 	if (new_range != NULL)
614 		lf_destroy_range(new_range, 0);
615 
616 	return(error);
617 }
618 
619 /*
620  * Check whether there is a blocking lock,
621  * and if so return its process identifier.
622  */
623 static int
624 lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
625 	   int type, int flags, off_t start, off_t end)
626 {
627 	struct lockf_range *range;
628 
629 	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
630 		if (range->lf_owner != owner &&
631 		    lf_overlap(range, start, end) &&
632 		    (type == F_WRLCK || range->lf_type == F_WRLCK))
633 			break;
634 	if (range == NULL) {
635 		fl->l_type = F_UNLCK;
636 		return(0);
637 	}
638 	fl->l_type = range->lf_type;
639 	fl->l_whence = SEEK_SET;
640 	fl->l_start = range->lf_start;
641 	if (range->lf_flags & F_NOEND)
642 		fl->l_len = 0;
643 	else
644 		fl->l_len = range->lf_end - range->lf_start + 1;
645 	if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
646 		fl->l_pid = range->lf_owner->p_pid;
647 	else
648 		fl->l_pid = -1;
649 	return(0);
650 }
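
/*
 * Example (illustrative): if another process holds an exclusive lock
 * on the whole file (F_NOEND set), F_GETLK reports l_type F_WRLCK,
 * l_whence SEEK_SET, l_start 0, l_len 0 and the pid of the holder.
 */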

/*
 * Check whether range and [start, end] overlap.
 */
static int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_start <= end)
		return(1);
	else if (start >= range->lf_start && start <= range->lf_end)
		return(1);
	else
		return(0);
}

/*
 * Wakeup pending lock attempts.
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
	struct lockf_range *range, *nrange;

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
		if (lf_overlap(range, start, end) == 0)
			continue;
		TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
		range->lf_flags = 1;
		wakeup(range);
	}
}

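/*
 * The helpers below classify how an existing range relates to a
 * request [start, end] (illustrative summary):
 *
 *	left:     the range begins before start and reaches at least
 *		  up to (or touches) start;
 *	right:    the range extends beyond end and begins no later
 *		  than just past end;
 *	embedded: the range lies entirely within [start, end].
 *
 * The "2" variants demand a true overlap, while lf_overlap_left() and
 * lf_overlap_right() also accept ranges that merely touch the request,
 * so that adjacent compatible locks can be merged.
 */
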
static int
lf_overlap_left(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start < start && range->lf_end >= start - 1)
		return(1);
	else
		return(0);
}

static int
lf_overlap_right(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_end > end && range->lf_start >= start &&
	    range->lf_start - 1 <= end)
		return(1);
	else
		return(0);
}

static int
lf_overlap_left2(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start < start && range->lf_end >= start)
		return(1);
	else
		return(0);
}

static int
lf_overlap_right2(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_end > end && range->lf_start <= end)
		return(1);
	else
		return(0);
}

static int
lf_overlap_embedded(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_end <= end)
		return(1);
	else
		return(0);
}

/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
	struct lockf_range *range;

#ifdef INVARIANTS
	lf_global_counter++;
#endif
	range = malloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
	range->lf_owner = NULL;
	return(range);
}

static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
		int flags, off_t start, off_t end, int accounting)
{
	KKASSERT(start <= end);
	if (owner != NULL && (flags & F_POSIX) && accounting)
		++owner->p_numposixlocks;
	range->lf_type = type;
	range->lf_flags = flags;
	range->lf_start = start;
	range->lf_end = end;
	range->lf_owner = owner;

#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		printf("lf_create_range: %lld..%lld\n", range->lf_start,
		       range->lf_end);
#endif
}

static void
lf_destroy_range(struct lockf_range *range, int accounting)
{
	struct proc *owner = range->lf_owner;
	int flags = range->lf_flags;

#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		printf("lf_destroy_range: %lld..%lld\n", range->lf_start,
		       range->lf_end);
#endif

	free(range, M_LOCKF);
	if (owner != NULL && (flags & F_POSIX) && accounting) {
		--owner->p_numposixlocks;
		KASSERT(owner->p_numposixlocks >= 0,
			("Negative number of POSIX locks held by process: %d",
			 owner->p_numposixlocks));
	}

#ifdef INVARIANTS
	lf_global_counter--;
	KKASSERT(lf_global_counter >= 0);
#endif
}

#ifdef LOCKF_DEBUG
static void
lf_print_lock(const struct lockf *lock)
{
	struct lockf_range *range;

	if (TAILQ_EMPTY(&lock->lf_range))
		printf("lockf %p: no ranges locked\n", lock);
	else
		printf("lockf %p:\n", lock);
	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		printf("\t%lld..%lld type %s owned by %d\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
	if (TAILQ_EMPTY(&lock->lf_blocked))
		printf("no process waiting for range\n");
	else
		printf("blocked locks:\n");
	TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
		printf("\t%lld..%lld type %s waiting on %p\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range);
}
#endif /* LOCKF_DEBUG */