/*	$NetBSD: vfs_lockf.c,v 1.11 1997/04/10 23:46:18 jtk Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

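/*
 * Illustrative sketch (not part of the original file): how the
 * flock-to-range conversion above behaves from user space.  The
 * 1000-byte file size is a made-up value for the example.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct flock fl;
	off_t size = 1000;		/* hypothetical file size */
	off_t start, end;

	fl.l_whence = SEEK_END;		/* offset relative to EOF */
	fl.l_start = -100;		/* last 100 bytes... */
	fl.l_len = 0;			/* ...through end of file */

	start = size + fl.l_start;	/* 900, per the SEEK_END case */
	end = (fl.l_len == 0) ? -1 : start + fl.l_len - 1;	/* -1: to EOF */
	printf("start %lld, end %lld\n", (long long)start, (long long)end);
	return (0);
}
#endif
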
/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority.
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		if (error) {
			/*
			 * Delete ourselves from the blocked list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			/*
			 * If we did not find ourselves on the list, but
			 * are still linked onto a lock list, then something
			 * is very wrong.
			 */
			if (block == NOLOCKF && lock->lf_next != NOLOCKF)
				panic("lf_setlock: lost lock");
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

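/*
 * Illustrative sketch (not part of the original file): the classic
 * two-process cycle that the deadlock scan above reports as EDEADLK.
 * The path /tmp/lockdemo is a made-up example; error checking is
 * omitted for brevity.
 */
#if 0
#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
lockbyte(int fd, off_t off)		/* hypothetical helper */
{
	struct flock fl;

	memset(&fl, 0, sizeof fl);
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = 1;
	return (fcntl(fd, F_SETLKW, &fl));
}

int
main(void)
{
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);

	if (fork() == 0) {
		lockbyte(fd, 1);	/* child: hold byte 1 */
		sleep(1);
		lockbyte(fd, 0);	/* then wait for byte 0 */
		_exit(0);
	}
	lockbyte(fd, 0);		/* parent: hold byte 0 */
	sleep(1);
	if (lockbyte(fd, 1) == -1 && errno == EDEADLK)
		printf("deadlock detected\n");	/* one side loses */
	return (0);
}
#endif
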
/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

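/*
 * Illustrative sketch (not part of the original file): unlocking the
 * middle of a held range exercises the "overlap contains lock" split
 * above, leaving two locks behind.  demo() and range() are
 * hypothetical helpers; fd is assumed open for writing.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <string.h>

static void
range(struct flock *fl, int type, off_t start, off_t len)
{
	memset(fl, 0, sizeof *fl);
	fl->l_type = type;
	fl->l_whence = SEEK_SET;
	fl->l_start = start;
	fl->l_len = len;
}

void
demo(int fd)
{
	struct flock fl;

	range(&fl, F_WRLCK, 0, 100);	/* lock [0,99] */
	fcntl(fd, F_SETLK, &fl);
	range(&fl, F_UNLCK, 40, 20);	/* unlock [40,59] */
	fcntl(fd, F_SETLK, &fl);	/* now holds [0,39] and [60,99] */
}
#endif
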
/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

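/*
 * Illustrative sketch (not part of the original file): F_GETLK
 * probes for a blocking lock without acquiring anything.  On return
 * l_type is F_UNLCK if the requested lock would fit; otherwise the
 * blocker's range and l_pid are filled in (-1 for flock-style
 * owners, as set above).  probe() is a hypothetical helper.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

void
probe(int fd)
{
	struct flock fl;

	memset(&fl, 0, sizeof fl);
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;			/* whole file */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("blocked by pid %d\n", (int)fl.l_pid);
}
#endif
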
/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS,
					&prev, &overlap)) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

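/*
 * Illustrative sketch (not part of the original file): the conflict
 * test applied in the loop above, stated as a standalone predicate.
 * Two overlapping locks conflict only if at least one is exclusive;
 * two shared (read) locks coexist.  lf_conflicts is a hypothetical
 * name.
 */
#if 0
static int
lf_conflicts(int mytype, int histype)
{
	return (mytype == F_WRLCK || histype == F_WRLCK);
}
#endif
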
/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap.
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

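/*
 * Illustrative sketch (not part of the original file): the six-way
 * classification above restated as a standalone function, with
 * end == -1 meaning "to end of file".  Given a held range [lfs,lfe]
 * and a request [s,e], it returns the case number.  classify() is a
 * hypothetical name.
 */
#if 0
#include <sys/types.h>

static int
classify(off_t lfs, off_t lfe, off_t s, off_t e)
{
	if ((lfe != -1 && s > lfe) || (e != -1 && lfs > e))
		return (0);	/* disjoint */
	if (lfs == s && lfe == e)
		return (1);	/* identical */
	if (lfs <= s && e != -1 && (lfe >= e || lfe == -1))
		return (2);	/* held range contains request */
	if (s <= lfs && (e == -1 || (lfe != -1 && e >= lfe)))
		return (3);	/* request contains held range */
	if (lfs < s && (lfe >= s || lfe == -1))
		return (4);	/* held range starts first */
	return (5);		/* held range ends last */
}

/*
 * E.g., classify(0, 99, 40, 59) == 2: carving [40,59] out of [0,99]
 * is the case that forces lf_split().
 */
#endif
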
/*
 * Add a lock to the end of the blocked list.
 */
void
lf_addblock(lock, blocked)
	struct lockf *lock;
	struct lockf *blocked;
{
	register struct lockf *lf;

	if (blocked == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", blocked);
		lf_print("to blocked list of", lock);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = lock->lf_block) == NOLOCKF) {
		lock->lf_block = blocked;
		return;
	}
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = blocked;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

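/*
 * Illustrative sketch (not part of the original file): the arithmetic
 * of the three-way split above, with plain longs standing in for
 * struct lockf.  The ranges are made-up example values.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long s1 = 0, e1 = 99;			/* encompassing lock */
	long s2 = 40, e2 = 59;			/* contained region */
	long tail_s = e2 + 1, tail_e = e1;	/* new tail lock */

	e1 = s2 - 1;				/* shrink the front piece */
	/* lf_split links these as front -> middle -> tail */
	printf("front [%ld,%ld] middle [%ld,%ld] tail [%ld,%ld]\n",
	    s1, e1, s2, e2, tail_s, tail_e);
	return (0);
}
#endif
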
/*
 * Wakeup a blocklist
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%p", lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block %p\n", lock->lf_block);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = lock; lf; lf = lf->lf_block) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%p", lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block %p\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */