/*	$NetBSD: vfs_lockf.c,v 1.9 1996/10/13 02:32:51 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
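
/*
 * SELF and OTHERS are selectors passed to lf_findoverlap(): SELF
 * restricts the search to locks owned by the same id as the request,
 * OTHERS to locks owned by anyone else.
 */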

/*
 * Do an advisory lock operation.
 */
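/*
 * "head" points at the chain of existing locks for the file; "size" is
 * the file size, used to resolve SEEK_END-relative requests; "id"
 * identifies the lock owner (for F_POSIX locks it is a pointer to the
 * owning process); "op" is F_SETLK, F_UNLCK or F_GETLK; "fl" is the
 * caller's flock structure; "flags" carries the F_POSIX/F_FLOCK style
 * and the F_WAIT flag.
 */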
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
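	/*
	 * A length of zero means "to end of file"; internally that is
	 * recorded as an end offset of -1.
	 */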
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
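				/*
				 * A process blocked in lf_setlock() sleeps on
				 * the address of its own lockf structure (see
				 * the tsleep() call below), so p_wchan can be
				 * cast back to that lock; its lf_next points
				 * at the lock it is waiting behind.
				 */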
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		if (error) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			/*
			 * If we did not find ourselves on the list, but
			 * are still linked onto a lock list, then something
			 * is very wrong.
			 */
			if (block == NOLOCKF && lock->lf_next != NOLOCKF)
				panic("lf_setlock: lost lock");
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock, and if so return a
 * description of it (type, range and owning process) in fl.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
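/*
 * A lock blocks the request only when the two byte ranges overlap and
 * at least one of the two locks is a write lock; overlapping read
 * locks do not conflict.
 */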
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS,
					&prev, &overlap)) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
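/*
 * The type argument is a mask of SELF and/or OTHERS and restricts the
 * search to locks owned by the same id as "lock" or by other owners.
 * On a non-zero return, *overlap points at the overlapping entry and
 * *prev at the list link through which it is reached, so the caller
 * can unlink it or insert ahead of it.
 */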
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
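		/*
		 * Pictorially, with "=" marking the existing lock (lf)
		 * and "-" marking the requested range (lock):
		 *
		 *	case 1:  lf   =====
		 *	         lock -----
		 *	case 2:  lf   =======
		 *	         lock   ---
		 *	case 3:  lf     ===
		 *	         lock -------
		 *	case 4:  lf   =====
		 *	         lock    -----
		 *	case 5:  lf      =====
		 *	         lock -----
		 */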
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
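/*
 * The blocked list is singly linked through the lf_block field; new
 * entries are appended at the tail, and lf_wakelock() later walks the
 * chain to wake each waiter.
 */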
void
lf_addblock(lock, blocked)
	struct lockf *lock;
	struct lockf *blocked;
{
	register struct lockf *lf;

	if (blocked == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", blocked);
		lf_print("to blocked list of", lock);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = lock->lf_block) == NOLOCKF) {
		lock->lf_block = blocked;
		return;
	}
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = blocked;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
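/*
 * lock2 describes a range contained within lock1.  If the two share a
 * start or end offset, lock1 is simply trimmed and lock2 linked next
 * to it; otherwise a third lockf structure is allocated for the part
 * of lock1 that lies beyond lock2.
 */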
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
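/*
 * Each lock on listhead's blocked chain is detached, its links are
 * cleared and a wakeup() is posted on it; the sleeper in lf_setlock()
 * then rescans the lock list for remaining blockers.
 */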
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block %p\n", lock->lf_block);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block %p\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */