xref: /csrg-svn/sys/ufs/lfs/lfs_subr.c (revision 57064)
151500Sbostic /*
251503Sbostic  * Copyright (c) 1991 Regents of the University of California.
351500Sbostic  * All rights reserved.
451500Sbostic  *
551500Sbostic  * %sccs.include.redist.c%
651500Sbostic  *
7*57064Smargo  *	@(#)lfs_subr.c	7.14 (Berkeley) 12/10/92
851500Sbostic  */
951500Sbostic 
1051500Sbostic #include <sys/param.h>
1151500Sbostic #include <sys/namei.h>
1251500Sbostic #include <sys/vnode.h>
1351500Sbostic #include <sys/buf.h>
1454264Sbostic #include <sys/mount.h>
15*57064Smargo #include <sys/malloc.h>
16*57064Smargo #include <sys/proc.h>
1751500Sbostic 
1851500Sbostic #include <ufs/ufs/quota.h>
1951500Sbostic #include <ufs/ufs/inode.h>
2051500Sbostic #include <ufs/lfs/lfs.h>
2151500Sbostic #include <ufs/lfs/lfs_extern.h>
2251500Sbostic 
/*
 * Return a buffer with the contents of the logical block containing byte
 * offset "ap->a_offset" of the file behind vnode "ap->a_vp".  If "ap->a_res"
 * is non-NULL, fill it in with a pointer to the byte within the buffer that
 * corresponds to the requested offset.
 */
2851500Sbostic int
2954691Sbostic lfs_blkatoff(ap)
3054691Sbostic 	struct vop_blkatoff_args /* {
3154691Sbostic 		struct vnode *a_vp;
3254691Sbostic 		off_t a_offset;
3354691Sbostic 		char **a_res;
3454691Sbostic 		struct buf **a_bpp;
3554691Sbostic 	} */ *ap;
3651500Sbostic {
3751500Sbostic 	register struct lfs *fs;
3851557Smckusick 	struct inode *ip;
3951500Sbostic 	struct buf *bp;
4051500Sbostic 	daddr_t lbn;
4151500Sbostic 	int bsize, error;
4251500Sbostic 
4353592Sheideman 	ip = VTOI(ap->a_vp);
4451500Sbostic 	fs = ip->i_lfs;
4553592Sheideman 	lbn = lblkno(fs, ap->a_offset);
4651500Sbostic 	bsize = blksize(fs);
4751500Sbostic 
4853592Sheideman 	*ap->a_bpp = NULL;
4953592Sheideman 	if (error = bread(ap->a_vp, lbn, bsize, NOCRED, &bp)) {
5051500Sbostic 		brelse(bp);
5151500Sbostic 		return (error);
5251500Sbostic 	}
5353592Sheideman 	if (ap->a_res)
5453592Sheideman 		*ap->a_res = bp->b_un.b_addr + blkoff(fs, ap->a_offset);
5553592Sheideman 	*ap->a_bpp = bp;
5651500Sbostic 	return (0);
5751500Sbostic }
5854264Sbostic 
59*57064Smargo 
6054691Sbostic /*
6154691Sbostic  * lfs_seglock --
6254691Sbostic  *	Single thread the segment writer.
6354691Sbostic  */
6454691Sbostic void
65*57064Smargo lfs_seglock(fs, flags)
6654691Sbostic 	struct lfs *fs;
67*57064Smargo 	unsigned long flags;
6854264Sbostic {
69*57064Smargo 	struct segment *sp;
70*57064Smargo 	int s;
71*57064Smargo 
72*57064Smargo 	if (fs->lfs_seglock)
73*57064Smargo 		if (fs->lfs_lockpid == curproc->p_pid) {
74*57064Smargo 			++fs->lfs_seglock;
75*57064Smargo 			fs->lfs_sp->seg_flags |= flags;
76*57064Smargo 			return;
77*57064Smargo 		} else while (fs->lfs_seglock)
78*57064Smargo 			(void)tsleep(&fs->lfs_seglock, PRIBIO + 1,
79*57064Smargo 			    "lfs seglock", 0);
80*57064Smargo 
8154691Sbostic 	fs->lfs_seglock = 1;
82*57064Smargo 	fs->lfs_lockpid = curproc->p_pid;
83*57064Smargo 
84*57064Smargo 	sp = fs->lfs_sp = malloc(sizeof(struct segment), M_SEGMENT, M_WAITOK);
85*57064Smargo 	sp->bpp = malloc(((LFS_SUMMARY_SIZE - sizeof(SEGSUM)) /
86*57064Smargo 	    sizeof(daddr_t) + 1) * sizeof(struct buf *), M_SEGMENT, M_WAITOK);
87*57064Smargo 	sp->seg_flags = flags;
88*57064Smargo 	sp->vp = NULL;
89*57064Smargo 	(void) lfs_initseg(fs);
90*57064Smargo 
91*57064Smargo 	/*
92*57064Smargo 	 * Keep a cumulative count of the outstanding I/O operations.  If the
93*57064Smargo 	 * disk drive catches up with us it could go to zero before we finish,
94*57064Smargo 	 * so we artificially increment it by one until we've scheduled all of
95*57064Smargo 	 * the writes we intend to do.
96*57064Smargo 	 */
97*57064Smargo 	s = splbio();
98*57064Smargo 	++fs->lfs_iocount;
99*57064Smargo 	splx(s);
10054264Sbostic }
10154264Sbostic /*
10254691Sbostic  * lfs_segunlock --
10354691Sbostic  *	Single thread the segment writer.
10454264Sbostic  */
10554691Sbostic void
10654691Sbostic lfs_segunlock(fs)
10754691Sbostic 	struct lfs *fs;
10854264Sbostic {
109*57064Smargo 	struct segment *sp;
110*57064Smargo 	unsigned long sync, ckp;
111*57064Smargo 	int s;
112*57064Smargo 
113*57064Smargo 	if (fs->lfs_seglock == 1) {
114*57064Smargo 
115*57064Smargo 		sp = fs->lfs_sp;
116*57064Smargo 		sync = sp->seg_flags & SEGM_SYNC;
117*57064Smargo 		ckp = sp->seg_flags & SEGM_CKP;
118*57064Smargo 		if (sp->bpp != sp->cbpp) {
119*57064Smargo 			/* Free allocated segment summary */
120*57064Smargo 			fs->lfs_offset -= LFS_SUMMARY_SIZE / DEV_BSIZE;
121*57064Smargo 			brelvp(*sp->bpp);
122*57064Smargo 			free((*sp->bpp)->b_un.b_addr, M_SEGMENT);
123*57064Smargo 			free(*sp->bpp, M_SEGMENT);
124*57064Smargo 		} else
125*57064Smargo 			printf ("unlock to 0 with no summary");
126*57064Smargo 		free(sp->bpp, M_SEGMENT);
127*57064Smargo 		free(sp, M_SEGMENT);
128*57064Smargo 
129*57064Smargo 		/*
130*57064Smargo 		 * If the I/O count is non-zero, sleep until it reaches zero.
131*57064Smargo 		 * At the moment, the user's process hangs around so we can
132*57064Smargo 		 * sleep.
133*57064Smargo 		 */
134*57064Smargo 		s = splbio();
135*57064Smargo 		--fs->lfs_iocount;
136*57064Smargo 		/*
137*57064Smargo 		 * We let checkpoints happen asynchronously.  That means
138*57064Smargo 		 * that during recovery, we have to roll forward between
139*57064Smargo 		 * the two segments described by the first and second
140*57064Smargo 		 * superblocks to make sure that the checkpoint described
141*57064Smargo 		 * by a superblock completed.
142*57064Smargo 		 */
143*57064Smargo 		if (sync && fs->lfs_iocount)
144*57064Smargo 		    (void)tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs vflush", 0);
145*57064Smargo 		splx(s);
146*57064Smargo 		if (ckp) {
147*57064Smargo 			fs->lfs_nactive = 0;
148*57064Smargo 			lfs_writesuper(fs);
149*57064Smargo 		}
150*57064Smargo 		--fs->lfs_seglock;
151*57064Smargo 		fs->lfs_lockpid = 0;
152*57064Smargo 		wakeup(&fs->lfs_seglock);
153*57064Smargo 	} else if (fs->lfs_seglock == 0) {
154*57064Smargo 		panic ("Seglock not held");
155*57064Smargo 	} else {
156*57064Smargo 		--fs->lfs_seglock;
157*57064Smargo 	}
15854264Sbostic }
159