xref: /csrg-svn/sys/ufs/lfs/lfs_vfsops.c (revision 51215)
/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.59 (Berkeley) 10/02/91
 */

#ifdef LOGFS
#include "param.h"
#include "systm.h"
#include "namei.h"
#include "proc.h"
#include "kernel.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "buf.h"
#include "file.h"
#include "disklabel.h"
#include "ioctl.h"
#include "errno.h"
#include "malloc.h"

#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/ufsmount.h"
#include "../vm/vm_param.h"
#include "../vm/lock.h"
#include "lfs.h"
#include "lfs_extern.h"

static int	lfs_mountfs
		    __P((struct vnode *, struct mount *, struct proc *));

static int 	lfs_umountdebug __P((struct mount *));
static int 	lfs_vinvalbuf __P((register struct vnode *));

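/*
 * The ufs_ entries below are the generic UFS routines shared with FFS;
 * the lfs_ entries are LFS-specific.
 */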
struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	lfs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_fhtovp,
	ufs_vptofh,
	lfs_init
};

/*
 * Flag to allow forcible unmounting.
 */
extern int doforce;						/* LFS */

lfs_mountroot()
{
	/* LFS IMPLEMENT -- lfs_mountroot */
	panic("lfs_mountroot");
}

/*
 * VFS Operations.
 *
 * mount system call
 */
lfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register LFS *fs;					/* LFS */
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);
	/*
	 * Process export requests.
	 */
	if ((args.exflags & MNT_EXPORTED) || (mp->mnt_flag & MNT_EXPORTED)) {
		if (args.exflags & MNT_EXPORTED)
			mp->mnt_flag |= MNT_EXPORTED;
		else
			mp->mnt_flag &= ~MNT_EXPORTED;
		if (args.exflags & MNT_EXRDONLY)
			mp->mnt_flag |= MNT_EXRDONLY;
		else
			mp->mnt_flag &= ~MNT_EXRDONLY;
		mp->mnt_exroot = args.exroot;
	}
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
#else
		fs = ump->um_lfs;
		if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->lfs_ronly = 0;
#endif
		if (args.fspec == 0)
			return (0);
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	ndp->ni_nameiop = LOOKUP | FOLLOW;
	ndp->ni_segflg = UIO_USERSPACE;
	ndp->ni_dirp = args.fspec;
	if (error = namei(ndp, p))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = lfs_mountfs(devvp, mp, p);		/* LFS */
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;					/* LFS */
#ifdef NOTLFS							/* LFS */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
#else
	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) lfs_statfs(mp, &mp->mnt_stat, p);
#endif
	return (0);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
static int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	extern struct vnode *rootvp;
	register LFS *fs;
	register struct ufsmount *ump;
	struct inode *ip;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	daddr_t seg_addr;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vinvalbuf(devvp, 1);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);

	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
#ifdef NEVER_USED
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock. */
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_lfs;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(LFS)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
#ifdef DEBUG
	dump_super(fs);
#endif

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	ump->um_lfs = malloc(sizeof(LFS), M_SUPERBLK, M_WAITOK);
	bcopy(bp->b_un.b_addr, ump->um_lfs, sizeof(LFS));
	if (sizeof(LFS) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/*
	 * Switch fs to the in-core copy of the superblock before touching
	 * it again; the buffer it referenced has just been released.
	 */
	fs = ump->um_lfs;

	/* Set up the I/O information */
	fs->lfs_iocount = 0;
	fs->lfs_seglist = NULL;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;

	/* Read the ifile disk inode and store it in a vnode. */
	error = bread(devvp, fs->lfs_idaddr, fs->lfs_bsize, NOCRED, &bp);
	if (error)
		goto out;
	error = lfs_vcreate(mp, LFS_IFILE_INUM, &vp);
	if (error)
		goto out;
	ip = VTOI(vp);

	/* The ifile inode is stored in the superblock. */
	fs->lfs_ivnode = vp;

	/* Copy the on-disk inode into place. */
	ip->i_din = *lfs_ifind(fs, LFS_IFILE_INUM, bp->b_un.b_dino);
	brelse(bp);

	/* Initialize the associated vnode */
	vp->v_type = IFTOVT(ip->i_mode);

	/*
	 * Read in the segusage table.
	 *
	 * Since we always explicitly write the segusage table at a checkpoint,
	 * we're assuming that it is contiguous on disk.
	 */
	seg_addr = ip->i_din.di_db[0];
	size = fs->lfs_segtabsz << fs->lfs_bshift;
	fs->lfs_segtab = malloc(size, M_SUPERBLK, M_WAITOK);
	error = bread(devvp, seg_addr, size, NOCRED, &bp);
	if (error) {
		free(fs->lfs_segtab, M_SUPERBLK);
		goto out;
	}
	bcopy((caddr_t)bp->b_un.b_addr, fs->lfs_segtab, size);
	brelse(bp);
	devvp->v_specflags |= SI_MOUNTEDON;
	VREF(ip->i_devvp);
	return (0);
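	/*
	 * Error exit: release any buffer still held, close the device and
	 * free whatever pieces of the mount structure were allocated above.
	 */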
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free((caddr_t)ump->um_lfs, M_SUPERBLK);
		free((caddr_t)ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * unmount system call
 */
lfs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register LFS *fs;					/* LFS */
	int i, error, ronly, flags = 0;
	int ndirty;						/* LFS */

printf("lfs_unmount\n");
	if (mntflags & MNT_FORCE) {
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	if (error = lfs_segwrite(mp, 1))
		return(error);

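	/*
	 * Debugging scaffolding: count and report any buffers still dirty
	 * after the segment write, then bail out before the normal unmount
	 * path below gets a chance to run.
	 */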
ndirty = lfs_umountdebug(mp);
printf("lfs_umountdebug: returned %d dirty\n", ndirty);
return(0);
	if (mntinvalbuf(mp))
		return (EBUSY);
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	if (error = vflush(mp, NULLVP, flags))
		return (error);
#ifdef NOTLFS							/* LFS */
	fs = ump->um_fs;
	ronly = !fs->fs_ronly;
#else
	fs = ump->um_lfs;
	ronly = !fs->lfs_ronly;
#endif
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);
	vrele(ump->um_devvp);
#ifdef NOTLFS							/* LFS */
	free((caddr_t)fs->fs_csp[0], M_SUPERBLK);
#else
	free(fs->lfs_segtab, M_SUPERBLK);
	iput(VTOI(fs->lfs_ivnode));
#endif
	free((caddr_t)fs, M_SUPERBLK);
	free((caddr_t)ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Return root of a filesystem
 */
lfs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	register struct inode *ip;
	struct inode *nip;
	struct vnode tvp;
	int error;

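	/*
	 * Build a dummy vnode/inode pair on the stack just to hand the mount
	 * point and device to lfs_iget(), which returns the real in-core
	 * inode of the root directory in nip.
	 */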
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	error = lfs_iget(ip, (ino_t)ROOTINO, &nip);		/* LFS */
	if (error)
		return (error);
	*vpp = ITOV(nip);
	return (0);
}

/*
 * Get file system statistics.
 */
lfs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register LFS *fs;
	register struct ufsmount *ump;

	ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ufs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_fsize = fs->fs_fsize;
	sbp->f_bsize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
		(fs->fs_dsize - sbp->f_bfree);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
#else
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statfs: magic");
	sbp->f_type = MOUNT_LFS;
	sbp->f_fsize = fs->lfs_bsize;
	sbp->f_bsize = fs->lfs_bsize;
	sbp->f_blocks = fs->lfs_dsize;
	sbp->f_bfree = fs->lfs_bfree;
	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
		(fs->lfs_dsize - sbp->f_bfree);
	sbp->f_files = fs->lfs_nfiles;
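	/*
	 * LFS has no preallocated inodes, so estimate the free inode count
	 * as one block's worth of inodes, INOPB(fs), per free block.
	 */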
	sbp->f_ffree = fs->lfs_bfree * INOPB(fs);
#endif
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

extern int	syncprt;					/* LFS */
extern lock_data_t lfs_sync_lock;

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
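/*
 * Debugging flag, set below before forcing a checkpoint; presumably
 * examined elsewhere in the LFS debugging code.
 */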
int STOPNOW;
lfs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	int error;

printf("lfs_sync\n");

	/*
	 * Concurrent syncs aren't possible because the meta data blocks are
	 * only marked dirty, not busy!
	 */
	lock_write(&lfs_sync_lock);

	if (syncprt)
		bufstats();
	/*
	 * Once we can roll forward, not every sync will have to be a
	 * checkpoint.  Until then, make sure they all are.
	 */
STOPNOW=1;
	error = lfs_segwrite(mp, 1);
	lock_done(&lfs_sync_lock);
#ifdef QUOTA
	qsync(mp);
#endif
	return (error);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is in range
 * - call iget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 */
lfs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	register LFS *fs;					/* LFS */
	register struct inode *ip;
	IFILE *ifp;
	struct buf *bp;
	struct inode *nip;
	struct vnode tvp;
	int error;

	ufhp = (struct ufid *)fhp;
#ifdef NOTLFS							/* LFS */
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) {
		*vpp = NULLVP;
		return (EINVAL);
	}
#else
	fs = VFSTOUFS(mp)->um_lfs;
	if (ufhp->ufid_ino < ROOTINO) {
		*vpp = NULLVP;
		return (EINVAL);
	}
#endif
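	/*
	 * As in lfs_root(), use a dummy vnode/inode pair on the stack to hand
	 * the mount point and device to lfs_iget(); nip gets the real inode.
	 */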
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	if (error = lfs_iget(ip, ufhp->ufid_ino, &nip)) {	/* LFS */
		*vpp = NULLVP;
		return (error);
	}
	ip = nip;
	if (ip->i_mode == 0) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	if (ip->i_gen != ufhp->ufid_gen) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	*vpp = ITOV(ip);
	return (0);
}

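/*
 * Debugging helper for unmount: walk every vnode on the mount and count,
 * via lfs_vinvalbuf(), the buffers that are still dirty.
 */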
static int
lfs_umountdebug(mp)
	struct mount *mp;
{
	struct vnode *vp;
	int dirty;

	dirty = 0;
	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("umountdebug: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		dirty += lfs_vinvalbuf(vp);
		vput(vp);
		if (vp->v_mount != mp)
			goto loop;
	}
	return (dirty);
}
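/*
 * Debugging variant of vinvalbuf(): instead of sleeping on busy buffers or
 * writing delayed-write buffers, report them and count the dirty ones.
 */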
static int
lfs_vinvalbuf(vp)
	register struct vnode *vp;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
printf("lfs_vinvalbuf: ino %d, lblkno %d, blkno %lx, flags %lx\n",
VTOI(vp)->i_number, bp->b_lblkno, bp->b_blkno, bp->b_flags);
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
printf("lfs_vinvalbuf: buffer busy, would normally sleep\n");
/*
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO + 1);
*/
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (bp->b_flags & B_DELWRI) {
				dirty++;			/* XXX */
printf("lfs_vinvalbuf: buffer dirty (DELWRI). would normally write\n");
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("lfs_vinvalbuf: flush failed");
	return (dirty);
}
#endif /* LOGFS */