/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.75 (Berkeley) 06/23/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

int lfs_mountfs __P((struct vnode *, struct mount *, struct proc *));

struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	lfs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_fhtovp,
	lfs_vptofh,
	lfs_init,
};

int
lfs_mountroot()
{
	panic("lfs_mountroot");		/* XXX -- implement */
}

/*
 * VFS Operations.
 *
 * mount system call
 */
lfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct lfs *fs;				/* LFS */
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);

	/* Until LFS can do NFS right.		XXX */
	if (args.exflags & MNT_EXPORTED)
		return (EINVAL);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
#else
		fs = ump->um_lfs;
		if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->lfs_ronly = 0;
#endif
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			if (args.exflags & MNT_EXPORTED) {
				if (error = hang_addrlist(mp, &args))
					return (error);
				mp->mnt_flag |= MNT_EXPORTED;
			}
			if (args.exflags & MNT_DELEXPORT) {
				free_addrlist(ump);
				mp->mnt_flag &=
				    ~(MNT_EXPORTED | MNT_DEFEXPORTED);
			}
			return (0);
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if (error = namei(ndp))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
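	/*
	 * For a fresh mount, read the superblock in from the device;
	 * for an update, the device named must be the one that is
	 * already mounted.
	 */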
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = lfs_mountfs(devvp, mp, p);		/* LFS */
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;					/* LFS */
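	/*
	 * Record the mounted-on and mounted-from names in both the
	 * in-core superblock and the mount statistics, then bring the
	 * statistics up to date.
	 */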
#ifdef NOTLFS							/* LFS */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
#else
	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) lfs_statfs(mp, &mp->mnt_stat, p);
#endif
	return (0);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	USES_VOP_CLOSE;
	USES_VOP_IOCTL;
	USES_VOP_OPEN;
	USES_VOP_VGET;
	extern struct vnode *rootvp;
	register struct lfs *fs;
	register struct ufsmount *ump;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = ufs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vinvalbuf(devvp, 1);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);

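	/*
	 * Ask the driver for the partition's sector size; if the
	 * DIOCGPART ioctl fails, fall back on DEV_BSIZE.
	 */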
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
#ifdef NEVER_USED
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock. */
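	/*
	 * The superblock lies LFS_LABELPAD bytes into the partition;
	 * convert that byte offset to a disk block number using the
	 * sector size found above.
	 */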
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_lfs;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(struct lfs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
#ifdef DEBUG
	lfs_dump_super(fs);
#endif

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	fs = ump->um_lfs = malloc(sizeof(struct lfs), M_UFSMNT, M_WAITOK);
	bcopy(bp->b_un.b_addr, fs, sizeof(struct lfs));
	if (sizeof(struct lfs) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/* Set up the I/O information */
	fs->lfs_iocount = 0;

	/* Set up the ifile flags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table each time, we retrieve it here,
	 * artificially increment the reference count, and keep a pointer
	 * to it in the incore copy of the superblock.
	 */
	if (error = LFS_VGET(mp, LFS_IFILE_INUM, &vp))
		goto out;
	fs->lfs_ivnode = vp;
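	/*
	 * Take an extra reference so the ifile vnode persists for the
	 * life of the mount, then unlock it and drop the reference
	 * acquired by LFS_VGET.
	 */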
	VREF(vp);
	vput(vp);

	return (0);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_lfs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * unmount system call
 */
lfs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	USES_VOP_CLOSE;
	extern int doforce;
	register struct ufsmount *ump;
	register struct lfs *fs;				/* LFS */
	int i, error, ronly, flags = 0;
	int ndirty;						/* LFS */

#ifdef VERBOSE
	printf("lfs_unmount\n");
#endif
	if (mntflags & MNT_FORCE) {
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	/*
	 * FFS does a mntflushbuf here.  Our analogous operation
	 * would be a segment write, but that has already been
	 * done in the vfs code.
	 */
	if (lfs_mntinvalbuf(mp))
		return (EBUSY);

	/* Need to checkpoint again to pick up any new ifile changes. */
	if (error = lfs_segwrite(mp, 1))
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
	if (fs->lfs_ivnode->v_dirtyblkhd)
		panic("Still have dirty blocks on ifile vnode\n");
	if (lfs_vinvalbuf(fs->lfs_ivnode))
		panic("lfs_vinvalbuf failed on ifile\n");

#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, fs->lfs_ivnode, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
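	/*
	 * Drop the extra reference on the ifile vnode taken at mount
	 * time so vflush can reclaim it along with everything else.
	 */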
	vrele(fs->lfs_ivnode);
	if (error = vflush(mp, NULLVP, flags))
		return (error);
	ronly = fs->lfs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);
	vrele(ump->um_devvp);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Return root of a filesystem
 */
int
lfs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	USES_VOP_VGET;
	struct vnode *nvp;
	int error;

#ifdef VERBOSE
	printf("lfs_root\n");
#endif
	if (error = LFS_VGET(mp, (ino_t)ROOTINO, &nvp))
		return (error);
	*vpp = nvp;
	return (0);
}

/*
 * Get file system statistics.
 */
lfs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct lfs *fs;
	register struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statfs: magic");
	sbp->f_type = MOUNT_LFS;
	sbp->f_bsize = fs->lfs_bsize;
	sbp->f_iosize = fs->lfs_bsize;
	sbp->f_blocks = fs->lfs_dsize;
	sbp->f_bfree = fs->lfs_bfree;
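	/*
	 * f_bavail is the space ordinary users may consume: the part
	 * of the disk left after the lfs_minfree reserve, minus the
	 * blocks already in use.
	 */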
	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
		(fs->lfs_dsize - sbp->f_bfree);
	sbp->f_files = fs->lfs_nfiles;
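	/*
	 * LFS allocates inodes on demand, so estimate the free inode
	 * count from the free blocks and the inodes each block holds.
	 */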
	sbp->f_ffree = fs->lfs_bfree * INOPB(fs);
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
lfs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	extern int crashandburn, syncprt;
	int error;

#ifdef VERBOSE
	printf("lfs_sync\n");
#endif

#ifdef DIAGNOSTIC
	if (crashandburn)
		return (0);
#endif
	if (syncprt)
		ufs_bufstats();

	/* All syncs must be checkpoints until roll-forward is implemented. */
	error = lfs_segwrite(mp, 1);
#ifdef QUOTA
	qsync(mp);
#endif
	return (error);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call lfs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 *
 * XXX
 * Use the ifile to see whether the inode is allocated instead of reading
 * it off disk.  What is the relationship between my generation number and
 * the NFS generation number?
 */
int
lfs_fhtovp(mp, fhp, setgen, vpp)
	register struct mount *mp;
	struct fid *fhp;
	int setgen;
	struct vnode **vpp;
{
	USES_VOP_VGET;
	register struct inode *ip;
	register struct ufid *ufhp;
	struct vnode *nvp;
	int error;

	ufhp = (struct ufid *)fhp;
	if (ufhp->ufid_ino < ROOTINO)
		return (EINVAL);
	if (error = LFS_VGET(mp, ufhp->ufid_ino, &nvp)) {
		*vpp = NULLVP;
		return (error);
	}
	ip = VTOI(nvp);
	if (ip->i_mode == 0) {
		ufs_iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	if (ip->i_gen != ufhp->ufid_gen) {
		if (setgen)
			ufhp->ufid_gen = ip->i_gen;
		else {
			ufs_iput(ip);
			*vpp = NULLVP;
			return (EINVAL);
		}
	}
	*vpp = nvp;
	return (0);
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
lfs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}
523