/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.48 2006/09/18 18:19:33 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
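
/*
 * Illustrative sketch (not compiled): nothing calls vn_read() and
 * friends directly.  The descriptor layer indirects through whichever
 * fileops table is installed on the file pointer, roughly as shown
 * here (the helper name is hypothetical; DragonFly's fo_*() inline
 * wrappers perform the same indirection).
 */
#if 0
static int
example_fileops_read(struct file *fp, struct uio *uio,
		     struct ucred *cred, int flags)
{
	/* dispatches to vn_read or svn_read depending on fp->f_ops */
	return ((*fp->f_ops->fo_read)(fp, uio, cred, flags));
}
#endif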

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or
 * writing a device, which may block indefinitely), issue the device
 * operation, then relock the vnode before returning, plus other junk.
 * This bypasses all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}
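
/*
 * Illustrative sketch (not compiled): the generic path that this
 * shortcut avoids looks roughly like the following inside the specfs
 * read/write vnops.  The exact locking in specfs may differ; this is
 * only the shape of the dance described above.
 *
 *	vn_unlock(vp);			can't hold the vnode lock across
 *					device I/O that may block forever
 *	error = dev_dread(dev, uio, ioflag);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *
 * svn_read() and svn_write() below skip the vnode lock entirely and
 * talk to the device directly.
 */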

/*
 * Common code for vnode open operations.  Check permissions and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct namecache *ncp;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_ncp will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		bwillwrite();
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);
	ncp = nd->nl_ncp;

	/*
	 * Split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(ncp)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(ncp, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_ncp is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, ncp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_ncp inherits nl_ncp.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		fp->f_ncp = nd->nl_ncp;
		nd->nl_ncp = NULL;
		cache_unlock(fp->f_ncp);
	}

	/*
	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0.
	 */
	if (nd->nl_ncp) {
		cache_put(nd->nl_ncp);
		nd->nl_ncp = NULL;
	}

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
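
/*
 * Illustrative sketch (not compiled): the typical in-kernel calling
 * sequence for vn_open() as documented above.  The helper name is
 * hypothetical and error handling is abbreviated.  With fp == NULL and
 * NLC_LOCKVP not set, the vnode comes back referenced but unlocked in
 * nd.nl_open_vp and is later disposed of with vn_close().
 */
#if 0
static int
example_open_for_read(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		*vpp = nd.nl_open_vp;	/* referenced, unlocked */
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
	return (error);
}
#endif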

/*
 * Check for write permissions on the specified vnode.
 */
int
vn_writechk(struct vnode *vp, struct namecache *ncp)
{
	/*
	 * If there's shared text associated with the vnode we cannot
	 * allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the ncp.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (ncp && vp->v_type == VREG)
		return (ncp_writechk(ncp));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct namecache *ncp)
{
	if (ncp->nc_mount && (ncp->nc_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
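
/*
 * Worked example (illustrative, assuming the usual BKVASIZE of 16K,
 * IO_SEQMAX of 0x7f, and IO_SEQSHIFT of 16): a process issuing
 * back-to-back 64K reads advances f_seqcount by 4 per call until it
 * saturates at 127, after which every sequential call returns
 * 127 << 16 as the read-ahead hint or'd into ioflag.  A single
 * non-sequential operation then drops f_seqcount back to 1, and a
 * second one drops it to 0.
 */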

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
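
/*
 * Illustrative sketch (not compiled): a typical vn_rdwr() consumer
 * reading a header from offset 0 of a referenced vnode.  The helper
 * name is hypothetical.  IO_NODELOCKED is not passed, so vn_rdwr()
 * acquires and releases the vnode lock itself.
 */
#if 0
static int
example_read_header(struct vnode *vp, void *buf, int len,
		    struct ucred *cred)
{
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, len, (off_t)0,
			UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid != 0)
		error = EINVAL;		/* short read */
	return (error);
}
#endif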

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
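
/*
 * Worked example (illustrative, assuming the usual 64K MAXBSIZE): a
 * 204800 byte write starting at offset 10000 is issued as chunks of
 * 55536 (bringing the offset up to the next MAXBSIZE boundary), then
 * 65536, 65536, and finally 18192 bytes, with bwillwrite() and
 * uio_yield() interleaved between the chunks.
 */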

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		/* don't leak the device reference on the 0-byte fast path */
		release_dev(dev);
		error = 0;
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				/* the last write updates mtime, not atime */
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		cdev_t dev;

		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}
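
/*
 * Illustrative sketch (not compiled): vn_stat() only needs a referenced
 * vnode, a stat buffer, and a credential; vn_statfile() above is just
 * this with the vnode pulled out of a descriptor.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_vnode_size(struct vnode *vp, struct ucred *cred, off_t *sizep)
{
	struct stat st;
	int error;

	error = vn_stat(vp, &st, cred);
	if (error == 0)
		*sizep = st.st_size;
	return (error);
}
#endif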

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall through ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
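
/*
 * Illustrative sketch (not compiled): because vn_lock() re-checks
 * VRECLAIMED after acquiring the lock, a caller holding only a ref can
 * treat ENOENT as "the vnode was reclaimed out from under us".  The
 * helper name is hypothetical.
 */
#if 0
static int
example_lock_cycle(struct vnode *vp)
{
	int error;

	vref(vp);	/* the ref keeps the vnode from being recycled */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		/* vp is locked and not reclaimed */
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);	/* ENOENT if the vnode was reclaimed */
}
#endif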

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}
1089