xref: /dflybsd-src/sys/kern/vfs_vnops.c (revision 41871674d0079dec70d55eb824f39d07dc7b3310)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
39  * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
40  * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.37 2006/04/01 20:46:48 dillon Exp $
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/fcntl.h>
46 #include <sys/file.h>
47 #include <sys/stat.h>
48 #include <sys/proc.h>
49 #include <sys/mount.h>
50 #include <sys/nlookup.h>
51 #include <sys/vnode.h>
52 #include <sys/buf.h>
53 #include <sys/filio.h>
54 #include <sys/ttycom.h>
55 #include <sys/conf.h>
56 #include <sys/syslog.h>
57 
58 static int vn_closefile (struct file *fp, struct thread *td);
59 static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
60 		struct thread *td);
61 static int vn_read (struct file *fp, struct uio *uio,
62 		struct ucred *cred, int flags, struct thread *td);
63 static int svn_read (struct file *fp, struct uio *uio,
64 		struct ucred *cred, int flags, struct thread *td);
65 static int vn_poll (struct file *fp, int events, struct ucred *cred,
66 		struct thread *td);
67 static int vn_kqfilter (struct file *fp, struct knote *kn);
68 static int vn_statfile (struct file *fp, struct stat *sb, struct thread *td);
69 static int vn_write (struct file *fp, struct uio *uio,
70 		struct ucred *cred, int flags, struct thread *td);
71 static int svn_write (struct file *fp, struct uio *uio,
72 		struct ucred *cred, int flags, struct thread *td);
73 
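/*
 * File operations vectors for vnode-backed descriptors.  vnode_fileops is
 * the default; specvnode_fileops substitutes the svn_* device fast paths
 * for read and write and is installed by vn_setspecops() below.
 */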
74 struct fileops vnode_fileops = {
75 	NULL,	/* port */
76 	NULL,	/* clone */
77 	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
78 	vn_statfile, vn_closefile, nofo_shutdown
79 };
80 
81 struct fileops specvnode_fileops = {
82 	NULL,	/* port */
83 	NULL,	/* clone */
84 	svn_read, svn_write, vn_ioctl, vn_poll, vn_kqfilter,
85 	vn_statfile, vn_closefile, nofo_shutdown
86 };
87 
88 /*
89  * Shortcut the device read/write.  This avoids a lot of vnode junk.
90  * Basically the specfs vnops for read and write take the locked vnode,
91  * unlock it (because we can't hold the vnode locked while reading or writing
 92  * a device, which may block indefinitely), issue the device operation, then
93  * relock the vnode before returning, plus other junk.  This bypasses all
94  * of that and just does the device operation.
95  */
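/*
 * vfs_fastdev is a global tunable declared outside this file; when it is
 * clear the descriptor keeps the normal vnode_fileops path.
 */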
96 void
97 vn_setspecops(struct file *fp)
98 {
99 	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
100 		fp->f_ops = &specvnode_fileops;
101 	}
102 }
103 
104 /*
105  * Common code for vnode open operations.  Check permissions, and call
 106  * the VOP_OPEN or VOP_NCREATE routine.
107  *
108  * The caller is responsible for setting up nd with nlookup_init() and
109  * for cleaning it up with nlookup_done(), whether we return an error
110  * or not.
111  *
112  * On success nd->nl_open_vp will hold a referenced and, if requested,
113  * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
114  * is non-NULL the vnode will be installed in the file pointer.
115  *
116  * NOTE: The vnode is referenced just once on return whether or not it
117  * is also installed in the file pointer.
118  */
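/*
 * Illustrative call pattern (a sketch, not taken from this file):
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	nlookup_done(&nd);
 *
 * nlookup_done() is safe to call whether or not vn_open() succeeded.
 */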
119 int
120 vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
121 {
122 	struct vnode *vp;
123 	struct thread *td = nd->nl_td;
124 	struct ucred *cred = nd->nl_cred;
125 	struct vattr vat;
126 	struct vattr *vap = &vat;
127 	struct namecache *ncp;
128 	int mode, error;
129 
130 	/*
131 	 * Lookup the path and create or obtain the vnode.  After a
132 	 * successful lookup a locked nd->nl_ncp will be returned.
133 	 *
134 	 * The result of this section should be a locked vnode.
135 	 *
136 	 * XXX with only a little work we should be able to avoid locking
137 	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
138 	 */
139 	if (fmode & O_CREAT) {
140 		/*
141 		 * CONDITIONAL CREATE FILE CASE
142 		 *
 143  * Setting NLC_CREATE causes a negative hit to store the
 144  * negative-hit ncp in nd rather than return an error.  Then
145 		 * nc_error or nc_vp may be checked to see if the ncp
146 		 * represents a negative hit.  NLC_CREATE also requires
147 		 * write permission on the governing directory or EPERM
148 		 * is returned.
149 		 */
150 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
151 			nd->nl_flags |= NLC_FOLLOW;
152 		nd->nl_flags |= NLC_CREATE;
153 		bwillwrite();
154 		error = nlookup(nd);
155 	} else {
156 		/*
157 		 * NORMAL OPEN FILE CASE
158 		 */
159 		error = nlookup(nd);
160 	}
161 
162 	if (error)
163 		return (error);
164 	ncp = nd->nl_ncp;
165 
166 	/*
167 	 * split case to allow us to re-resolve and retry the ncp in case
168 	 * we get ESTALE.
169 	 */
170 again:
171 	if (fmode & O_CREAT) {
172 		if (ncp->nc_vp == NULL) {
173 			VATTR_NULL(vap);
174 			vap->va_type = VREG;
175 			vap->va_mode = cmode;
176 			if (fmode & O_EXCL)
177 				vap->va_vaflags |= VA_EXCLUSIVE;
178 			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
179 			if (error)
180 				return (error);
181 			fmode &= ~O_TRUNC;
182 			ASSERT_VOP_LOCKED(vp, "create");
183 			/* locked vnode is returned */
184 		} else {
185 			if (fmode & O_EXCL) {
186 				error = EEXIST;
187 			} else {
188 				error = cache_vget(ncp, cred,
189 						    LK_EXCLUSIVE, &vp);
190 			}
191 			if (error)
192 				return (error);
193 			fmode &= ~O_CREAT;
194 		}
195 	} else {
196 		error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
197 		if (error)
198 			return (error);
199 	}
200 
201 	/*
202 	 * We have a locked vnode and ncp now.  Note that the ncp will
203 	 * be cleaned up by the caller if nd->nl_ncp is left intact.
204 	 */
205 	if (vp->v_type == VLNK) {
206 		error = EMLINK;
207 		goto bad;
208 	}
209 	if (vp->v_type == VSOCK) {
210 		error = EOPNOTSUPP;
211 		goto bad;
212 	}
213 	if ((fmode & O_CREAT) == 0) {
214 		mode = 0;
215 		if (fmode & (FWRITE | O_TRUNC)) {
216 			if (vp->v_type == VDIR) {
217 				error = EISDIR;
218 				goto bad;
219 			}
220 			error = vn_writechk(vp);
221 			if (error) {
222 				/*
223 				 * Special stale handling, re-resolve the
224 				 * vnode.
225 				 */
226 				if (error == ESTALE) {
227 					vput(vp);
228 					vp = NULL;
229 					cache_setunresolved(ncp);
230 					error = cache_resolve(ncp, cred);
231 					if (error == 0)
232 						goto again;
233 				}
234 				goto bad;
235 			}
236 			mode |= VWRITE;
237 		}
238 		if (fmode & FREAD)
239 			mode |= VREAD;
240 		if (mode) {
 241 			error = VOP_ACCESS(vp, mode, cred, td);
242 			if (error) {
243 				/*
244 				 * Special stale handling, re-resolve the
245 				 * vnode.
246 				 */
247 				if (error == ESTALE) {
248 					vput(vp);
249 					vp = NULL;
250 					cache_setunresolved(ncp);
251 					error = cache_resolve(ncp, cred);
252 					if (error == 0)
253 						goto again;
254 				}
255 				goto bad;
256 			}
257 		}
258 	}
259 	if (fmode & O_TRUNC) {
260 		VOP_UNLOCK(vp, 0, td);			/* XXX */
261 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);	/* XXX */
262 		VATTR_NULL(vap);
263 		vap->va_size = 0;
264 		error = VOP_SETATTR(vp, vap, cred, td);
265 		if (error)
266 			goto bad;
267 	}
268 
269 	/*
 270 	 * Set up the fp so VOP_OPEN can override it.  No descriptor has been
 271 	 * associated with the fp yet so we own it clean.  f_ncp inherits
 272 	 * nl_ncp.
273 	 */
274 	if (fp) {
275 		if (vp->v_type == VDIR) {
276 			fp->f_ncp = nd->nl_ncp;
277 			nd->nl_ncp = NULL;
278 			cache_unlock(fp->f_ncp);
279 		}
280 	}
281 
282 	/*
283 	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
284 	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
285 	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
286 	 * on /dev/ttyd0
287 	 */
288 	if (nd->nl_ncp) {
289 		cache_put(nd->nl_ncp);
290 		nd->nl_ncp = NULL;
291 	}
292 
293 	error = VOP_OPEN(vp, fmode, cred, fp, td);
294 	if (error) {
295 		/*
296 		 * setting f_ops to &badfileops will prevent the descriptor
 297 		 * code from trying to close and release the vnode.  Since
 298 		 * the open failed we do not want to call close.
299 		 */
300 		if (fp) {
301 			fp->f_data = NULL;
302 			fp->f_ops = &badfileops;
303 		}
304 		goto bad;
305 	}
306 
307 #if 0
308 	/*
309 	 * Assert that VREG files have been setup for vmio.
310 	 */
311 	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
312 		("vn_open: regular file was not VMIO enabled!"));
313 #endif
314 
315 	/*
316 	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
317 	 * only returned in the fp == NULL case.
318 	 */
319 	if (fp == NULL) {
320 		nd->nl_open_vp = vp;
321 		nd->nl_vp_fmode = fmode;
322 		if ((nd->nl_flags & NLC_LOCKVP) == 0)
323 			VOP_UNLOCK(vp, 0, td);
324 	} else {
325 		vput(vp);
326 	}
327 	return (0);
328 bad:
329 	if (vp)
330 		vput(vp);
331 	return (error);
332 }
333 
334 /*
335  * Check for write permissions on the specified vnode.
336  * Prototype text segments cannot be written.
337  */
338 int
339 vn_writechk(vp)
340 	struct vnode *vp;
341 {
342 
343 	/*
344 	 * If there's shared text associated with
 345 	 * the vnode (it backs a running program's text
 346 	 * segment), we can't allow writing.
347 	 */
348 	if (vp->v_flag & VTEXT)
349 		return (ETXTBSY);
350 	return (0);
351 }
352 
353 /*
354  * Vnode close call
355  */
356 int
357 vn_close(struct vnode *vp, int flags, struct thread *td)
358 {
359 	int error;
360 
361 	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td)) == 0) {
362 		error = VOP_CLOSE(vp, flags, td);
363 		VOP_UNLOCK(vp, 0, td);
364 	}
365 	vrele(vp);
366 	return (error);
367 }
368 
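/*
 * Estimate how sequential the I/O on this descriptor has been.  The value
 * returned is f_seqcount shifted into the IO_SEQSHIFT bits of the ioflag,
 * which the underlying VOP_READ/VOP_WRITE code can use to scale read-ahead
 * and clustering (a general description of the intent, not verified against
 * every filesystem).
 */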
369 static __inline
370 int
371 sequential_heuristic(struct uio *uio, struct file *fp)
372 {
373 	/*
374 	 * Sequential heuristic - detect sequential operation
375 	 */
376 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
377 	    uio->uio_offset == fp->f_nextoff) {
378 		int tmpseq = fp->f_seqcount;
379 		/*
380 		 * XXX we assume that the filesystem block size is
381 		 * the default.  Not true, but still gives us a pretty
382 		 * good indicator of how sequential the read operations
383 		 * are.
384 		 */
385 		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
386 		if (tmpseq > IO_SEQMAX)
387 			tmpseq = IO_SEQMAX;
388 		fp->f_seqcount = tmpseq;
389 		return(fp->f_seqcount << IO_SEQSHIFT);
390 	}
391 
392 	/*
393 	 * Not sequential, quick draw-down of seqcount
394 	 */
395 	if (fp->f_seqcount > 1)
396 		fp->f_seqcount = 1;
397 	else
398 		fp->f_seqcount = 0;
399 	return(0);
400 }
401 
402 /*
403  * Package up an I/O request on a vnode into a uio and do it.
404  */
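/*
 * Illustrative use (a sketch, not taken from this file): read the first
 * 512 bytes of a vnode the caller holds a reference on.  With ioflg 0
 * vn_rdwr() locks and unlocks the vnode itself:
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *			UIO_SYSSPACE, 0, cred, &resid, td);
 *
 * On return resid holds the number of bytes that were not transferred.
 */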
405 int
406 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
407 	enum uio_rw rw;
408 	struct vnode *vp;
409 	caddr_t base;
410 	int len;
411 	off_t offset;
412 	enum uio_seg segflg;
413 	int ioflg;
414 	struct ucred *cred;
415 	int *aresid;
416 	struct thread *td;
417 {
418 	struct uio auio;
419 	struct iovec aiov;
420 	int error;
421 
422 	if ((ioflg & IO_NODELOCKED) == 0)
423 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
424 	auio.uio_iov = &aiov;
425 	auio.uio_iovcnt = 1;
426 	aiov.iov_base = base;
427 	aiov.iov_len = len;
428 	auio.uio_resid = len;
429 	auio.uio_offset = offset;
430 	auio.uio_segflg = segflg;
431 	auio.uio_rw = rw;
432 	auio.uio_td = td;
433 	if (rw == UIO_READ) {
434 		error = VOP_READ(vp, &auio, ioflg, cred);
435 	} else {
436 		error = VOP_WRITE(vp, &auio, ioflg, cred);
437 	}
438 	if (aresid)
439 		*aresid = auio.uio_resid;
440 	else
441 		if (auio.uio_resid && error == 0)
442 			error = EIO;
443 	if ((ioflg & IO_NODELOCKED) == 0)
444 		VOP_UNLOCK(vp, 0, td);
445 	return (error);
446 }
447 
448 /*
449  * Package up an I/O request on a vnode into a uio and do it.  The I/O
450  * request is split up into smaller chunks and we try to avoid saturating
451  * the buffer cache while potentially holding a vnode locked, so we
 452  * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
453  * to give other processes a chance to lock the vnode (either other processes
454  * core'ing the same binary, or unrelated processes scanning the directory).
455  */
456 int
457 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
458 	enum uio_rw rw;
459 	struct vnode *vp;
460 	caddr_t base;
461 	int len;
462 	off_t offset;
463 	enum uio_seg segflg;
464 	int ioflg;
465 	struct ucred *cred;
466 	int *aresid;
467 	struct thread *td;
468 {
469 	int error = 0;
470 
471 	do {
472 		int chunk;
473 
474 		/*
475 		 * Force `offset' to a multiple of MAXBSIZE except possibly
476 		 * for the first chunk, so that filesystems only need to
477 		 * write full blocks except possibly for the first and last
478 		 * chunks.
479 		 */
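		/*
		 * For example, assuming the historical MAXBSIZE of 64KB:
		 * starting at offset 100000 the first chunk is 31072 bytes,
		 * leaving offset at 131072 so later chunks are MAXBSIZE
		 * aligned.
		 */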
480 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
481 
482 		if (chunk > len)
483 			chunk = len;
484 		if (rw != UIO_READ && vp->v_type == VREG)
485 			bwillwrite();
486 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
487 			    ioflg, cred, aresid, td);
488 		len -= chunk;	/* aresid calc already includes length */
489 		if (error)
490 			break;
491 		offset += chunk;
492 		base += chunk;
493 		uio_yield();
494 	} while (len);
495 	if (aresid)
496 		*aresid += len;
497 	return (error);
498 }
499 
500 /*
501  * File table vnode read routine.
502  */
503 static int
504 vn_read(fp, uio, cred, flags, td)
505 	struct file *fp;
506 	struct uio *uio;
507 	struct ucred *cred;
508 	struct thread *td;
509 	int flags;
510 {
511 	struct vnode *vp;
512 	int error, ioflag;
513 
514 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
515 	vp = (struct vnode *)fp->f_data;
516 	ioflag = 0;
517 	if (fp->f_flag & FNONBLOCK)
518 		ioflag |= IO_NDELAY;
519 	if (fp->f_flag & O_DIRECT)
520 		ioflag |= IO_DIRECT;
521 	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
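	/*
	 * If the caller did not supply an explicit offset (FOF_OFFSET, used
	 * by positioned I/O paths such as pread/pwrite), use the
	 * descriptor's seek position and update it below.
	 */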
522 	if ((flags & FOF_OFFSET) == 0)
523 		uio->uio_offset = fp->f_offset;
524 
525 	ioflag |= sequential_heuristic(uio, fp);
526 
527 	error = VOP_READ(vp, uio, ioflag, cred);
528 	if ((flags & FOF_OFFSET) == 0)
529 		fp->f_offset = uio->uio_offset;
530 	fp->f_nextoff = uio->uio_offset;
531 	VOP_UNLOCK(vp, 0, td);
532 	return (error);
533 }
534 
535 /*
536  * Device-optimized file table vnode read routine.
537  *
538  * This bypasses the VOP table and talks directly to the device.  Most
539  * filesystems just route to specfs and can make this optimization.
540  */
541 static int
542 svn_read(fp, uio, cred, flags, td)
543 	struct file *fp;
544 	struct uio *uio;
545 	struct ucred *cred;
546 	struct thread *td;
547 	int flags;
548 {
549 	struct vnode *vp;
550 	int ioflag;
551 	int error;
552 	dev_t dev;
553 
554 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
555 
556 	vp = (struct vnode *)fp->f_data;
557 	if (vp == NULL || vp->v_type == VBAD)
558 		return (EBADF);
559 
 560 	if (uio->uio_resid == 0)
 561 		return (0);
 562 
 563 	if ((dev = vp->v_rdev) == NULL)
 564 		return (EBADF);
 565 	reference_dev(dev);
566 	if ((flags & FOF_OFFSET) == 0)
567 		uio->uio_offset = fp->f_offset;
568 
569 	ioflag = 0;
570 	if (fp->f_flag & FNONBLOCK)
571 		ioflag |= IO_NDELAY;
572 	if (fp->f_flag & O_DIRECT)
573 		ioflag |= IO_DIRECT;
574 	ioflag |= sequential_heuristic(uio, fp);
575 
576 	error = dev_dread(dev, uio, ioflag);
577 
578 	release_dev(dev);
579 	if ((flags & FOF_OFFSET) == 0)
580 		fp->f_offset = uio->uio_offset;
581 	fp->f_nextoff = uio->uio_offset;
582 	return (error);
583 }
584 
585 /*
586  * File table vnode write routine.
587  */
588 static int
589 vn_write(fp, uio, cred, flags, td)
590 	struct file *fp;
591 	struct uio *uio;
592 	struct ucred *cred;
593 	struct thread *td;
594 	int flags;
595 {
596 	struct vnode *vp;
597 	int error, ioflag;
598 
 599 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
600 	    uio->uio_td, td));
601 	vp = (struct vnode *)fp->f_data;
602 	if (vp->v_type == VREG)
603 		bwillwrite();
604 	vp = (struct vnode *)fp->f_data;	/* XXX needed? */
605 	ioflag = IO_UNIT;
606 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
607 		ioflag |= IO_APPEND;
608 	if (fp->f_flag & FNONBLOCK)
609 		ioflag |= IO_NDELAY;
610 	if (fp->f_flag & O_DIRECT)
611 		ioflag |= IO_DIRECT;
612 	if ((fp->f_flag & O_FSYNC) ||
613 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
614 		ioflag |= IO_SYNC;
615 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
616 	if ((flags & FOF_OFFSET) == 0)
617 		uio->uio_offset = fp->f_offset;
618 	ioflag |= sequential_heuristic(uio, fp);
619 	error = VOP_WRITE(vp, uio, ioflag, cred);
620 	if ((flags & FOF_OFFSET) == 0)
621 		fp->f_offset = uio->uio_offset;
622 	fp->f_nextoff = uio->uio_offset;
623 	VOP_UNLOCK(vp, 0, td);
624 	return (error);
625 }
626 
627 /*
628  * Device-optimized file table vnode write routine.
629  *
630  * This bypasses the VOP table and talks directly to the device.  Most
631  * filesystems just route to specfs and can make this optimization.
632  */
633 static int
634 svn_write(fp, uio, cred, flags, td)
635 	struct file *fp;
636 	struct uio *uio;
637 	struct ucred *cred;
638 	struct thread *td;
639 	int flags;
640 {
641 	struct vnode *vp;
642 	int ioflag;
643 	int error;
644 	dev_t dev;
645 
 646 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
647 	    uio->uio_td, td));
648 
649 	vp = (struct vnode *)fp->f_data;
650 	if (vp == NULL || vp->v_type == VBAD)
651 		return (EBADF);
652 	if (vp->v_type == VREG)
653 		bwillwrite();
654 	vp = (struct vnode *)fp->f_data;	/* XXX needed? */
655 
656 	if ((dev = vp->v_rdev) == NULL)
657 		return (EBADF);
658 	reference_dev(dev);
659 
660 	if ((flags & FOF_OFFSET) == 0)
661 		uio->uio_offset = fp->f_offset;
662 
663 	ioflag = IO_UNIT;
664 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
665 		ioflag |= IO_APPEND;
666 	if (fp->f_flag & FNONBLOCK)
667 		ioflag |= IO_NDELAY;
668 	if (fp->f_flag & O_DIRECT)
669 		ioflag |= IO_DIRECT;
670 	if ((fp->f_flag & O_FSYNC) ||
671 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
672 		ioflag |= IO_SYNC;
673 	ioflag |= sequential_heuristic(uio, fp);
674 
675 	error = dev_dwrite(dev, uio, ioflag);
676 
677 	release_dev(dev);
678 	if ((flags & FOF_OFFSET) == 0)
679 		fp->f_offset = uio->uio_offset;
680 	fp->f_nextoff = uio->uio_offset;
681 
682 	return (error);
683 }
684 
685 /*
686  * File table vnode stat routine.
687  */
688 static int
689 vn_statfile(struct file *fp, struct stat *sb, struct thread *td)
690 {
691 	struct vnode *vp = (struct vnode *)fp->f_data;
692 
693 	return vn_stat(vp, sb, td);
694 }
695 
696 int
697 vn_stat(struct vnode *vp, struct stat *sb, struct thread *td)
698 {
699 	struct vattr vattr;
700 	struct vattr *vap;
701 	int error;
702 	u_short mode;
703 	dev_t dev;
704 
705 	vap = &vattr;
706 	error = VOP_GETATTR(vp, vap, td);
707 	if (error)
708 		return (error);
709 
710 	/*
711 	 * Zero the spare stat fields
712 	 */
713 	sb->st_lspare = 0;
714 	sb->st_qspare = 0;
715 
716 	/*
717 	 * Copy from vattr table
718 	 */
719 	if (vap->va_fsid != VNOVAL)
720 		sb->st_dev = vap->va_fsid;
721 	else
722 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
723 	sb->st_ino = vap->va_fileid;
724 	mode = vap->va_mode;
725 	switch (vap->va_type) {
726 	case VREG:
727 		mode |= S_IFREG;
728 		break;
729 	case VDIR:
730 		mode |= S_IFDIR;
731 		break;
732 	case VBLK:
733 		mode |= S_IFBLK;
734 		break;
735 	case VCHR:
736 		mode |= S_IFCHR;
737 		break;
738 	case VLNK:
739 		mode |= S_IFLNK;
740 		/* This is a cosmetic change, symlinks do not have a mode. */
741 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
 742 			mode &= ~ACCESSPERMS;	/* 0000 */
 743 		else
 744 			mode |= ACCESSPERMS;	/* 0777 */
745 		break;
746 	case VSOCK:
747 		mode |= S_IFSOCK;
748 		break;
749 	case VFIFO:
750 		mode |= S_IFIFO;
751 		break;
752 	default:
753 		return (EBADF);
 754 	}
755 	sb->st_mode = mode;
756 	sb->st_nlink = vap->va_nlink;
757 	sb->st_uid = vap->va_uid;
758 	sb->st_gid = vap->va_gid;
759 	sb->st_rdev = vap->va_rdev;
760 	sb->st_size = vap->va_size;
761 	sb->st_atimespec = vap->va_atime;
762 	sb->st_mtimespec = vap->va_mtime;
763 	sb->st_ctimespec = vap->va_ctime;
764 
765 	/*
766 	 * A VCHR and VBLK device may track the last access and last modified
 767  * time independently of the filesystem.  This is particularly true
768 	 * because device read and write calls may bypass the filesystem.
769 	 */
770 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
771 		if ((dev = vp->v_rdev) != NULL) {
772 			if (dev->si_lastread) {
773 				sb->st_atimespec.tv_sec = dev->si_lastread;
774 				sb->st_atimespec.tv_nsec = 0;
775 			}
776 			if (dev->si_lastwrite) {
 777 				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
 778 				sb->st_mtimespec.tv_nsec = 0;
779 			}
780 		}
781 	}
782 
783         /*
784 	 * According to www.opengroup.org, the meaning of st_blksize is
785 	 *   "a filesystem-specific preferred I/O block size for this
786 	 *    object.  In some filesystem types, this may vary from file
787 	 *    to file"
788 	 * Default to PAGE_SIZE after much discussion.
789 	 */
790 
791 	if (vap->va_type == VREG) {
792 		sb->st_blksize = vap->va_blocksize;
793 	} else if (vn_isdisk(vp, NULL)) {
794 		/*
795 		 * XXX this is broken.  If the device is not yet open (aka
796 		 * stat() call, aka v_rdev == NULL), how are we supposed
797 		 * to get a valid block size out of it?
798 		 */
799 		dev_t dev;
800 
801 		if ((dev = vp->v_rdev) == NULL)
802 			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
803 		sb->st_blksize = dev->si_bsize_best;
804 		if (sb->st_blksize < dev->si_bsize_phys)
805 			sb->st_blksize = dev->si_bsize_phys;
806 		if (sb->st_blksize < BLKDEV_IOSIZE)
807 			sb->st_blksize = BLKDEV_IOSIZE;
808 	} else {
809 		sb->st_blksize = PAGE_SIZE;
810 	}
811 
812 	sb->st_flags = vap->va_flags;
813 	if (suser(td))
814 		sb->st_gen = 0;
815 	else
816 		sb->st_gen = vap->va_gen;
817 
818 #if (S_BLKSIZE == 512)
819 	/* Optimize this case */
820 	sb->st_blocks = vap->va_bytes >> 9;
821 #else
822 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
823 #endif
824 	sb->st_fsmid = vap->va_fsmid;
825 	return (0);
826 }
827 
828 /*
829  * File table vnode ioctl routine.
830  */
831 static int
832 vn_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
833 {
834 	struct vnode *vp = ((struct vnode *)fp->f_data);
835 	struct vnode *ovp;
836 	struct ucred *ucred;
837 	struct vattr vattr;
838 	int error;
839 
840 	KKASSERT(td->td_proc != NULL);
841 	ucred = td->td_proc->p_ucred;
842 
843 	switch (vp->v_type) {
844 	case VREG:
845 	case VDIR:
846 		if (com == FIONREAD) {
847 			error = VOP_GETATTR(vp, &vattr, td);
848 			if (error)
849 				return (error);
850 			*(int *)data = vattr.va_size - fp->f_offset;
851 			return (0);
852 		}
853 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
854 			return (0);			/* XXX */
855 		/* fall into ... */
856 	default:
857 #if 0
858 		return (ENOTTY);
859 #endif
860 	case VFIFO:
861 	case VCHR:
862 	case VBLK:
863 		if (com == FIODTYPE) {
864 			if (vp->v_type != VCHR && vp->v_type != VBLK)
865 				return (ENOTTY);
866 			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
867 			return (0);
868 		}
869 		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, td);
870 		if (error == 0 && com == TIOCSCTTY) {
871 			struct session *sess = td->td_proc->p_session;
872 
873 			/* Do nothing if reassigning same control tty */
874 			if (sess->s_ttyvp == vp)
875 				return (0);
876 
877 			/* Get rid of reference to old control tty */
878 			ovp = sess->s_ttyvp;
879 			vref(vp);
880 			sess->s_ttyvp = vp;
881 			if (ovp)
882 				vrele(ovp);
883 		}
884 		return (error);
885 	}
886 }
887 
888 /*
889  * File table vnode poll routine.
890  */
891 static int
892 vn_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
893 {
894 	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
895 }
896 
897 /*
898  * Check that the vnode is still valid, and if so
899  * acquire requested lock.
900  */
901 int
902 #ifndef	DEBUG_LOCKS
903 vn_lock(struct vnode *vp, int flags, struct thread *td)
904 #else
905 debug_vn_lock(struct vnode *vp, int flags, struct thread *td,
906 		const char *filename, int line)
907 #endif
908 {
909 	int error;
910 
911 	do {
912 #ifdef	DEBUG_LOCKS
913 		vp->filename = filename;
914 		vp->line = line;
915 #endif
916 		error = VOP_LOCK(vp, flags | LK_NOPAUSE, td);
917 		if (error == 0)
918 			break;
919 	} while (flags & LK_RETRY);
920 
921 	/*
922 	 * Because we (had better!) have a ref on the vnode, once it
923 	 * goes to VRECLAIMED state it will not be recycled until all
924 	 * refs go away.  So we can just check the flag.
925 	 */
926 	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
927 		VOP_UNLOCK(vp, 0, td);
928 		error = ENOENT;
929 	}
930 	return (error);
931 }
932 
933 /*
934  * File table vnode close routine.
935  */
936 static int
937 vn_closefile(struct file *fp, struct thread *td)
938 {
939 	int err;
940 
941 	fp->f_ops = &badfileops;
942 	err = vn_close(((struct vnode *)fp->f_data), fp->f_flag, td);
943 	return(err);
944 }
945 
946 static int
947 vn_kqfilter(struct file *fp, struct knote *kn)
948 {
949 
950 	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
951 }
952