/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/caps.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_seek (struct file *fp, off_t offset, int whence, off_t *res);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown,
	.fo_seek = vn_seek
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
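 *
 * Typical usage from kernel code is sketched below; the path, flags,
 * and error handling are illustrative only:
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp;
 *	int error;
 *
 *	error = nlookup_init(&nd, "/some/path", UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;
 *		nd.nl_open_vp = NULL;	(take over the single reference)
 *	}
 *	nlookup_done(&nd);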
 */
int
vn_open(struct nlookupdata *nd, struct file **fpp, int fmode, int cmode)
{
	struct file *fp = fpp ? *fpp : NULL;
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	int vpexcl;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 *
		 * If the file exists but is missing write permission,
		 * nlookup() returns EACCES.  This has to be handled specially
		 * when combined with O_EXCL.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
		if (error == EACCES && nd->nl_nch.ncp->nc_vp != NULL &&
		    (fmode & O_EXCL) && !nd->nl_dir_error)
		{
			error = EEXIST;
		}

		/*
		 * If no error and nd->nl_dvp is NULL, the nlookup represents
		 * a mount-point or cross-mount situation.  e.g.
		 * open("/var/cache", O_CREAT), where /var/cache is a
		 * mount point or a null-mount point.
		 */
		if (error == 0 && nd->nl_dvp == NULL)
			error = EINVAL;
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 *
	 * (error is 0 on entry / retry)
	 */
again:
	/*
	 * Checks for (likely) filesystem-modifying cases and allows
	 * the filesystem to stall the front-end.
	 */
	if ((fmode & (FWRITE | O_TRUNC)) ||
	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
		error = ncp_writechk(&nd->nl_nch);
		if (error)
			return error;
	}

	vpexcl = 1;
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			vap->va_fuseflags = fmode; /* FUSE */
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		/*
		 * In most other cases a shared lock on the vnode is
		 * sufficient.  However, the O_RDWR case needs an
		 * exclusive lock if the vnode is executable.  The
		 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
		 * this.
		 *
		 * NOTE: If NCF_NOTX is not set, we do not know the
		 *	 state of the 'x' bits and have to get an
		 *	 exclusive lock for the EXCLLOCK_IFEXEC case.
		 */
		if ((nd->nl_flags & NLC_SHAREDLOCK) &&
		    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
		     nd->nl_nch.ncp->nc_flag & NCF_NOTX)) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
			vpexcl = 0;
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}

			/*
			 * Additional checks on vnode (does not substitute
			 * for ncp_writechk()).
			 */
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					u_int dummy_gen = 0;

					vput(vp);
					vp = NULL;
					if (vpexcl == 0) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      &dummy_gen,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR_FP(vp, vap, cred, fp);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 *	 disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).
	 *
	 * NOTE: We can't leave nl_nch locked through the VOP_OPEN anyway
	 *	 since the VOP_OPEN may block, e.g. on /dev/ttyd0
	 *
	 * NOTE: The VOP_OPEN() can replace the *fpp we supply with its own
	 *	 (it will fdrop/fhold), and can also set the *fpp up however
	 *	 it wants, not necessarily using DTYPE_VNODE.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fpp);
	fp = fpp ? *fpp : NULL;

	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 *
	 * NOTE: vnode stored in fp may be different
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Checks for special conditions on the vnode which might prevent writing
 * after the vnode has (likely) been locked.  The vnode might or might not
 * be locked as of this call, but will be at least referenced.
 *
 * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
 * to the vnode being locked.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return 0;
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 *
 * Must be called PRIOR to any vnodes being locked.
 */
int
ncp_writechk(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = nch->mount) != NULL) {
		if (mp->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
			VFS_MODIFYING(mp);
	}
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += howmany(uio->uio_resid, MAXBSIZE);
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
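
/*
 * e.g. for a run of sequential 8KB reads each call advances f_seqcount
 * by howmany(8192, MAXBSIZE) = 1 (assuming the usual 64KB MAXBSIZE),
 * saturating at IO_SEQMAX.  The value returned above is the count
 * shifted into the sequential-hint bits of the ioflag (IO_SEQSHIFT),
 * which the VOP read/write code can use to scale read-ahead.
 */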

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int flags;
	u_int nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int flags;
	u_int nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
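
/*
 * The pairing used by vn_read()/vn_write() below looks like this
 * (a sketch; the VOP call advances uio_offset as it transfers data):
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);	acquire + load
 *	... VOP_READ_FP()/VOP_WRITE_FP() ...
 *	vn_set_fpf_offset(fp, uio->uio_offset);		store + release
 */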

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
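
/*
 * Example (a sketch; buffer and sizes are illustrative only): read the
 * first 512 bytes of a referenced vnode into a kernel buffer, letting
 * vn_rdwr() handle the locking and the uio packaging:
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *			0, cred, &resid);
 *
 * With aresid non-NULL a short read returns error == 0 and resid > 0
 * rather than EIO.
 */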

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
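
		/*
		 * e.g. with the usual MAXBSIZE of 65536, offset = 1000
		 * yields a first chunk of 64536 bytes, after which every
		 * chunk starts on a 64KB boundary.
		 */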

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	if (vp->v_mount)
		VFS_MODIFYING(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	/*
	 * vp already has a ref and is validated, can call unlocked.
	 */
	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = devid_from_dev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (dev->si_lastread -
							   time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (dev->si_lastwrite -
							   time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = caps_priv_check(cred, SYSCAP_NOVFS_GENERATION);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	/*
	 * This is for ABI compatibility <= 5.7 (for ABI change made in
	 * 5.7 master).
	 */
	sb->__old_st_blksize = sb->st_blksize;

	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {	/* XXX */
			error = 0;	/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect that the function will fail so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
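 *
 * A blocking, must-succeed acquisition therefore looks like (sketch):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... operate on the locked vnode ...
 *	vn_unlock(vp);
 *
 * while callers that must detect reclamation also pass LK_FAILRECLAIM
 * and check for the ENOENT return.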
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}

int
vn_relock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	return error;
}

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}

int
vn_seek(struct file *fp, off_t offset, int whence, off_t *res)
{
	/*
	 * NOTE: devfs_dev_fileops uses exact same code
	 */
	struct vnode *vp;
	struct vattr_lite lva;
	off_t new_offset;
	int error;

	vp = (struct vnode *)fp->f_data;

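	/*
	 * whence uses the historic lseek names: L_SET, L_INCR and L_XTND
	 * correspond to SEEK_SET, SEEK_CUR and SEEK_END respectively.
	 */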
	switch (whence) {
	case L_INCR:
		spin_lock(&fp->f_spin);
		new_offset = fp->f_offset + offset;
		error = 0;
		break;
	case L_XTND:
		error = VOP_GETATTR_LITE(vp, &lva);
		spin_lock(&fp->f_spin);
		new_offset = offset + lva.va_size;
		break;
	case L_SET:
		new_offset = offset;
		error = 0;
		spin_lock(&fp->f_spin);
		break;
	default:
		new_offset = 0;
		error = EINVAL;
		spin_lock(&fp->f_spin);
		break;
	}

	/*
	 * Validate the seek position.  Negative offsets are not allowed
	 * for regular files or directories.
	 *
	 * Normally we would also not want to allow negative offsets for
	 * character and block-special devices.  However kvm addresses
	 * on 64 bit architectures might appear to be negative and must
	 * be allowed.
	 */
	if (error == 0) {
		if (new_offset < 0 &&
		    (vp->v_type == VREG || vp->v_type == VDIR)) {
			error = EINVAL;
		} else {
			fp->f_offset = new_offset;
		}
	}
	*res = fp->f_offset;
	spin_unlock(&fp->f_spin);

	return (error);
}
1257