/*	$OpenBSD: vfs_vnops.c,v 1.71 2012/07/11 12:39:20 guenther Exp $	*/
/*	$NetBSD: vfs_vnops.c,v 1.20 1996/02/04 02:18:41 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.5 (Berkeley) 12/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/cdio.h>
#include <sys/poll.h>
#include <sys/filedesc.h>
#include <sys/specdev.h>

#include <uvm/uvm_extern.h>

int vn_read(struct file *, off_t *, struct uio *, struct ucred *);
int vn_write(struct file *, off_t *, struct uio *, struct ucred *);
int vn_poll(struct file *, int, struct proc *);
int vn_kqfilter(struct file *, struct knote *);
int vn_closefile(struct file *, struct proc *);

struct	fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter, vn_statfile,
	  vn_closefile };

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr va;
	struct cloneinfo *cip;
	int error;

	if ((fmode & (FREAD|FWRITE)) == 0)
		return (EINVAL);
	if ((fmode & (O_TRUNC | FWRITE)) == O_TRUNC)
		return (EINVAL);
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);

		if (ndp->ni_vp == NULL) {
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, &va);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type == VLNK) {
		error = ELOOP;
		goto bad;
	}
	if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;
		}
		if (fmode & FWRITE) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
		}
	}
	if ((fmode & O_TRUNC) && vp->v_type == VREG) {
		VATTR_NULL(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;

	if (vp->v_flag & VCLONED) {
		cip = (struct cloneinfo *)vp->v_data;

		vp->v_flag &= ~VCLONED;

		ndp->ni_vp = cip->ci_vp;	/* return cloned vnode */
		vp->v_data = cip->ci_data;	/* restore v_data */
		VOP_UNLOCK(vp, 0, p);		/* keep a reference */
		vp = ndp->ni_vp;		/* for the increment below */

		free(cip, M_TEMP);
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
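
/*
 * Example (illustrative sketch only, not part of the original file):
 * a typical in-kernel caller sets up a nameidata, opens the vnode for
 * reading, and later releases it with vn_close().  The path below is
 * hypothetical.
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, 0, 0, UIO_SYSSPACE, "/etc/example.conf", p);
 *	if ((error = vn_open(&nd, FREAD, 0)) != 0)
 *		return (error);
 *	...nd.ni_vp comes back locked and referenced...
 *	VOP_UNLOCK(nd.ni_vp, 0, p);
 *	...
 *	error = vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
 */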

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket or a block or character
	 * device resident on the file system.
	 */
	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		case VNON:
		case VCHR:
		case VSOCK:
		case VFIFO:
		case VBAD:
		case VBLK:
			break;
		}
	}
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Check whether a write operation would exceed the file size rlimit
 * for the process, if one should be applied for this operation.
 * If a partial write should take place, the uio is adjusted and the
 * amount by which the request would have exceeded the limit is returned
 * via the 'overrun' argument.
 */
int
vn_fsizechk(struct vnode *vp, struct uio *uio, int ioflag, int *overrun)
{
	struct proc *p = uio->uio_procp;

	*overrun = 0;
	if (vp->v_type == VREG && p != NULL && !(ioflag & IO_NOLIMIT)) {
		rlim_t limit = p->p_rlimit[RLIMIT_FSIZE].rlim_cur;

		/* if already at or over the limit, send the signal and fail */
		if (uio->uio_offset >= limit) {
			psignal(p, SIGXFSZ);
			return (EFBIG);
		}

		/* otherwise, clamp the write to stay under the limit */
		if (uio->uio_resid > limit - uio->uio_offset) {
			*overrun = uio->uio_resid - (limit - uio->uio_offset);
			uio->uio_resid = limit - uio->uio_offset;
		}
	}

	return (0);
}
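
/*
 * Example (illustrative sketch, not part of the original file): a
 * filesystem write routine would typically bound the transfer before
 * writing and then restore the clamped residual afterwards, roughly:
 *
 *	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)) != 0)
 *		return (error);
 *	...perform the write with the (possibly shortened) uio...
 *	uio->uio_resid += overrun;
 */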

/*
 * Mark a vnode as being the text image of a running process.
 */
void
vn_marktext(struct vnode *vp)
{
	vp->v_flag |= VTEXT;
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_CLOSE(vp, flags, cred, p);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *cred, size_t *aresid,
    struct proc *p)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, p);

	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	return (error);
}
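
/*
 * Example (illustrative sketch, not part of the original file): read
 * "len" bytes at offset "off" from an already-referenced vnode into a
 * kernel buffer; because no residual count is requested (aresid is
 * NULL), a short transfer is reported as EIO:
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, len, off,
 *	    UIO_SYSSPACE, 0, p->p_ucred, NULL, p);
 */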

/*
 * File table vnode read routine.
 */
int
vn_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error = 0;
	size_t count = uio->uio_resid;
	struct proc *p = uio->uio_procp;

	/* no wrap around of offsets except on character devices */
	if (vp->v_type != VCHR && count > LLONG_MAX - *poff)
		return (EINVAL);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	uio->uio_offset = *poff;
	if (vp->v_type != VDIR)
		error = VOP_READ(vp, uio,
		    (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0, cred);
	*poff += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode write routine.
 */
int
vn_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct proc *p = uio->uio_procp;
	int error, ioflag = IO_UNIT;
	size_t count;

	/* note: pwrite/pwritev are unaffected by O_APPEND */
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND) &&
	    poff == &fp->f_offset)
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & FFSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	uio->uio_offset = *poff;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (ioflag & IO_APPEND)
		*poff = uio->uio_offset;
	else
		*poff += count - uio->uio_resid;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table wrapper for vn_stat
 */
int
vn_statfile(struct file *fp, struct stat *sb, struct proc *p)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, p);
}

/*
 * vnode stat routine.
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct proc *p)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atim = va.va_atime;
	sb->st_mtim = va.va_mtime;
	sb->st_ctim = va.va_ctime;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = va.va_gen;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */
	default:
		return (ENOTTY);

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			struct session *s = p->p_p->ps_session;

			if (s->s_ttyvp)
				vrele(s->s_ttyvp);
			s->s_ttyvp = vp;
			vref(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
int
vn_poll(struct file *fp, int events, struct proc *p)
{
	return (VOP_POLL(((struct vnode *)fp->f_data), events, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags, struct proc *p)
{
	int error;

	if ((flags & LK_RECURSEFAIL) == 0)
		flags |= LK_CANRECURSE;

	do {
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			tsleep(vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp, flags, p);
			if (error == 0)
				return (error);
		}
	} while (flags & LK_RETRY);
	return (error);
}
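
/*
 * Note (added commentary, not in the original file): without LK_RETRY
 * a caller can get ENOENT back when the vnode is being reclaimed
 * (VXLOCK set above); with LK_RETRY, as used throughout this file
 * (e.g. vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)), the lock attempt is
 * simply repeated until it succeeds.
 */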

/*
 * File table vnode close routine.
 */
int
vn_closefile(struct file *fp, struct proc *p)
{
	struct vnode *vp = fp->f_data;
	struct flock lf;

	if ((fp->f_flag & FHASLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	}

	return (vn_close(vp, fp->f_flag, fp->f_cred, p));
}

int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}

/*
 * Common code for vnode access operations.
 */

/* Check if a directory can be found inside another in the hierarchy */
int
vn_isunder(struct vnode *lvp, struct vnode *rvp, struct proc *p)
{
	int error;

	error = vfs_getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p);

	if (!error)
		return (1);

	return (0);
}
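
/*
 * Example (illustrative sketch, not part of the original file): check
 * whether the directory "dvp" lies below "rootvp" in the hierarchy,
 * e.g. when validating that a lookup stays inside a process's root:
 *
 *	if (vn_isunder(dvp, rootvp, p))
 *		...dvp is at or below rootvp...
 */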