xref: /openbsd-src/sys/kern/vfs_vnops.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: vfs_vnops.c,v 1.85 2016/06/19 11:54:33 natano Exp $	*/
2 /*	$NetBSD: vfs_vnops.c,v 1.20 1996/02/04 02:18:41 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)vfs_vnops.c	8.5 (Berkeley) 12/8/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/file.h>
45 #include <sys/stat.h>
46 #include <sys/proc.h>
47 #include <sys/resourcevar.h>
48 #include <sys/signalvar.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/lock.h>
52 #include <sys/vnode.h>
53 #include <sys/ioctl.h>
54 #include <sys/tty.h>
55 #include <sys/cdio.h>
56 #include <sys/poll.h>
57 #include <sys/filedesc.h>
58 #include <sys/specdev.h>
59 #include <sys/unistd.h>
60 
/* Forward declarations for the vnode-backed file operations below. */
int vn_read(struct file *, off_t *, struct uio *, struct ucred *);
int vn_write(struct file *, off_t *, struct uio *, struct ucred *);
int vn_poll(struct file *, int, struct proc *);
int vn_kqfilter(struct file *, struct knote *);
int vn_closefile(struct file *, struct proc *);

/* File operations vector used for open file descriptions backed by vnodes. */
struct 	fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter, vn_statfile,
	  vn_closefile };
70 
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * On success, ndp->ni_vp holds the opened vnode, locked, with its
 * write count bumped when FWRITE was requested.  On failure the
 * vnode is released via vput() and an errno value is returned.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr va;
	struct cloneinfo *cip;
	int error;

	/* At least one of read or write access must be requested. */
	if ((fmode & (FREAD|FWRITE)) == 0)
		return (EINVAL);
	/* O_TRUNC is meaningless without write access. */
	if ((fmode & (O_TRUNC | FWRITE)) == O_TRUNC)
		return (EINVAL);
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		/* Follow symlinks only for non-exclusive creates. */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);

		if (ndp->ni_vp == NULL) {
			/* The name does not exist yet: create a regular file. */
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, &va);
			if (error)
				return (error);
			/* A freshly created file is already empty. */
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			/*
			 * The name already exists: abandon the create and
			 * release the parent directory (vrele when parent
			 * and target are the same vnode, vput otherwise).
			 */
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		/* Plain open of an existing name. */
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	/* Refuse to open a symlink itself (reached when not followed). */
	if (vp->v_type == VLNK) {
		error = ELOOP;
		goto bad;
	}
	if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		/* A fresh create implies access; otherwise check permissions. */
		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;
		}
		if (fmode & FWRITE) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
		}
	}
	/* Honor O_TRUNC on regular files by setting the size to zero. */
	if ((fmode & O_TRUNC) && vp->v_type == VREG) {
		VATTR_NULL(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;

	/*
	 * A cloning device produced a new vnode in ci_vp: hand that
	 * one back to the caller and restore the original vnode's
	 * private data pointer.
	 */
	if (vp->v_flag & VCLONED) {
		cip = (struct cloneinfo *)vp->v_data;

		vp->v_flag &= ~VCLONED;

		ndp->ni_vp = cip->ci_vp;	/* return cloned vnode */
		vp->v_data = cip->ci_data;	/* restore v_data */
		VOP_UNLOCK(vp, p);		/* keep a reference */
		vp = ndp->ni_vp;		/* for the increment below */

		free(cip, M_TEMP, sizeof(*cip));
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
187 
188 /*
189  * Check for write permissions on the specified vnode.
190  * Prototype text segments cannot be written.
191  */
192 int
193 vn_writechk(struct vnode *vp)
194 {
195 	/*
196 	 * Disallow write attempts on read-only file systems;
197 	 * unless the file is a socket or a block or character
198 	 * device resident on the file system.
199 	 */
200 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
201 		switch (vp->v_type) {
202 		case VREG:
203 		case VDIR:
204 		case VLNK:
205 			return (EROFS);
206 		case VNON:
207 		case VCHR:
208 		case VSOCK:
209 		case VFIFO:
210 		case VBAD:
211 		case VBLK:
212 			break;
213 		}
214 	}
215 	/*
216 	 * If there's shared text associated with
217 	 * the vnode, try to free it up once.  If
218 	 * we fail, we can't allow writing.
219 	 */
220 	if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
221 		return (ETXTBSY);
222 
223 	return (0);
224 }
225 
226 /*
227  * Check whether a write operation would exceed the file size rlimit
228  * for the process, if one should be applied for this operation.
229  * If a partial write should take place, the uio is adjusted and the
230  * amount by which the request would have exceeded the limit is returned
231  * via the 'overrun' argument.
232  */
233 int
234 vn_fsizechk(struct vnode *vp, struct uio *uio, int ioflag, ssize_t *overrun)
235 {
236 	struct proc *p = uio->uio_procp;
237 
238 	*overrun = 0;
239 	if (vp->v_type == VREG && p != NULL && !(ioflag & IO_NOLIMIT)) {
240 		rlim_t limit = p->p_rlimit[RLIMIT_FSIZE].rlim_cur;
241 
242 		/* if already at or over the limit, send the signal and fail */
243 		if (uio->uio_offset >= limit) {
244 			psignal(p, SIGXFSZ);
245 			return (EFBIG);
246 		}
247 
248 		/* otherwise, clamp the write to stay under the limit */
249 		if (uio->uio_resid > limit - uio->uio_offset) {
250 			*overrun = uio->uio_resid - (limit - uio->uio_offset);
251 			uio->uio_resid = limit - uio->uio_offset;
252 		}
253 	}
254 
255 	return (0);
256 }
257 
258 
/*
 * Mark a vnode as being the text image of a running process.
 *
 * Once VTEXT is set, vn_writechk() refuses writes with ETXTBSY
 * unless the cached image can be discarded first.
 */
void
vn_marktext(struct vnode *vp)
{
	vp->v_flag |= VTEXT;
}
267 
/*
 * Vnode close call.
 *
 * Undoes the write count taken by vn_open() when the file was
 * writable, then calls the file system's close routine with the
 * vnode locked.  The caller's reference is dropped by vput().
 */
int
vn_close(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_CLOSE(vp, flags, cred, p);
	vput(vp);	/* unlocks and releases the reference */
	return (error);
}
283 
284 /*
285  * Package up an I/O request on a vnode into a uio and do it.
286  */
287 int
288 vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
289     enum uio_seg segflg, int ioflg, struct ucred *cred, size_t *aresid,
290     struct proc *p)
291 {
292 	struct uio auio;
293 	struct iovec aiov;
294 	int error;
295 
296 	auio.uio_iov = &aiov;
297 	auio.uio_iovcnt = 1;
298 	aiov.iov_base = base;
299 	aiov.iov_len = len;
300 	auio.uio_resid = len;
301 	auio.uio_offset = offset;
302 	auio.uio_segflg = segflg;
303 	auio.uio_rw = rw;
304 	auio.uio_procp = p;
305 
306 	if ((ioflg & IO_NODELOCKED) == 0)
307 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
308 	if (rw == UIO_READ) {
309 		error = VOP_READ(vp, &auio, ioflg, cred);
310 	} else {
311 		error = VOP_WRITE(vp, &auio, ioflg, cred);
312 	}
313 	if ((ioflg & IO_NODELOCKED) == 0)
314 		VOP_UNLOCK(vp, p);
315 
316 	if (aresid)
317 		*aresid = auio.uio_resid;
318 	else
319 		if (auio.uio_resid && error == 0)
320 			error = EIO;
321 	return (error);
322 }
323 
324 /*
325  * File table vnode read routine.
326  */
327 int
328 vn_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
329 {
330 	struct vnode *vp = fp->f_data;
331 	int error = 0;
332 	size_t count = uio->uio_resid;
333 	struct proc *p = uio->uio_procp;
334 
335 	/* no wrap around of offsets except on character devices */
336 	if (vp->v_type != VCHR && count > LLONG_MAX - *poff)
337 		return (EINVAL);
338 
339 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
340 	uio->uio_offset = *poff;
341 	if (vp->v_type != VDIR)
342 		error = VOP_READ(vp, uio,
343 		    (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0, cred);
344 	*poff += count - uio->uio_resid;
345 	VOP_UNLOCK(vp, p);
346 	return (error);
347 }
348 
349 /*
350  * File table vnode write routine.
351  */
352 int
353 vn_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
354 {
355 	struct vnode *vp = fp->f_data;
356 	struct proc *p = uio->uio_procp;
357 	int error, ioflag = IO_UNIT;
358 	size_t count;
359 
360 	/* note: pwrite/pwritev are unaffected by O_APPEND */
361 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND) &&
362 	    poff == &fp->f_offset)
363 		ioflag |= IO_APPEND;
364 	if (fp->f_flag & FNONBLOCK)
365 		ioflag |= IO_NDELAY;
366 	if ((fp->f_flag & FFSYNC) ||
367 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
368 		ioflag |= IO_SYNC;
369 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
370 	uio->uio_offset = *poff;
371 	count = uio->uio_resid;
372 	error = VOP_WRITE(vp, uio, ioflag, cred);
373 	if (ioflag & IO_APPEND)
374 		*poff = uio->uio_offset;
375 	else
376 		*poff += count - uio->uio_resid;
377 	VOP_UNLOCK(vp, p);
378 	return (error);
379 }
380 
381 /*
382  * File table wrapper for vn_stat
383  */
384 int
385 vn_statfile(struct file *fp, struct stat *sb, struct proc *p)
386 {
387 	struct vnode *vp = fp->f_data;
388 	return vn_stat(vp, sb, p);
389 }
390 
/*
 * vnode stat routine.
 *
 * Fetch the vnode's attributes via VOP_GETATTR() and translate
 * them into the struct stat layout expected by userland.
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct proc *p)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	memset(sb, 0, sizeof(*sb));
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	/* Merge the file type bits into the permission bits. */
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		/* VNON and VBAD have no userland representation. */
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atim.tv_sec  = va.va_atime.tv_sec;
	sb->st_atim.tv_nsec = va.va_atime.tv_nsec;
	sb->st_mtim.tv_sec  = va.va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = va.va_mtime.tv_nsec;
	sb->st_ctim.tv_sec  = va.va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = va.va_ctime.tv_nsec;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = va.va_gen;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}
454 
/*
 * File table vnode ioctl routine.
 *
 * Regular files and directories answer FIONREAD locally and treat
 * FIONBIO/FIOASYNC as no-ops; fifos and devices hand the command
 * to the file system via VOP_IOCTL().  Everything else is ENOTTY.
 */
int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	struct vnode *vp = fp->f_data;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			/* Report bytes between the file offset and EOF. */
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)  /* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */
	default:
		return (ENOTTY);

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			/*
			 * The process just acquired a controlling tty:
			 * record its vnode in the session, taking a new
			 * reference and dropping the previous one.
			 */
			struct session *s = p->p_p->ps_session;
			struct vnode *ovp = s->s_ttyvp;
			s->s_ttyvp = vp;
			vref(vp);
			if (ovp)
				vrele(ovp);
		}
		return (error);
	}
}
497 
498 /*
499  * File table vnode poll routine.
500  */
501 int
502 vn_poll(struct file *fp, int events, struct proc *p)
503 {
504 	return (VOP_POLL(fp->f_data, fp->f_flag, events, p));
505 }
506 
/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 *
 * While the vnode carries VXLOCK (NOTE(review): this appears to be
 * the reclaim/revocation lock — confirm against vnode(9)), sleep
 * until woken and report ENOENT.  Without LK_RETRY the first
 * failure is returned; with LK_RETRY the loop keeps trying until a
 * VOP_LOCK() attempt settles the matter.
 */
int
vn_lock(struct vnode *vp, int flags, struct proc *p)
{
	int error;

	do {
		if (vp->v_flag & VXLOCK) {
			/* Ask to be woken when the exclusive lock clears. */
			vp->v_flag |= VXWANT;
			tsleep(vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp, flags, p);
			if (error == 0)
				return (error);
		}
	} while (flags & LK_RETRY);
	return (error);
}
529 
530 /*
531  * File table vnode close routine.
532  */
533 int
534 vn_closefile(struct file *fp, struct proc *p)
535 {
536 	struct vnode *vp = fp->f_data;
537 	struct flock lf;
538 
539 	if ((fp->f_iflags & FIF_HASLOCK)) {
540 		lf.l_whence = SEEK_SET;
541 		lf.l_start = 0;
542 		lf.l_len = 0;
543 		lf.l_type = F_UNLCK;
544 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
545 	}
546 
547 	return (vn_close(vp, fp->f_flag, fp->f_cred, p));
548 }
549 
550 int
551 vn_kqfilter(struct file *fp, struct knote *kn)
552 {
553 	return (VOP_KQFILTER(fp->f_data, kn));
554 }
555 
556 /*
557  * Common code for vnode access operations.
558  */
559 
560 /* Check if a directory can be found inside another in the hierarchy */
561 int
562 vn_isunder(struct vnode *lvp, struct vnode *rvp, struct proc *p)
563 {
564 	int error;
565 
566 	error = vfs_getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p);
567 
568 	if (!error)
569 		return (1);
570 
571 	return (0);
572 }
573