xref: /csrg-svn/sys/kern/vfs_vnops.c (revision 52230)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_vnops.c	7.35 (Berkeley) 01/22/92
 */

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "file.h"
#include "stat.h"
#include "buf.h"
#include "proc.h"
#include "mount.h"
#include "namei.h"
#include "vnode.h"
#include "ioctl.h"
#include "tty.h"

struct 	fileops vnops =
	{ vn_read, vn_write, vn_ioctl, vn_select, vn_closefile };
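
/*
 * Note (editorial, not part of the original source): the descriptor
 * layer reaches these routines through this fileops table.  An opening
 * caller points fp->f_ops at &vnops and fp->f_data at the vnode, which
 * is why the vn_* routines below recover the vnode by casting
 * fp->f_data (see vn_read() and vn_write()).
 */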

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
vn_open(ndp, p, fmode, cmode)
	register struct nameidata *ndp;
	struct proc *p;
	int fmode, cmode;
{
	register struct vnode *vp;
	register struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	if (fmode & O_CREAT) {
		ndp->ni_nameiop = CREATE | LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0)
			ndp->ni_nameiop |= FOLLOW;
		if (error = namei(ndp, p))
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			LEASE_CHECK(ndp->ni_dvp, p, cred, LEASE_WRITE);
			if (error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, &ndp->ni_cnd, vap))
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_nameiop = LOOKUP | FOLLOW | LOCKLEAF;
		if (error = namei(ndp, p))
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & FREAD) {
			if (error = VOP_ACCESS(vp, VREAD, cred, p))
				goto bad;
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)))
				goto bad;
		}
	}
	if (fmode & O_TRUNC) {
		VATTR_NULL(vap);
		vap->va_size = 0;
		LEASE_CHECK(vp, p, cred, LEASE_WRITE);
		if (error = VOP_SETATTR(vp, vap, cred, p))
			goto bad;
	}
	if (error = VOP_OPEN(vp, fmode, cred, p))
		goto bad;
	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
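
#if 0
/*
 * Illustrative sketch, not part of the original source: roughly how a
 * caller such as the open() system call drives vn_open().  Only the
 * vn_open() call itself is taken from this file; the nameidata setup
 * (ni_dirp/ni_segflg) and the example function are assumptions modeled
 * on the era's vfs_syscalls.c.
 */
example_open(p, path)
	struct proc *p;
	char *path;
{
	struct nameidata nd;
	int error;

	nd.ni_dirp = path;		/* hypothetical user-space path */
	nd.ni_segflg = UIO_USERSPACE;
	if (error = vn_open(&nd, p, FREAD | FWRITE | O_CREAT, 0666))
		return (error);
	/*
	 * nd.ni_vp is now the opened vnode, locked because of LOCKLEAF.
	 * A real caller stores it in a struct file and eventually calls
	 * vn_close() to drop the FWRITE reference taken above.
	 */
	VOP_UNLOCK(nd.ni_vp);
	return (0);
}
#endif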

/*
 * Check for write permissions on the specified vnode.
 * The read-only status of the file system is checked.
 * Also, prototype text segments cannot be written.
 */
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket or a block or character
	 * device resident on the file system.
	 */
	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		switch (vp->v_type) {
		case VREG: case VDIR: case VLNK:
			return (EROFS);
		}
	}
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if ((vp->v_flag & VTEXT) && !vnode_pager_uncache(vp))
		return (ETXTBSY);
	return (0);
}
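
#if 0
/*
 * Illustrative sketch, not part of the original source: vn_writechk()
 * is consulted before write access is granted, exactly as vn_open()
 * does above; a zero return means the vnode may be written.
 */
example_writecheck(vp, cred, p)
	struct vnode *vp;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if ((error = vn_writechk(vp)) ||
	    (error = VOP_ACCESS(vp, VWRITE, cred, p)))
		return (error);
	return (0);
}
#endif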

/*
 * Vnode close call
 */
vn_close(vp, flags, cred, p)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, p);
	vrele(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_LOCK(vp);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		LEASE_CHECK(vp, p, cred, LEASE_READ);
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		LEASE_CHECK(vp, p, cred, LEASE_WRITE);
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);
	return (error);
}
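
#if 0
/*
 * Illustrative sketch, not part of the original source: using vn_rdwr()
 * to pull the first bytes of a file into a kernel buffer, in the style
 * of in-kernel callers such as the exec path.  The buffer size is
 * hypothetical; `vp' is assumed to be referenced but not locked, so an
 * ioflg of 0 lets vn_rdwr() take VOP_LOCK() itself.
 */
example_readhead(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	char buf[128];
	int resid, error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	/*
	 * On success buf holds sizeof(buf) - resid bytes; passing a NULL
	 * aresid instead would turn a short read into EIO.
	 */
	return (error);
}
#endif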

/*
 * File table vnode read routine.
 */
vn_read(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	register struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error;

	VOP_LOCK(vp);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	LEASE_CHECK(vp, uio->uio_procp, cred, LEASE_READ);
	error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0,
		cred);
	fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * File table vnode write routine.
 */
vn_write(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	register struct vnode *vp = (struct vnode *)fp->f_data;
	int count, error, ioflag = 0;

	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	VOP_LOCK(vp);
	uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	LEASE_CHECK(vp, uio->uio_procp, cred, LEASE_WRITE);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if (ioflag & IO_APPEND)
		fp->f_offset = uio->uio_offset;
	else
		fp->f_offset += count - uio->uio_resid;
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * File table vnode stat routine.
 */
vn_stat(vp, sb, p)
	struct vnode *vp;
	register struct stat *sb;
	struct proc *p;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	sb->st_dev = vap->va_fsid;
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	};
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atime = vap->va_atime.tv_sec;
	sb->st_spare1 = 0;
	sb->st_mtime = vap->va_mtime.tv_sec;
	sb->st_spare2 = 0;
	sb->st_ctime = vap->va_ctime.tv_sec;
	sb->st_spare3 = 0;
	sb->st_blksize = vap->va_blocksize;
	sb->st_flags = vap->va_flags;
	sb->st_gen = vap->va_gen;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}
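
#if 0
/*
 * Illustrative sketch, not part of the original source: an fstat-style
 * caller filling in a struct stat from an open vnode-backed file.  The
 * fp->f_data cast follows vn_read()/vn_write() above; note that
 * st_blocks comes back in S_BLKSIZE-byte units.
 */
example_fstat(fp, sb, p)
	struct file *fp;
	struct stat *sb;
	struct proc *p;
{

	return (vn_stat((struct vnode *)fp->f_data, sb, p));
}
#endif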

/*
 * File table vnode ioctl routine.
 */
vn_ioctl(fp, com, data, p)
	struct file *fp;
	int com;
	caddr_t data;
	struct proc *p;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))
				return (error);
			*(off_t *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
		return (ENOTTY);

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode select routine.
 */
vn_select(fp, which, p)
	struct file *fp;
	int which;
	struct proc *p;
{

	return (VOP_SELECT(((struct vnode *)fp->f_data), which, fp->f_flag,
		fp->f_cred, p));
}

/*
 * File table vnode close routine.
 */
vn_closefile(fp, p)
	struct file *fp;
	struct proc *p;
{

	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, p));
}

/*
 * vn_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 * 	- look up fsid in mount list (if not found ret error)
 *	- get vp by calling VFS_FHTOVP() macro
 *	- if lockflag lock it with VOP_LOCK()
 */
vn_fhtovp(fhp, lockflag, vpp)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
{
	register struct mount *mp;

	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if (VFS_FHTOVP(mp, &fhp->fh_fid, 0, vpp))
		return (ESTALE);
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}
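
#if 0
/*
 * Illustrative sketch, not part of the original source: an NFS-server
 * style caller turning a client-supplied file handle into a locked
 * vnode, doing its work, and releasing it.  The example function and
 * its fhandle_t argument are hypothetical.
 */
example_fhlookup(fhp, p)
	fhandle_t *fhp;
	struct proc *p;
{
	struct vnode *vp;
	int error;

	if (error = vn_fhtovp(fhp, 1, &vp))
		return (error);		/* ESTALE if the handle is bad */
	/* ... operate on the locked, referenced vnode ... */
	vput(vp);
	return (0);
}
#endif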