/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_fp.c,v 1.7 2004/07/29 20:32:59 dillon Exp $
 */

/*
 * Direct file pointer API functions for in-kernel operations on files.
 * These functions provide an open/read/write/close-style interface within
 * the kernel for operating on files that are not necessarily associated
 * with processes and which do not (typically) have descriptors.
 *
 * FUTURE: file handle conversion routines to support checkpointing,
 * and additional file operations (ioctl, fcntl).
 */
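
/*
 * Example (illustrative sketch, not compiled): a minimal in-kernel
 * open/read/close sequence using the functions defined in this file.
 * The path and buffer size are hypothetical.
 *
 *	file_t fp;
 *	char buf[128];
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_open("/etc/motd", O_RDONLY, 0, &fp);
 *	if (error == 0) {
 *		error = fp_read(fp, buf, sizeof(buf), &res);
 *		fp_close(fp);
 *	}
 */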

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/filedesc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <machine/limits.h>

typedef struct file *file_t;

/*
 * fp_open:
 *
 *	Open a file as specified.  Use O_* flags for flags.
 *
 *	NOTE! O_ROOTCRED is not quite working yet; vn_open() asserts that
 *	the cred must match the process's cred. XXX
 *
 *	NOTE! When fp_open() is called from a pure thread, root creds are
 *	used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
    struct nameidata nd;
    struct thread *td;
    struct file *fp;
    int error;

    if ((error = falloc(NULL, fpp, NULL)) != 0)
        return (error);
    fp = *fpp;
    td = curthread;
    if (td->td_proc) {
        if ((flags & O_ROOTCRED) == 0)
            fsetcred(fp, td->td_proc->p_ucred);
        NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_SYSSPACE, path, td);
    } else {
        NDINIT2(&nd, NAMEI_LOOKUP, 0, UIO_SYSSPACE, path, td, proc0.p_ucred);
    }
    flags = FFLAGS(flags);
    if ((error = vn_open(&nd, flags, mode)) == 0) {
        NDFREE(&nd, NDF_ONLY_PNBUF);
        fp->f_data = (caddr_t)nd.ni_vp;
        fp->f_flag = flags;
        fp->f_ops = &vnops;
        fp->f_type = DTYPE_VNODE;
        VOP_UNLOCK(nd.ni_vp, NULL, 0, td);
    } else {
        fdrop(fp, td);
        *fpp = NULL;
    }
    return (error);
}
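
/*
 * Example (illustrative sketch, not compiled): creating a file with root
 * credentials from kernel code.  The path and mode are hypothetical, and
 * note the O_ROOTCRED caveat in the comment above.
 *
 *	file_t fp;
 *	int error;
 *
 *	error = fp_open("/var/tmp/kernel.out",
 *			O_WRONLY | O_CREAT | O_TRUNC | O_ROOTCRED,
 *			0644, &fp);
 *	if (error == 0) {
 *		... write via fp_write()/fp_pwrite() ...
 *		fp_close(fp);
 *	}
 */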

/*
 * fp_vpopen():	Open a file pointer given a vnode.  The vnode must be
 * locked.  The vnode will be returned unlocked whether an error occurs
 * or not.
 */
int
fp_vpopen(struct vnode *vp, int flags, file_t *fpp)
{
    struct thread *td;
    struct file *fp;
    int vmode;
    int error;

    *fpp = NULL;
    td = curthread;

    /*
     * Vnode checks (from vn_open())
     */
    if (vp->v_type == VLNK) {
        error = EMLINK;
        goto done;
    }
    if (vp->v_type == VSOCK) {
        error = EOPNOTSUPP;
        goto done;
    }
    flags = FFLAGS(flags);
    vmode = 0;
    if (flags & (FWRITE | O_TRUNC)) {
        if (vp->v_type == VDIR) {
            error = EISDIR;
            goto done;
        }
        error = vn_writechk(vp);
        if (error)
            goto done;
        vmode |= VWRITE;
    }
    if (flags & FREAD)
        vmode |= VREAD;
    if (vmode) {
        error = VOP_ACCESS(vp, vmode, td->td_proc->p_ucred, td);
        if (error)
            goto done;
    }
    error = VOP_OPEN(vp, flags, td->td_proc->p_ucred, td);
    if (error)
        goto done;
    /*
     * Make sure that a VM object is created for VMIO support.
     */
    if (vn_canvmio(vp) == TRUE) {
        if ((error = vfs_object_create(vp, td)) != 0)
            goto done;
    }

    /*
     * File pointer setup
     */
    if ((error = falloc(NULL, fpp, NULL)) != 0)
        goto done;
    fp = *fpp;
    if ((flags & O_ROOTCRED) == 0 && td->td_proc)
        fsetcred(fp, td->td_proc->p_ucred);
    fp->f_data = (caddr_t)vp;
    fp->f_flag = flags;
    fp->f_ops = &vnops;
    fp->f_type = DTYPE_VNODE;

    /*
     * All done, set return value and update v_writecount now that no more
     * errors can occur.
     */
    *fpp = fp;
    if (flags & FWRITE)
        vp->v_writecount++;
done:
    VOP_UNLOCK(vp, NULL, 0, td);
    return (error);
}
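
/*
 * Example (illustrative sketch, not compiled): opening a file pointer
 * from a vnode the caller already holds, from a process context.  The
 * vnode must be locked on entry and is returned unlocked.  The vn_lock()
 * flags shown are an assumption based on this era's API.
 *
 *	struct file *fp;
 *	int error;
 *
 *	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, curthread);
 *	error = fp_vpopen(vp, O_RDWR, &fp);	(vp comes back unlocked)
 *	if (error == 0) {
 *		... use fp ...
 *		fp_close(fp);
 *	}
 */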

/*
 * fp_*read() is meant to operate like the normal descriptor-based
 * syscalls would.  Note that if 'buf' points to user memory a
 * UIO_USERSPACE transfer will be used.
 */
int
fp_pread(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = offset;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_READ;
    if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
        auio.uio_segflg = UIO_USERSPACE;
    else
        auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, auio.uio_td);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return (error);
}
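
/*
 * Example (illustrative sketch, not compiled): reading 512 bytes at byte
 * offset 1024 into a kernel buffer.  Because 'buf' is a kernel address,
 * the transfer uses UIO_SYSSPACE automatically.
 *
 *	char buf[512];
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_pread(fp, buf, sizeof(buf), 1024, &res);
 *	(on success 'res' holds the number of bytes actually read)
 */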

int
fp_read(file_t fp, void *buf, size_t nbytes, ssize_t *res)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = 0;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_READ;
    if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
        auio.uio_segflg = UIO_USERSPACE;
    else
        auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_read(fp, &auio, fp->f_cred, 0, auio.uio_td);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return (error);
}

int
fp_pwrite(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = offset;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_WRITE;
    if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
        auio.uio_segflg = UIO_USERSPACE;
    else
        auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, auio.uio_td);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return (error);
}

int
fp_write(file_t fp, void *buf, size_t nbytes, ssize_t *res)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = 0;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_WRITE;
    if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
        auio.uio_segflg = UIO_USERSPACE;
    else
        auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_write(fp, &auio, fp->f_cred, 0, auio.uio_td);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return (error);
}

int
fp_stat(file_t fp, struct stat *ub)
{
    int error;

    error = fo_stat(fp, ub, curthread);
    return (error);
}

/*
 * Non-anonymous, non-stack descriptor mappings only!
 *
 * This routine is mostly snarfed from vm/vm_mmap.c.
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
    off_t pos, void **resp)
{
    struct thread *td = curthread;
    struct proc *p = td->td_proc;
    vm_size_t pageoff;
    vm_prot_t maxprot;
    vm_offset_t addr;
    void *handle;
    int error;
    vm_object_t obj;
    struct vmspace *vms = p->p_vmspace;
    struct vnode *vp;
    int disablexworkaround;

    prot &= VM_PROT_ALL;

    if ((ssize_t)size < 0 || (flags & MAP_ANON))
        return (EINVAL);

    pageoff = (pos & PAGE_MASK);
    pos -= pageoff;

    /* Adjust size for rounding (on both ends). */
    size += pageoff;				/* low end... */
    size = (vm_size_t)round_page(size);		/* hi end */
    addr = (vm_offset_t)addr_arg;

    /*
     * Check for illegal addresses.  Watch out for address wrap... Note
     * that VM_*_ADDRESS are not constants due to casts (argh).
     */
    if (flags & MAP_FIXED) {
        /*
         * The specified address must have the same remainder
         * as the file offset taken modulo PAGE_SIZE, so it
         * should be aligned after adjustment by pageoff.
         */
        addr -= pageoff;
        if (addr & PAGE_MASK)
            return (EINVAL);
        /* Address range must be all in user VM space. */
        if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
            return (EINVAL);
#ifndef i386
        if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
            return (EINVAL);
#endif
        if (addr + size < addr)
            return (EINVAL);
    } else if (addr == 0 ||
        (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
         addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
        /*
         * XXX for non-fixed mappings where no hint is provided or
         * the hint would fall in the potential heap space,
         * place it after the end of the largest possible heap.
         *
         * There should really be a pmap call to determine a reasonable
         * location.
         */
        addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
    }

    /*
     * We are mapping a file: validate fp, then obtain the vnode and make
     * sure it is of an appropriate type.
     */
    if (fp->f_type != DTYPE_VNODE)
        return (EINVAL);

    /*
     * POSIX shared-memory objects are defined to have
     * kernel persistence, and are not defined to support
     * read(2)/write(2) -- or even open(2).  Thus, we can
     * use MAP_NOSYNC to trade on-disk coherence for speed.
     * The shm_open(3) library routine turns on the FPOSIXSHM
     * flag to request this behavior.
     */
    if (fp->f_flag & FPOSIXSHM)
        flags |= MAP_NOSYNC;
    vp = (struct vnode *) fp->f_data;
    if (vp->v_type != VREG && vp->v_type != VCHR)
        return (EINVAL);

    /*
     * Get the proper underlying object
     */
    if (vp->v_type == VREG) {
        if (VOP_GETVOBJECT(vp, &obj) != 0)
            return (EINVAL);
        vp = (struct vnode *)obj->handle;
    }

    /*
     * XXX hack to handle use of /dev/zero to map anon memory (a la
     * SunOS).
     */
    if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
        handle = NULL;
        maxprot = VM_PROT_ALL;
        flags |= MAP_ANON;
        pos = 0;
    } else {
        /*
         * cdevs do not provide private mappings of any kind.
         *
         * However, for the XIG X server to continue to work we
         * should allow the superuser to do it anyway.  We only
         * allow it at securelevel < 1 (because the XIG X server
         * writes directly to video memory via /dev/mem, it should
         * never work at any other securelevel).
         * XXX this will have to go
         */
        if (securelevel >= 1)
            disablexworkaround = 1;
        else
            disablexworkaround = suser(td);
        if (vp->v_type == VCHR && disablexworkaround &&
            (flags & (MAP_PRIVATE|MAP_COPY))) {
            error = EINVAL;
            goto done;
        }
        /*
         * Ensure that file and memory protections are
         * compatible.  Note that we only worry about
         * writability if mapping is shared; in this case,
         * current and max prot are dictated by the open file.
         * XXX use the vnode instead?  Problem is: what
         * credentials do we use for determination? What if
         * proc does a setuid?
         */
        maxprot = VM_PROT_EXECUTE;	/* ??? */
        if (fp->f_flag & FREAD) {
            maxprot |= VM_PROT_READ;
        } else if (prot & PROT_READ) {
            error = EACCES;
            goto done;
        }
        /*
         * If we are sharing potential changes (either via
         * MAP_SHARED or via the implicit sharing of character
         * device mappings), and we are trying to get write
         * permission although we opened it without asking
         * for it, bail out.  Check for superuser, only if
         * we're at securelevel < 1, to allow the XIG X server
         * to continue to work.
         */
        if ((flags & MAP_SHARED) != 0 ||
            (vp->v_type == VCHR && disablexworkaround)) {
            if ((fp->f_flag & FWRITE) != 0) {
                struct vattr va;
                if ((error = VOP_GETATTR(vp, &va, td))) {
                    goto done;
                }
                if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
                    maxprot |= VM_PROT_WRITE;
                } else if (prot & PROT_WRITE) {
                    error = EPERM;
                    goto done;
                }
            } else if ((prot & PROT_WRITE) != 0) {
                error = EACCES;
                goto done;
            }
        } else {
            maxprot |= VM_PROT_WRITE;
        }
        handle = (void *)vp;
    }
    error = vm_mmap(&vms->vm_map, &addr, size, prot,
                    maxprot, flags, handle, pos);
    if (error == 0 && resp)
        *resp = (void *)addr;
done:
    return (error);
}
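
/*
 * Example (illustrative sketch, not compiled): mapping the first 64KB of
 * an open regular file read-only into the current process's address
 * space.  Passing a NULL address hint lets the routine choose a range;
 * the file must have been opened with FREAD for PROT_READ to succeed.
 *
 *	void *base;
 *	int error;
 *
 *	error = fp_mmap(NULL, 65536, PROT_READ, MAP_SHARED, fp, 0, &base);
 *	(on success 'base' points to the start of the new mapping)
 */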

int
fp_close(file_t fp)
{
    return (fdrop(fp, curthread));
}