xref: /openbsd-src/sys/uvm/uvm_io.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: uvm_io.c,v 1.22 2014/07/11 16:35:40 jsg Exp $	*/
2 /*	$NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * from: Id: uvm_io.c,v 1.1.2.2 1997/12/30 12:02:00 mrg Exp
29  */
30 
31 /*
32  * uvm_io.c: uvm i/o ops
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/mman.h>
38 #include <sys/proc.h>
39 #include <sys/malloc.h>
40 #include <sys/uio.h>
41 
42 #include <uvm/uvm.h>
43 
44 /*
45  * functions
46  */
47 
48 /*
49  * uvm_io: perform I/O on a map
50  *
51  * => caller must have a reference to "map" so that it doesn't go away
52  *    while we are working.
53  */
54 
/*
 * uvm_io: move data between "map" and the buffers described by "uio".
 *
 * => map: the address space to do I/O on; the caller must hold a
 *    reference so it cannot be freed while we work (see comment above)
 * => uio: describes the transfer; uio_offset is a virtual address
 *    inside "map", uio_resid the byte count.  direction (read/write)
 *    comes from the uio itself via uiomove().
 * => flags: UVM_IO_FIXPROT is translated to UVM_EXTRACT_FIXPROT for
 *    the extract step below
 * => returns 0 on success, otherwise an errno value (EIO on address
 *    wrap, or whatever uvm_map_extract()/uiomove() report)
 */
int
uvm_io(vm_map_t map, struct uio *uio, int flags)
{
	vaddr_t baseva, endva, pageoffset, kva;
	vsize_t chunksz, togo, sz;
	struct uvm_map_deadq dead_entries;
	int error, extractflags;

	/*
	 * step 0: sanity checks and set up for copy loop.  start with a
	 * large chunk size.  if we have trouble finding vm space we will
	 * reduce it.
	 */
	if (uio->uio_resid == 0)
		return(0);
	togo = uio->uio_resid;

	baseva = (vaddr_t) uio->uio_offset;
	endva = baseva + (togo - 1);	/* inclusive last byte of the transfer */

	if (endva < baseva)   /* wrap around? */
		return(EIO);

	/* entirely past the user address range: behave like EOF */
	if (baseva >= VM_MAXUSER_ADDRESS)
		return(0);
	if (endva >= VM_MAXUSER_ADDRESS)
		/* EOF truncate */
		togo = togo - (endva - VM_MAXUSER_ADDRESS + 1);
	/* split the start address into page base + offset within the page */
	pageoffset = baseva & PAGE_MASK;
	baseva = trunc_page(baseva);
	chunksz = min(round_page(togo + pageoffset), MAXBSIZE);
	error = 0;

	extractflags = 0;
	if (flags & UVM_IO_FIXPROT)
		extractflags |= UVM_EXTRACT_FIXPROT;

	/*
	 * step 1: main loop...  while we've got data to move.
	 * note: "pageoffset = 0" in the increment clause — only the first
	 * chunk can start mid-page; every later chunk is page-aligned.
	 */
	for (/*null*/; togo > 0 ; pageoffset = 0) {
		/* step 2: extract mappings from the map into kernel_map */
		error = uvm_map_extract(map, baseva, chunksz, &kva,
		    extractflags);
		if (error) {

			/* retry with a smaller chunk... */
			if (error == ENOMEM && chunksz > PAGE_SIZE) {
				/* halve the chunk, but never below one page */
				chunksz = trunc_page(chunksz / 2);
				if (chunksz < PAGE_SIZE)
					chunksz = PAGE_SIZE;
				continue;
			}

			break;
		}

		/* step 3: move a chunk of data */
		sz = chunksz - pageoffset;	/* usable bytes in this chunk */
		if (sz > togo)
			sz = togo;
		error = uiomove((caddr_t) (kva + pageoffset), sz, uio);
		togo -= sz;
		baseva += chunksz;

		/* step 4: unmap the area of kernel memory */
		vm_map_lock(kernel_map);
		TAILQ_INIT(&dead_entries);
		uvm_unmap_remove(kernel_map, kva, kva+chunksz,
		    &dead_entries, FALSE, TRUE);
		vm_map_unlock(kernel_map);
		/* drop the references the extract step took on the amaps */
		uvm_unmap_detach(&dead_entries, AMAP_REFALL);

		/*
		 * We defer checking the error return from uiomove until
		 * here so that we won't leak memory.
		 */
		if (error)
			break;
	}

	return (error);
}
136