/*	$OpenBSD: uvm_unix.c,v 1.49 2014/04/13 23:14:15 tedu Exp $	*/
/*	$NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *      @(#)vm_unix.c   8.1 (Berkeley) 6/11/93
 * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
 */

/*
 * uvm_unix.c: traditional sbrk/grow interface to vm.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/core.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>

/*
 * sys_obreak: set break
 */

int
sys_obreak(struct proc *p, void *v, register_t *retval)
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old, base;
	int error;

	base = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if (new < base || (new - base) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
		return (ENOMEM);

	old = round_page(base + ptoa(vm->vm_dsize));

	if (new == old)
		return (0);

	/* grow or shrink? */
	if (new > old) {
		error = uvm_map(&vm->vm_map, &old, new - old, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX, UVM_INH_COPY,
		    UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
		    UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
			    new - old, error);
			return (ENOMEM);
		}
		vm->vm_dsize += atop(new - old);
	} else {
		uvm_deallocate(&vm->vm_map, new, old - new);
		vm->vm_dsize -= atop(old - new);
	}

	return (0);
}
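
/*
 * Illustrative userland sketch (hypothetical, not part of this file):
 * libc's sbrk(2) is built on the break managed here, so growing the
 * heap by one page eventually reaches sys_obreak() with the new break
 * address, which is page-rounded, checked against RLIMIT_DATA, and
 * mapped in.
 *
 *	char *old = sbrk(0);			(needs <unistd.h>)
 *	if (brk(old + getpagesize()) == -1)
 *		err(1, "brk");			(needs <err.h>)
 */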

/*
 * uvm_grow: enlarge the "stack segment" to include sp.
 */
void
uvm_grow(struct proc *p, vaddr_t sp)
{
	struct vmspace *vm = p->p_vmspace;
	int si;

	/* For user-defined stacks (from sendsig). */
	if (sp < (vaddr_t)vm->vm_maxsaddr)
		return;

	/* For the common case of an already-allocated page (from trap). */
#ifdef MACHINE_STACK_GROWS_UP
	if (sp < USRSTACK + ptoa(vm->vm_ssize))
#else
	if (sp >= USRSTACK - ptoa(vm->vm_ssize))
#endif
		return;

	/* Check against the stack limit; grow the stack size if it fits. */
#ifdef MACHINE_STACK_GROWS_UP
	si = atop(sp - USRSTACK) - vm->vm_ssize + 1;
#else
	si = atop(USRSTACK - sp) - vm->vm_ssize;
#endif
	if (vm->vm_ssize + si <= atop(p->p_rlimit[RLIMIT_STACK].rlim_cur))
		vm->vm_ssize += si;
}
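
/*
 * Worked example for the downward-growing case above, using 4 KB pages
 * and hypothetical numbers: with USRSTACK = 0xc0000000 and vm_ssize = 4,
 * the stack currently reaches down to 0xbfffc000.  A fault at
 * sp = 0xbfffa000 yields
 *
 *	si = atop(0xc0000000 - 0xbfffa000) - 4 = 6 - 4 = 2
 *
 * so the stack grows by two pages, provided the new size still fits
 * within RLIMIT_STACK.
 */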

#ifndef SMALL_KERNEL

/*
 * uvm_coredump: dump core!
 */

int
uvm_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
    struct core *chdr)
{
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t entry, safe;
	vaddr_t start, end, top;
	struct coreseg cseg;
	off_t offset, coffset;
	int csize, chunk, flag, error = 0;

	offset = chdr->c_hdrsize + chdr->c_seghdrsize + chdr->c_cpusize;

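	/*
	 * Core file layout assumed by the offset above (a sketch; the
	 * core header and CPU segment are written by the caller):
	 *
	 *	| core hdr | cpu seg hdr | cpu regs | seg hdr | seg data |..
	 *	 c_hdrsize  c_seghdrsize   c_cpusize  c_seghdrsize  c_size
	 *
	 * "offset" starts just past the CPU segment and advances as each
	 * memory segment header and its data are written below.
	 */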
	RB_FOREACH_SAFE(entry, uvm_map_addr, &map->addr, safe) {
		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("uvm_coredump: user process with submap?");
		}

		if (!(entry->protection & VM_PROT_WRITE) &&
		    entry->start != p->p_p->ps_sigcode)
			continue;

		/* Don't dump mmapped devices. */
		if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
			continue;

		start = entry->start;
		end = entry->end;

		if (start >= VM_MAXUSER_ADDRESS)
			continue;

		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

#ifdef MACHINE_STACK_GROWS_UP
		if (USRSTACK <= start && start < (USRSTACK + MAXSSIZ)) {
			top = round_page(USRSTACK + ptoa(vm->vm_ssize));
			if (end > top)
				end = top;

			if (start >= end)
				continue;
#else
		if (start >= (vaddr_t)vm->vm_maxsaddr) {
			top = trunc_page(USRSTACK - ptoa(vm->vm_ssize));
			if (start < top)
				start = top;

			if (start >= end)
				continue;
#endif
			flag = CORE_STACK;
		} else
			flag = CORE_DATA;

		/* Set up a new core file segment. */
		CORE_SETMAGIC(cseg, CORESEGMAGIC, CORE_GETMID(*chdr), flag);
		cseg.c_addr = start;
		cseg.c_size = end - start;

		error = vn_rdwr(UIO_WRITE, vp,
		    (caddr_t)&cseg, chdr->c_seghdrsize,
		    offset, UIO_SYSSPACE, IO_UNIT, cred, NULL, p);
		/*
		 * We might get an EFAULT on objects mapped beyond
		 * EOF. Ignore the error.
		 */
		if (error && error != EFAULT)
			break;

		offset += chdr->c_seghdrsize;

		coffset = 0;
		csize = (int)cseg.c_size;
		do {
			if (p->p_siglist & sigmask(SIGKILL))
				return (EINTR);

			/*
			 * The rest of the loop sleeps with the lock
			 * held, so yield the CPU first.
			 */
			yield();

			chunk = MIN(csize, MAXPHYS);
			error = vn_rdwr(UIO_WRITE, vp,
			    (caddr_t)(u_long)cseg.c_addr + coffset,
			    chunk, offset + coffset, UIO_USERSPACE,
			    IO_UNIT, cred, NULL, p);
			if (error)
				return (error);

			coffset += chunk;
			csize -= chunk;
		} while (csize > 0);
		offset += cseg.c_size;

		/* Discard the memory */
		uvm_unmap(map, cseg.c_addr, cseg.c_addr + cseg.c_size);

		chdr->c_nseg++;
	}

	return (error);
}

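/*
 * uvm_coredump_walkmap: walk the address map as uvm_coredump() does,
 * but hand each dumpable range to the given callback as a
 * uvm_coredump_state instead of writing it here (used, e.g., by the
 * ELF core dump code).
 */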
int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
    int (*func)(struct proc *, void *, struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t top;
	int error;

	RB_FOREACH(entry, uvm_map_addr, &map->addr) {
		state.cookie = cookie;
		state.prot = entry->protection;
		state.flags = 0;

		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("uvm_coredump_walkmap: user process with submap?");
		}

		if (!(entry->protection & VM_PROT_WRITE) &&
		    entry->start != p->p_p->ps_sigcode)
			continue;

		/* Don't dump mmapped devices. */
		if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
			continue;

		state.start = entry->start;
		state.realend = entry->end;
		state.end = entry->end;

		if (state.start >= VM_MAXUSER_ADDRESS)
			continue;

		if (state.end > VM_MAXUSER_ADDRESS)
			state.end = VM_MAXUSER_ADDRESS;

#ifdef MACHINE_STACK_GROWS_UP
		if (USRSTACK <= state.start &&
		    state.start < (USRSTACK + MAXSSIZ)) {
			top = round_page(USRSTACK + ptoa(vm->vm_ssize));
			if (state.end > top)
				state.end = top;

			if (state.start >= state.end)
				continue;
#else
		if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
			top = trunc_page(USRSTACK - ptoa(vm->vm_ssize));
			if (state.start < top)
				state.start = top;

			if (state.start >= state.end)
				continue;
#endif
			state.flags |= UVM_COREDUMP_STACK;
		}

		error = (*func)(p, iocookie, &state);
		if (error)
			return (error);
	}

	return (0);
}
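
/*
 * Hedged sketch of a walker callback (hypothetical, for illustration
 * only): each dumpable range arrives as a uvm_coredump_state, and the
 * callback decides how to emit it.
 *
 *	static int
 *	dump_seg(struct proc *p, void *io, struct uvm_coredump_state *us)
 *	{
 *		emit the range [us->start, us->end), honoring
 *		us->prot and the UVM_COREDUMP_STACK flag in us->flags;
 *		return (0);
 *	}
 *
 * uvm_coredump_walkmap(p, io, dump_seg, cookie) then invokes dump_seg
 * once per dumpable map entry, stopping on the first error.
 */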

#endif	/* !SMALL_KERNEL */