/*	$OpenBSD: uvm_unix.c,v 1.64 2017/03/09 20:27:41 guenther Exp $	*/
/*	$NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *      @(#)vm_unix.c   8.1 (Berkeley) 6/11/93
 * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
 */

/*
 * uvm_unix.c: traditional sbrk/grow interface to vm.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>

/*
 * sys_obreak: set break
 */

int
sys_obreak(struct proc *p, void *v, register_t *retval)
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old, base;
	int error;

	base = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if (new < base || (new - base) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
		return (ENOMEM);

	old = round_page(base + ptoa(vm->vm_dsize));

	if (new == old)
		return (0);

	/* grow or shrink? */
	if (new > old) {
		error = uvm_map(&vm->vm_map, &old, new - old, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
		    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
			    new - old, error);
			return (ENOMEM);
		}
		vm->vm_dsize += atop(new - old);
	} else {
		uvm_deallocate(&vm->vm_map, new, old - new);
		vm->vm_dsize -= atop(old - new);
	}

	return (0);
}
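
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096 and
 * made-up addresses): with vm_daddr == 0x1000000 and vm_dsize == 16
 * pages, old == 0x1010000.  A break request for 0x1012345 rounds up
 * to new == 0x1013000; new > old, so uvm_map() maps the three pages
 * at [0x1010000, 0x1013000) and vm_dsize grows to 19 pages.  A
 * request below old instead trims the range with uvm_deallocate().
 */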

/*
 * uvm_grow: enlarge the "stack segment" to include sp.
 */
void
uvm_grow(struct proc *p, vaddr_t sp)
{
	struct vmspace *vm = p->p_vmspace;
	int si;

	/* For user defined stacks (from sendsig). */
	if (sp < (vaddr_t)vm->vm_maxsaddr)
		return;

	/* For common case of already allocated (from trap). */
#ifdef MACHINE_STACK_GROWS_UP
	if (sp < (vaddr_t)vm->vm_maxsaddr + ptoa(vm->vm_ssize))
#else
	if (sp >= (vaddr_t)vm->vm_minsaddr - ptoa(vm->vm_ssize))
#endif
		return;

	/* Really need to check vs limit and increment stack size if ok. */
#ifdef MACHINE_STACK_GROWS_UP
	si = atop(sp - (vaddr_t)vm->vm_maxsaddr) - vm->vm_ssize + 1;
#else
	si = atop((vaddr_t)vm->vm_minsaddr - sp) - vm->vm_ssize;
#endif
	if (vm->vm_ssize + si <= atop(p->p_rlimit[RLIMIT_STACK].rlim_cur))
		vm->vm_ssize += si;
}
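
/*
 * Worked example for the stack-grows-down case (illustrative only,
 * assuming PAGE_SIZE == 4096 and a made-up vm_minsaddr of
 * 0x80000000): with vm_ssize == 4 the accounting already covers
 * [0x7fffc000, 0x80000000).  A fault with sp == 0x7fff9000 gives
 * si = atop(0x7000) - 4 = 3, so vm_ssize becomes 7 and the accounted
 * stack extends down to sp.  Note this routine only updates the
 * vm_ssize accounting; the pages themselves are materialized by the
 * fault path.
 */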

#ifndef SMALL_KERNEL

#define WALK_CHUNK	32
/*
 * Not all the pages in an amap may be present.  When dumping core,
 * we don't want to force all the pages to be present: it's a waste
 * of time and memory when we already know what they contain (zeros)
 * and the ELF format at least can adequately represent them as a
 * segment with memory size larger than its file size.
 *
 * So, we walk the amap with calls to amap_lookups() and scan the
 * resulting pointers to find ranges of zero or more present pages
 * followed by at least one absent page or the end of the amap.
 * We then pass that range to the walk callback with 'start'
 * pointing to the start of the present range, 'realend' pointing
 * to the first absent page (or the end of the entry), and 'end'
 * pointing to the page past the last absent page (or the end of
 * the entry).
 *
 * Note that if the first page of the amap is empty then the callback
 * must be invoked with 'start' == 'realend' so it can present that
 * first range of absent pages.
 */
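/*
 * Worked example (illustrative only): for a six-page entry starting
 * at S whose pages are present (P) or absent (A) in the pattern
 * P P A A P A, the callback is invoked twice:
 *
 *	(*walk)(S,        S + 2*PS, S + 4*PS, ...)
 *	(*walk)(S + 4*PS, S + 5*PS, S + 6*PS, ...)
 *
 * where PS is PAGE_SIZE: each call covers a present range
 * [start, realend) followed by an absent range [realend, end).
 */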
int
uvm_coredump_walk_amap(struct vm_map_entry *entry, int *nsegmentp,
    uvm_coredump_walk_cb *walk, void *cookie)
{
	struct vm_anon *anons[WALK_CHUNK];
	vaddr_t pos, start, realend, end, entry_end;
	vm_prot_t prot;
	int nsegment, absent, npages, i, error;

	prot = entry->protection;
	nsegment = *nsegmentp;
	start = entry->start;
	entry_end = MIN(entry->end, VM_MAXUSER_ADDRESS);

	absent = 0;
	for (pos = start; pos < entry_end; pos += npages << PAGE_SHIFT) {
		npages = (entry_end - pos) >> PAGE_SHIFT;
		if (npages > WALK_CHUNK)
			npages = WALK_CHUNK;
		amap_lookups(&entry->aref, pos - entry->start, anons, npages);
		for (i = 0; i < npages; i++) {
			if ((anons[i] == NULL) == absent)
				continue;
			if (!absent) {
				/* going from present to absent: set realend */
				realend = pos + (i << PAGE_SHIFT);
				absent = 1;
				continue;
			}

			/* going from absent to present: invoke callback */
			end = pos + (i << PAGE_SHIFT);
			if (start != end) {
				error = (*walk)(start, realend, end, prot,
				    nsegment, cookie);
				if (error)
					return error;
				nsegment++;
			}
			start = realend = end;
			absent = 0;
		}
	}

	if (!absent)
		realend = entry_end;
	error = (*walk)(start, realend, entry_end, prot, nsegment, cookie);
	*nsegmentp = nsegment + 1;
	return error;
}

/*
 * Common logic for whether a map entry should be included in a coredump
 */
static inline int
uvm_should_coredump(struct proc *p, struct vm_map_entry *entry)
{
	if (!(entry->protection & PROT_WRITE) &&
	    entry->aref.ar_amap == NULL &&
	    entry->start != p->p_p->ps_sigcode)
		return 0;

	/*
	 * Skip ranges marked as unreadable, as uiomove(UIO_USERSPACE)
	 * will fail on them.  Maybe this really should be a test of
	 * entry->max_protection, but doing
	 *	uvm_map_extract(UVM_EXTRACT_FIXPROT)
	 * on each such page would suck.
	 */
	if ((entry->protection & PROT_READ) == 0)
		return 0;

	/* Don't dump mmaped devices. */
	if (entry->object.uvm_obj != NULL &&
	    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
		return 0;

	if (entry->start >= VM_MAXUSER_ADDRESS)
		return 0;

	return 1;
}
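
/*
 * Illustrative summary (not exhaustive): a read-only, file-backed
 * text segment with no amap is skipped; the same entry with an amap
 * (e.g. after a debugger wrote to it via copy-on-write) is dumped;
 * an unreadable guard range is skipped; a device mapping from
 * mmap(2) is skipped.
 */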


/* do nothing callback for uvm_coredump_walk_amap() */
static int
noop(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
    int nsegment, void *cookie)
{
	return 0;
}

/*
 * Walk the VA space for a process to identify what to write to
 * a coredump.  First the number of contiguous ranges is counted,
 * then the 'setup' callback is invoked to prepare for actually
 * recording the ranges, then the VA is walked again, invoking
 * the 'walk' callback for each range.  The number of ranges walked
 * is guaranteed to match the count seen by the 'setup' callback.
 */

int
uvm_coredump_walkmap(struct proc *p, uvm_coredump_setup_cb *setup,
    uvm_coredump_walk_cb *walk, void *cookie)
{
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t end;
	int refed_amaps = 0;
	int nsegment, error;

	/*
	 * Walk the map once to count the segments.  If an amap is
	 * referenced more than once then take *another* reference
	 * and treat the amap as exactly one segment instead of
	 * checking page presence inside it.  On the second pass
	 * we'll recognize which amaps we did that for by the ref
	 * count being >1...and decrement it then.
	 */
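	/*
	 * Worked example (illustrative only): an amap shared by two
	 * entries in this map starts with am_ref == 2.  The first
	 * pass bumps it once per entry (to 4) and counts each entry
	 * as one segment.  On the second pass both entries still
	 * show am_ref > 1, so each is dumped as a single segment and
	 * drops one reference, returning am_ref to 2.
	 */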
	nsegment = 0;
	RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("%s: user process with submap?", __func__);
		}

		if (! uvm_should_coredump(p, entry))
			continue;

		if (entry->aref.ar_amap != NULL) {
			if (entry->aref.ar_amap->am_ref == 1) {
				uvm_coredump_walk_amap(entry, &nsegment,
				    &noop, cookie);
				continue;
			}

			/*
			 * Multiple refs currently, so take another and
			 * treat it as a single segment
			 */
			entry->aref.ar_amap->am_ref++;
			refed_amaps++;
		}

		nsegment++;
	}

	/*
	 * Okay, we have a count in nsegment.  Prepare to
	 * walk it again, then invoke the setup callback.
	 */
	entry = RBT_MIN(uvm_map_addr, &map->addr);
	error = (*setup)(nsegment, cookie);
	if (error)
		goto cleanup;

	/*
	 * Setup went okay, so do the second walk, invoking the walk
	 * callback on the counted segments and cleaning up references
	 * as we go.
	 */
	nsegment = 0;
	for (; entry != NULL; entry = RBT_NEXT(uvm_map_addr, entry)) {
		if (! uvm_should_coredump(p, entry))
			continue;

		if (entry->aref.ar_amap != NULL &&
		    entry->aref.ar_amap->am_ref == 1) {
			error = uvm_coredump_walk_amap(entry, &nsegment,
			    walk, cookie);
			if (error)
				break;
			continue;
		}

		end = entry->end;
		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

		error = (*walk)(entry->start, end, end, entry->protection,
		    nsegment, cookie);
		if (error)
			break;
		nsegment++;

		if (entry->aref.ar_amap != NULL &&
		    entry->aref.ar_amap->am_ref > 1) {
			/* multiple refs, so we need to drop one */
			entry->aref.ar_amap->am_ref--;
			refed_amaps--;
		}
	}

	if (error) {
cleanup:
		/* clean up the extra references from where we left off */
		if (refed_amaps > 0) {
			for (; entry != NULL;
			    entry = RBT_NEXT(uvm_map_addr, entry)) {
				if (entry->aref.ar_amap == NULL ||
				    entry->aref.ar_amap->am_ref == 1)
					continue;
				if (! uvm_should_coredump(p, entry))
					continue;
				entry->aref.ar_amap->am_ref--;
				if (refed_amaps-- == 0)
					break;
			}
		}
	}

	return error;
}
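
/*
 * Illustrative sketch (not compiled): a minimal callback pair for
 * uvm_coredump_walkmap().  The names, the state structure, and the
 * bookkeeping are invented for this example; the real consumers are
 * the core dump writers that call this function, e.g. as
 * uvm_coredump_walkmap(p, example_setup, example_walk, &es).
 */
#if 0
struct example_state {
	int	es_nsegment;	/* count reported by the setup pass */
};

static int
example_setup(int nsegment, void *cookie)
{
	struct example_state *es = cookie;

	/* Remember how many ranges the second walk will report. */
	es->es_nsegment = nsegment;
	return 0;
}

static int
example_walk(vaddr_t start, vaddr_t realend, vaddr_t end,
    vm_prot_t prot, int nsegment, void *cookie)
{
	/*
	 * [start, realend) is backed by present pages and would be
	 * written out; [realend, end) is absent and can be described
	 * as zeros, e.g. as ELF memsz in excess of filesz.
	 */
	return 0;
}
#endif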

#endif	/* !SMALL_KERNEL */