/*	$OpenBSD: uvm_unix.c,v 1.67 2019/11/05 08:18:47 mpi Exp $	*/
/*	$NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *      @(#)vm_unix.c   8.1 (Berkeley) 6/11/93
 * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
 */

/*
 * uvm_unix.c: traditional sbrk/grow interface to vm.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>

/*
 * sys_obreak: set break
 */

int
sys_obreak(struct proc *p, void *v, register_t *retval)
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old, base;
	int error;

	base = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if (new < base || (new - base) > lim_cur(RLIMIT_DATA))
		return (ENOMEM);

	old = round_page(base + ptoa(vm->vm_dsize));

	if (new == old)
		return (0);

	/* grow or shrink? */
	if (new > old) {
		error = uvm_map(&vm->vm_map, &old, new - old, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
		    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
			    new - old, error);
			return (ENOMEM);
		}
		vm->vm_dsize += atop(new - old);
	} else {
		uvm_unmap(&vm->vm_map, new, old);
		vm->vm_dsize -= atop(old - new);
	}

	return (0);
}
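
/*
 * Illustrative example (an editor's sketch, not from the original
 * source): assume a 4096-byte page, base = vm_daddr = 0x1000000,
 * and vm_dsize = 4 pages, so old = 0x1004000.  A break request of
 * nsize = 0x1005001 rounds up to new = 0x1006000; new > old, so
 * uvm_map() adds [0x1004000, 0x1006000) and vm_dsize becomes 6.
 * A later request of nsize = 0x1002000 gives new < old, so
 * uvm_unmap() removes [0x1002000, 0x1006000) and vm_dsize drops
 * back to 2.
 */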

/*
 * uvm_grow: enlarge the "stack segment" to include sp.
 */
void
uvm_grow(struct proc *p, vaddr_t sp)
{
	struct vmspace *vm = p->p_vmspace;
	int si;

	/* For user defined stacks (from sendsig). */
	if (sp < (vaddr_t)vm->vm_maxsaddr)
		return;

	/* For common case of already allocated (from trap). */
#ifdef MACHINE_STACK_GROWS_UP
	if (sp < (vaddr_t)vm->vm_maxsaddr + ptoa(vm->vm_ssize))
#else
	if (sp >= (vaddr_t)vm->vm_minsaddr - ptoa(vm->vm_ssize))
#endif
		return;

	/* Really need to check vs limit and increment stack size if ok. */
#ifdef MACHINE_STACK_GROWS_UP
	si = atop(sp - (vaddr_t)vm->vm_maxsaddr) - vm->vm_ssize + 1;
#else
	si = atop((vaddr_t)vm->vm_minsaddr - sp) - vm->vm_ssize;
#endif
	if (vm->vm_ssize + si <= atop(lim_cur(RLIMIT_STACK)))
		vm->vm_ssize += si;
}
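
/*
 * Worked example (an editor's sketch, not from the original source)
 * for the common downward-growing case: with a 4096-byte page,
 * vm_minsaddr = 0x2000000, and vm_ssize = 4, the stack covers
 * [0x1FFC000, 0x2000000).  A fault at sp = 0x1FFA000 is below that
 * range, so si = atop(0x2000000 - 0x1FFA000) - 4 = 6 - 4 = 2 and
 * vm_ssize grows to 6 pages, provided RLIMIT_STACK permits it.
 */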

#ifndef SMALL_KERNEL

#define WALK_CHUNK	32
/*
 * Not all the pages in an amap may be present.  When dumping core,
 * we don't want to force all the pages to be present: it's a waste
 * of time and memory when we already know what they contain (zeros)
 * and the ELF format at least can adequately represent them as a
 * segment with memory size larger than its file size.
 *
 * So, we walk the amap with calls to amap_lookups() and scan the
 * resulting pointers to find ranges of zero or more present pages
 * followed by at least one absent page or the end of the amap.
 * We then pass that range to the walk callback with 'start'
 * pointing to the start of the present range, 'realend' pointing
 * to the first absent page (or the end of the entry), and 'end'
 * pointing to the page past the last absent page (or the end of
 * the entry).
 *
 * Note that if the first page of the amap is absent then the
 * callback must be invoked with 'start' == 'realend' so it can
 * present that first range of absent pages.
 */
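
/*
 * Illustrative example (an editor's sketch, not from the original
 * source): for an entry whose amap holds the six-page pattern
 * P P A A P A (P = present, A = absent), the walk callback is
 * invoked twice:
 *
 *	start = entry->start,		realend = start + 2 pages,
 *	end   = start + 4 pages		(2 present, then 2 absent)
 *
 *	start = entry->start + 4 pages,	realend = start + 1 page,
 *	end   = entry_end		(1 present, 1 trailing absent)
 */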
int
uvm_coredump_walk_amap(struct vm_map_entry *entry, int *nsegmentp,
    uvm_coredump_walk_cb *walk, void *cookie)
{
	struct vm_anon *anons[WALK_CHUNK];
	vaddr_t pos, start, realend, end, entry_end;
	vm_prot_t prot;
	int nsegment, absent, npages, i, error;

	prot = entry->protection;
	nsegment = *nsegmentp;
	start = entry->start;
	entry_end = MIN(entry->end, VM_MAXUSER_ADDRESS);

	absent = 0;
	for (pos = start; pos < entry_end; pos += npages << PAGE_SHIFT) {
		npages = (entry_end - pos) >> PAGE_SHIFT;
		if (npages > WALK_CHUNK)
			npages = WALK_CHUNK;
		amap_lookups(&entry->aref, pos - entry->start, anons, npages);
		for (i = 0; i < npages; i++) {
			if ((anons[i] == NULL) == absent)
				continue;
			if (!absent) {
				/* going from present to absent: set realend */
				realend = pos + (i << PAGE_SHIFT);
				absent = 1;
				continue;
			}

			/* going from absent to present: invoke callback */
			end = pos + (i << PAGE_SHIFT);
			if (start != end) {
				error = (*walk)(start, realend, end, prot,
				    nsegment, cookie);
				if (error)
					return error;
				nsegment++;
			}
			start = realend = end;
			absent = 0;
		}
	}

	if (!absent)
		realend = entry_end;
	error = (*walk)(start, realend, entry_end, prot, nsegment, cookie);
	*nsegmentp = nsegment + 1;
	return error;
}

/*
 * Common logic for whether a map entry should be included in a coredump
 */
static inline int
uvm_should_coredump(struct proc *p, struct vm_map_entry *entry)
{
	if (!(entry->protection & PROT_WRITE) &&
	    entry->aref.ar_amap == NULL &&
	    entry->start != p->p_p->ps_sigcode)
		return 0;

	/*
	 * Skip ranges marked as unreadable, as uiomove(UIO_USERSPACE)
	 * will fail on them.  Maybe this really should be a test of
	 * entry->max_protection, but doing
	 *	uvm_map_extract(UVM_EXTRACT_FIXPROT)
	 * on each such page would suck.
	 */
	if ((entry->protection & PROT_READ) == 0)
		return 0;

	/* Skip ranges excluded from coredumps. */
	if (UVM_ET_ISCONCEAL(entry))
		return 0;

	/* Don't dump mmapped devices. */
	if (entry->object.uvm_obj != NULL &&
	    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
		return 0;

	if (entry->start >= VM_MAXUSER_ADDRESS)
		return 0;

	return 1;
}


/* do-nothing callback for uvm_coredump_walk_amap() */
static int
noop(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
    int nsegment, void *cookie)
{
	return 0;
}

/*
 * Walk the VA space for a process to identify what to write to
 * a coredump.  First the number of contiguous ranges is counted,
 * then the 'setup' callback is invoked to prepare for actually
 * recording the ranges, then the VA is walked again, invoking
 * the 'walk' callback for each range.  The number of ranges walked
 * is guaranteed to match the count seen by the 'setup' callback.
 */
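
/*
 * A minimal sketch of caller-supplied callbacks (hypothetical names,
 * invented for illustration; the real callers live in the coredump
 * code).  'setup' typically sizes the segment table, and 'walk'
 * records one segment, using realend - start as the file size and
 * end - start as the memory size, so absent pages need not be
 * written out:
 *
 *	static int
 *	dump_setup(int nsegment, void *cookie)
 *	{
 *		// reserve headers for nsegment segments
 *		return 0;
 *	}
 *
 *	static int
 *	dump_walk(vaddr_t start, vaddr_t realend, vaddr_t end,
 *	    vm_prot_t prot, int nsegment, void *cookie)
 *	{
 *		// segment nsegment: file size = realend - start,
 *		// memory size = end - start, permissions from prot
 *		return 0;
 *	}
 *
 *	error = uvm_coredump_walkmap(p, dump_setup, dump_walk, cookie);
 */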

int
uvm_coredump_walkmap(struct proc *p, uvm_coredump_setup_cb *setup,
    uvm_coredump_walk_cb *walk, void *cookie)
{
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t end;
	int refed_amaps = 0;
	int nsegment, error;

	/*
	 * Walk the map once to count the segments.  If an amap is
	 * referenced more than once then take *another* reference
	 * and treat the amap as exactly one segment instead of
	 * checking page presence inside it.  On the second pass
	 * we'll recognize which amaps we did that for by the ref
	 * count being >1...and decrement it then.
	 */
	nsegment = 0;
	RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("%s: user process with submap?", __func__);
		}

		if (! uvm_should_coredump(p, entry))
			continue;

		if (entry->aref.ar_amap != NULL) {
			if (entry->aref.ar_amap->am_ref == 1) {
				uvm_coredump_walk_amap(entry, &nsegment,
				    &noop, cookie);
				continue;
			}

			/*
			 * Multiple refs currently, so take another and
			 * treat it as a single segment
			 */
			entry->aref.ar_amap->am_ref++;
			refed_amaps++;
		}

		nsegment++;
	}

	/*
	 * Okay, we have a count in nsegment.  Prepare to
	 * walk it again, then invoke the setup callback.
	 */
	entry = RBT_MIN(uvm_map_addr, &map->addr);
	error = (*setup)(nsegment, cookie);
	if (error)
		goto cleanup;

	/*
	 * Setup went okay, so do the second walk, invoking the walk
	 * callback on the counted segments and cleaning up references
	 * as we go.
	 */
	nsegment = 0;
	for (; entry != NULL; entry = RBT_NEXT(uvm_map_addr, entry)) {
		if (! uvm_should_coredump(p, entry))
			continue;

		if (entry->aref.ar_amap != NULL &&
		    entry->aref.ar_amap->am_ref == 1) {
			error = uvm_coredump_walk_amap(entry, &nsegment,
			    walk, cookie);
			if (error)
				break;
			continue;
		}

		end = entry->end;
		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

		error = (*walk)(entry->start, end, end, entry->protection,
		    nsegment, cookie);
		if (error)
			break;
		nsegment++;

		if (entry->aref.ar_amap != NULL &&
		    entry->aref.ar_amap->am_ref > 1) {
			/* multiple refs, so we need to drop one */
			entry->aref.ar_amap->am_ref--;
			refed_amaps--;
		}
	}

	if (error) {
cleanup:
		/* clean up the extra references from where we left off */
		if (refed_amaps > 0) {
			for (; entry != NULL;
			    entry = RBT_NEXT(uvm_map_addr, entry)) {
				if (entry->aref.ar_amap == NULL ||
				    entry->aref.ar_amap->am_ref == 1)
					continue;
				if (! uvm_should_coredump(p, entry))
					continue;
				entry->aref.ar_amap->am_ref--;
				if (refed_amaps-- == 0)
					break;
			}
		}
	}

	return error;
}

#endif	/* !SMALL_KERNEL */