/*	$OpenBSD: uvm_unix.c,v 1.73 2024/01/17 22:22:25 kurt Exp $	*/
/*	$NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *      @(#)vm_unix.c   8.1 (Berkeley) 6/11/93
 * from: Id: uvm_unix.c,v 1.1.2.2 1997/08/25 18:52:30 chuck Exp
 */

/*
 * uvm_unix.c: traditional sbrk/grow interface to vm.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>

/*
 * sys_obreak: set break
 */

int
sys_obreak(struct proc *p, void *v, register_t *retval)
{
	struct sys_obreak_args /* {
		syscallarg(char *) nsize;
	} */ *uap = v;
	struct vmspace *vm = p->p_vmspace;
	vaddr_t new, old, base;
	int error;

	base = (vaddr_t)vm->vm_daddr;
	new = round_page((vaddr_t)SCARG(uap, nsize));
	if (new < base || (new - base) > lim_cur(RLIMIT_DATA))
		return (ENOMEM);

	old = round_page(base + ptoa(vm->vm_dsize));

	if (new == old)
		return (0);

	/* grow or shrink? */
	if (new > old) {
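		/* Grow: map anonymous zero-fill memory at the old break. */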
		error = uvm_map(&vm->vm_map, &old, new - old, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
		    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
		if (error) {
			uprintf("sbrk: grow %ld failed, error = %d\n",
			    new - old, error);
			return (ENOMEM);
		}
		vm->vm_dsize += atop(new - old);
	} else {
		uvm_unmap(&vm->vm_map, new, old);
		vm->vm_dsize -= atop(old - new);
	}

	return (0);
}

/*
 * uvm_grow: enlarge the "stack segment" to include sp.
 */
void
uvm_grow(struct proc *p, vaddr_t sp)
{
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	int si;

	/* For user defined stacks (from sendsig). */
	if (sp < (vaddr_t)vm->vm_maxsaddr)
		return;
#ifdef MACHINE_STACK_GROWS_UP
	if (sp >= (vaddr_t)vm->vm_minsaddr)
		return;
#endif

	vm_map_lock(map);

	/* For common case of already allocated (from trap). */
#ifdef MACHINE_STACK_GROWS_UP
	if (sp < (vaddr_t)vm->vm_maxsaddr + ptoa(vm->vm_ssize))
#else
	if (sp >= (vaddr_t)vm->vm_minsaddr - ptoa(vm->vm_ssize))
#endif
		goto out;

	/* Really need to check vs limit and increment stack size if ok. */
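	/* si = number of additional stack pages needed to reach sp */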
#ifdef MACHINE_STACK_GROWS_UP
	si = atop(sp - (vaddr_t)vm->vm_maxsaddr) - vm->vm_ssize + 1;
#else
	si = atop((vaddr_t)vm->vm_minsaddr - sp) - vm->vm_ssize;
#endif
	if (vm->vm_ssize + si <= atop(lim_cur(RLIMIT_STACK)))
		vm->vm_ssize += si;
out:
	vm_map_unlock(map);
}

#ifndef SMALL_KERNEL

#define WALK_CHUNK	32
/*
 * Not all the pages in an amap may be present.  When dumping core,
 * we don't want to force all the pages to be present: it's a waste
 * of time and memory when we already know what they contain (zeros)
 * and the ELF format at least can adequately represent them as a
 * segment with memory size larger than its file size.
 *
 * So, we walk the amap with calls to amap_lookups() and scan the
 * resulting pointers to find ranges of zero or more present pages
 * followed by at least one absent page or the end of the amap.
 * We then pass that range to the walk callback with 'start'
 * pointing to the start of the present range, 'realend' pointing
 * to the first absent page (or the end of the entry), and 'end'
 * pointing to the page past the last absent page (or the end of
 * the entry).
 *
 * Note that if the first page of the amap is empty then the callback
 * must be invoked with 'start' == 'realend' so it can present that
 * first range of absent pages.
 */
int
uvm_coredump_walk_amap(struct vm_map_entry *entry, int *nsegmentp,
    uvm_coredump_walk_cb *walk, void *cookie)
{
	struct vm_anon *anons[WALK_CHUNK];
	vaddr_t pos, start, realend, end, entry_end;
	vm_prot_t prot;
	int nsegment, absent, npages, i, error;

	prot = entry->protection;
	nsegment = *nsegmentp;
	start = entry->start;
	entry_end = MIN(entry->end, VM_MAXUSER_ADDRESS);

	absent = 0;
	for (pos = start; pos < entry_end; pos += npages << PAGE_SHIFT) {
		npages = (entry_end - pos) >> PAGE_SHIFT;
		if (npages > WALK_CHUNK)
			npages = WALK_CHUNK;
		amap_lookups(&entry->aref, pos - entry->start, anons, npages);
		for (i = 0; i < npages; i++) {
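			/* still in the same present/absent run? keep scanning */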
			if ((anons[i] == NULL) == absent)
				continue;
			if (!absent) {
				/* going from present to absent: set realend */
				realend = pos + (i << PAGE_SHIFT);
				absent = 1;
				continue;
			}

			/* going from absent to present: invoke callback */
			end = pos + (i << PAGE_SHIFT);
			if (start != end) {
				error = (*walk)(start, realend, end, prot,
				    0, nsegment, cookie);
				if (error)
					return error;
				nsegment++;
			}
			start = realend = end;
			absent = 0;
		}
	}

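	/*
	 * Emit the final range: the trailing present pages (if any)
	 * plus any absent pages at the end of the entry.
	 */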
	if (!absent)
		realend = entry_end;
	error = (*walk)(start, realend, entry_end, prot, 0, nsegment, cookie);
	*nsegmentp = nsegment + 1;
	return error;
}

/*
 * Common logic for whether a map entry should be included in a coredump
 */
static inline int
uvm_should_coredump(struct proc *p, struct vm_map_entry *entry)
{
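	/*
	 * Unwritable mappings with no private (amap) pages hold nothing
	 * worth dumping, except for the sigcode and timekeep pages.
	 */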
	if (!(entry->protection & PROT_WRITE) &&
	    entry->aref.ar_amap == NULL &&
	    entry->start != p->p_p->ps_sigcode &&
	    entry->start != p->p_p->ps_timekeep)
		return 0;

	/*
	 * Skip ranges marked as unreadable, as uiomove(UIO_USERSPACE)
	 * will fail on them.  Maybe this really should be a test of
	 * entry->max_protection, but doing
	 *	uvm_map_extract(UVM_EXTRACT_FIXPROT)
	 * on each such page would suck.
	 */
	if (!(entry->protection & PROT_READ) &&
	    entry->start != p->p_p->ps_sigcode)
		return 0;

	/* Skip ranges excluded from coredumps. */
	if (UVM_ET_ISCONCEAL(entry))
		return 0;

	/* Don't dump mmaped devices. */
	if (entry->object.uvm_obj != NULL &&
	    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
		return 0;

	if (entry->start >= VM_MAXUSER_ADDRESS)
		return 0;

	return 1;
}


/* do nothing callback for uvm_coredump_walk_amap() */
static int
noop(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
    int isvnode, int nsegment, void *cookie)
{
	return 0;
}

/*
 * Walk the VA space for a process to identify what to write to
 * a coredump.  First the number of contiguous ranges is counted,
 * then the 'setup' callback is invoked to prepare for actually
 * recording the ranges, then the VA is walked again, invoking
 * the 'walk' callback for each range.  The number of ranges walked
 * is guaranteed to match the count seen by the 'setup' callback.
 */

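/*
 * A minimal usage sketch, with hypothetical callbacks (the real
 * callers live in the ELF coredump code):
 *
 *	static int
 *	my_setup(int nsegment, void *cookie)
 *	{
 *		(allocate room for 'nsegment' segment headers)
 *		return 0;
 *	}
 *
 *	static int
 *	my_walk(vaddr_t start, vaddr_t realend, vaddr_t end,
 *	    vm_prot_t prot, int isvnode, int nsegment, void *cookie)
 *	{
 *		(record segment 'nsegment': file bytes [start, realend),
 *		 memory bytes [start, end))
 *		return 0;
 *	}
 *
 *	error = uvm_coredump_walkmap(p, my_setup, my_walk, cookie);
 */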
int
uvm_coredump_walkmap(struct proc *p, uvm_coredump_setup_cb *setup,
    uvm_coredump_walk_cb *walk, void *cookie)
{
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	vaddr_t end;
	int refed_amaps = 0;
	int nsegment, error, isvnode;

	/*
	 * Walk the map once to count the segments.  If an amap is
	 * referenced more than once then take *another* reference
	 * and treat the amap as exactly one segment instead of
	 * checking page presence inside it.  On the second pass
	 * we'll recognize which amaps we did that for by the ref
	 * count being >1...and decrement it then.
	 */
	nsegment = 0;
	RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
		/* should never happen for a user process */
		if (UVM_ET_ISSUBMAP(entry)) {
			panic("%s: user process with submap?", __func__);
		}

		if (! uvm_should_coredump(p, entry))
			continue;

		if (entry->aref.ar_amap != NULL) {
			if (entry->aref.ar_amap->am_ref == 1) {
				uvm_coredump_walk_amap(entry, &nsegment,
				    &noop, cookie);
				continue;
			}

			/*
			 * Multiple refs currently, so take another and
			 * treat it as a single segment
			 */
			entry->aref.ar_amap->am_ref++;
			refed_amaps++;
		}

		nsegment++;
	}

	/*
	 * Okay, we have a count in nsegment.  Prepare to
	 * walk it again, then invoke the setup callback.
	 */
	entry = RBT_MIN(uvm_map_addr, &map->addr);
	error = (*setup)(nsegment, cookie);
	if (error)
		goto cleanup;

	/*
	 * Setup went okay, so do the second walk, invoking the walk
	 * callback on the counted segments and cleaning up references
	 * as we go.
	 */
	nsegment = 0;
	for (; entry != NULL; entry = RBT_NEXT(uvm_map_addr, entry)) {
		if (! uvm_should_coredump(p, entry))
			continue;

		if (entry->aref.ar_amap != NULL &&
		    entry->aref.ar_amap->am_ref == 1) {
			error = uvm_coredump_walk_amap(entry, &nsegment,
			    walk, cookie);
			if (error)
				break;
			continue;
		}

		end = entry->end;
		if (end > VM_MAXUSER_ADDRESS)
			end = VM_MAXUSER_ADDRESS;

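		/* Tell the callback whether the range is backed by a vnode. */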
		isvnode = (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_VNODE(entry->object.uvm_obj));
		error = (*walk)(entry->start, end, end, entry->protection,
		    isvnode, nsegment, cookie);
		if (error)
			break;
		nsegment++;

		if (entry->aref.ar_amap != NULL &&
		    entry->aref.ar_amap->am_ref > 1) {
			/* multiple refs, so we need to drop one */
			entry->aref.ar_amap->am_ref--;
			refed_amaps--;
		}
	}

	if (error) {
cleanup:
		/* clean up the extra references from where we left off */
		if (refed_amaps > 0) {
			for (; entry != NULL;
			    entry = RBT_NEXT(uvm_map_addr, entry)) {
				if (entry->aref.ar_amap == NULL ||
				    entry->aref.ar_amap->am_ref == 1)
					continue;
				if (! uvm_should_coredump(p, entry))
					continue;
				entry->aref.ar_amap->am_ref--;
				if (refed_amaps-- == 0)
					break;
			}
		}
	}

	return error;
}

#endif	/* !SMALL_KERNEL */