xref: /openbsd-src/sys/uvm/uvm_glue.c (revision e451d4134a6fd1c35115d5c6f4de24706ace28d0)
1*e451d413Smpi /*	$OpenBSD: uvm_glue.c,v 1.87 2024/10/28 08:25:32 mpi Exp $	*/
21414b0faSart /*	$NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $	*/
3cd7ee8acSart 
4cd7ee8acSart /*
5cd7ee8acSart  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6cd7ee8acSart  * Copyright (c) 1991, 1993, The Regents of the University of California.
7cd7ee8acSart  *
8cd7ee8acSart  * All rights reserved.
9cd7ee8acSart  *
10cd7ee8acSart  * This code is derived from software contributed to Berkeley by
11cd7ee8acSart  * The Mach Operating System project at Carnegie-Mellon University.
12cd7ee8acSart  *
13cd7ee8acSart  * Redistribution and use in source and binary forms, with or without
14cd7ee8acSart  * modification, are permitted provided that the following conditions
15cd7ee8acSart  * are met:
16cd7ee8acSart  * 1. Redistributions of source code must retain the above copyright
17cd7ee8acSart  *    notice, this list of conditions and the following disclaimer.
18cd7ee8acSart  * 2. Redistributions in binary form must reproduce the above copyright
19cd7ee8acSart  *    notice, this list of conditions and the following disclaimer in the
20cd7ee8acSart  *    documentation and/or other materials provided with the distribution.
21188f0ea4Sjsg  * 3. Neither the name of the University nor the names of its contributors
22cd7ee8acSart  *    may be used to endorse or promote products derived from this software
23cd7ee8acSart  *    without specific prior written permission.
24cd7ee8acSart  *
25cd7ee8acSart  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26cd7ee8acSart  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27cd7ee8acSart  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28cd7ee8acSart  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29cd7ee8acSart  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30cd7ee8acSart  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31cd7ee8acSart  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32cd7ee8acSart  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33cd7ee8acSart  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34cd7ee8acSart  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35cd7ee8acSart  * SUCH DAMAGE.
36cd7ee8acSart  *
37cd7ee8acSart  *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
38cd7ee8acSart  * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
39cd7ee8acSart  *
40cd7ee8acSart  *
41cd7ee8acSart  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42cd7ee8acSart  * All rights reserved.
43cd7ee8acSart  *
44cd7ee8acSart  * Permission to use, copy, modify and distribute this software and
45cd7ee8acSart  * its documentation is hereby granted, provided that both the copyright
46cd7ee8acSart  * notice and this permission notice appear in all copies of the
47cd7ee8acSart  * software, derivative works or modified versions, and any portions
48cd7ee8acSart  * thereof, and that both notices appear in supporting documentation.
49cd7ee8acSart  *
50cd7ee8acSart  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51cd7ee8acSart  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52cd7ee8acSart  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53cd7ee8acSart  *
54cd7ee8acSart  * Carnegie Mellon requests users of this software to return to
55cd7ee8acSart  *
56cd7ee8acSart  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57cd7ee8acSart  *  School of Computer Science
58cd7ee8acSart  *  Carnegie Mellon University
59cd7ee8acSart  *  Pittsburgh PA 15213-3890
60cd7ee8acSart  *
61cd7ee8acSart  * any improvements or extensions that they make and grant Carnegie the
62cd7ee8acSart  * rights to redistribute these changes.
63cd7ee8acSart  */
64cd7ee8acSart 
65cd7ee8acSart /*
66cd7ee8acSart  * uvm_glue.c: glue functions
67cd7ee8acSart  */
68cd7ee8acSart 
69cd7ee8acSart #include <sys/param.h>
70cd7ee8acSart #include <sys/systm.h>
71cd7ee8acSart #include <sys/proc.h>
72cd7ee8acSart #include <sys/resourcevar.h>
73cd7ee8acSart #include <sys/buf.h>
74cd7ee8acSart #ifdef SYSVSHM
75cd7ee8acSart #include <sys/shm.h>
76cd7ee8acSart #endif
77cd7ee8acSart 
78cd7ee8acSart #include <uvm/uvm.h>
79cd7ee8acSart 
80cd7ee8acSart /*
81cd7ee8acSart  * uvm_kernacc: can the kernel access a region of memory
82cd7ee8acSart  *
83cd7ee8acSart  * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
84cd7ee8acSart  */
85cd7ee8acSart boolean_t
862023d591Soga uvm_kernacc(caddr_t addr, size_t len, int rw)
87cd7ee8acSart {
88cd7ee8acSart 	boolean_t rv;
89cd7ee8acSart 	vaddr_t saddr, eaddr;
901e8cdc2eSderaadt 	vm_prot_t prot = rw == B_READ ? PROT_READ : PROT_WRITE;
91cd7ee8acSart 
924c78623bSart 	saddr = trunc_page((vaddr_t)addr);
934c78623bSart 	eaddr = round_page((vaddr_t)addr + len);
94cd7ee8acSart 	vm_map_lock_read(kernel_map);
95cd7ee8acSart 	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
96cd7ee8acSart 	vm_map_unlock_read(kernel_map);
97cd7ee8acSart 
98b9df1565Smpi 	return rv;
99cd7ee8acSart }
100cd7ee8acSart 
101cd7ee8acSart /*
1024257bd50Sgrange  * uvm_vslock: wire user memory for I/O
103cd7ee8acSart  *
1047f72a660Skettenis  * - called from sys_sysctl
105cd7ee8acSart  */
10607b6088bSart int
1072023d591Soga uvm_vslock(struct proc *p, caddr_t addr, size_t len, vm_prot_t access_type)
108cd7ee8acSart {
1097f72a660Skettenis 	struct vm_map *map = &p->p_vmspace->vm_map;
11007b6088bSart 	vaddr_t start, end;
1117cb53682Sart 
11207b6088bSart 	start = trunc_page((vaddr_t)addr);
11307b6088bSart 	end = round_page((vaddr_t)addr + len);
114bebf65abSgrange 	if (end <= start)
115bebf65abSgrange 		return (EINVAL);
1161414b0faSart 
117447db83cSmpi 	return uvm_fault_wire(map, start, end, access_type);
118cd7ee8acSart }
119cd7ee8acSart 
120cd7ee8acSart /*
1214257bd50Sgrange  * uvm_vsunlock: unwire user memory wired by uvm_vslock()
122cd7ee8acSart  *
1237f72a660Skettenis  * - called from sys_sysctl
124cd7ee8acSart  */
125cd7ee8acSart void
1262023d591Soga uvm_vsunlock(struct proc *p, caddr_t addr, size_t len)
127cd7ee8acSart {
128bebf65abSgrange 	vaddr_t start, end;
129bebf65abSgrange 
130bebf65abSgrange 	start = trunc_page((vaddr_t)addr);
131bebf65abSgrange 	end = round_page((vaddr_t)addr + len);
1327f72a660Skettenis 	KASSERT(end > start);
133bebf65abSgrange 
134447db83cSmpi 	uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);
135cd7ee8acSart }
136cd7ee8acSart 
/*
 * uvm_vslock_device: wire user memory, make sure it's device reachable
 *  and bounce if necessary.
 *
 * - called from physio
 * - on success with *retp == NULL, the user pages themselves are DMA
 *   reachable and wired; on success with *retp != NULL, *retp points
 *   into a kernel bounce buffer pre-filled (via copyin) with the user
 *   data, and uvm_vsunlock_device() copies it back out
 * - NOTE: every return-0 path leaves the map READ-LOCKED; the matching
 *   uvm_vsunlock_device() call drops that lock
 */
int
uvm_vslock_device(struct proc *p, void *addr, size_t len,
    vm_prot_t access_type, void **retp)
{
	struct vm_map *map = &p->p_vmspace->vm_map;
	struct vm_page *pg;
	struct pglist pgl;
	int npages;
	vaddr_t start, end, off;
	vaddr_t sva, va;
	vsize_t sz;
	int error, mapv, i;

	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	sz = end - start;
	off = (vaddr_t)addr - start;	/* offset of addr within its page */
	if (end <= start)
		return (EINVAL);

	vm_map_lock_read(map);
retry:
	/*
	 * uvm_fault_wire() must run unlocked; snapshot the map's version
	 * so we can detect a concurrent map change and redo the wiring.
	 */
	mapv = map->timestamp;
	vm_map_unlock_read(map);

	if ((error = uvm_fault_wire(map, start, end, access_type)))
		return (error);

	vm_map_lock_read(map);
	if (mapv != map->timestamp)
		goto retry;

	/*
	 * Scan the wired pages; if every one is DMA reachable no bounce
	 * buffer is needed.
	 */
	npages = atop(sz);
	for (i = 0; i < npages; i++) {
		paddr_t pa;

		if (!pmap_extract(map->pmap, start + ptoa(i), &pa)) {
			error = EFAULT;
			goto out_unwire;
		}
		if (!PADDR_IS_DMA_REACHABLE(pa))
			break;
	}
	if (i == npages) {
		/* no bouncing required; map stays read-locked */
		*retp = NULL;
		return (0);
	}

	/* allocate kernel va space for the bounce buffer */
	va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, &kd_nowait);
	if (va == 0) {
		error = ENOMEM;
		goto out_unwire;
	}
	sva = va;

	/* back the va range with DMA-reachable pages */
	TAILQ_INIT(&pgl);
	error = uvm_pglistalloc(npages * PAGE_SIZE, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &pgl, npages, UVM_PLA_WAITOK);
	if (error)
		goto out_unmap;

	while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
		TAILQ_REMOVE(&pgl, pg, pageq);
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	KASSERT(va == sva + sz);
	*retp = (void *)(sva + off);

	/* seed the bounce buffer with the current user data */
	if ((error = copyin(addr, *retp, len)) == 0)
		return 0;	/* success: map stays read-locked */

	/* copyin failed: tear down the bounce pages before unmapping */
	uvm_km_pgremove_intrsafe(sva, sva + sz);
	pmap_kremove(sva, sz);
	pmap_update(pmap_kernel());
out_unmap:
	km_free((void *)sva, sz, &kv_any, &kp_none);
out_unwire:
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
	return (error);
}
2269b40e6e1Sart 
/*
 * uvm_vsunlock_device: unwire user memory wired by uvm_vslock_device()
 *
 * - called from physio
 * - "map" is the bounce-buffer cookie returned via *retp by
 *   uvm_vslock_device() (NULL if no bouncing was needed)
 * - drops the vm_map read lock that uvm_vslock_device() left held
 */
void
uvm_vsunlock_device(struct proc *p, void *addr, size_t len, void *map)
{
	vaddr_t start, end;
	vaddr_t kva;
	vsize_t sz;

	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	KASSERT(end > start);
	sz = end - start;

	/* flush the bounce buffer back to user space before unwiring */
	if (map)
		copyout(map, addr, len);

	uvm_fault_unwire_locked(&p->p_vmspace->vm_map, start, end);
	vm_map_unlock_read(&p->p_vmspace->vm_map);

	if (!map)
		return;

	/* free the bounce buffer: pages, mappings, then the va range */
	kva = trunc_page((vaddr_t)map);
	uvm_km_pgremove_intrsafe(kva, kva + sz);
	pmap_kremove(kva, sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, kva, sz);
}
2599b40e6e1Sart 
/*
 * kv_uarea: kernel-va allocation mode for u-areas -- allocate from
 * kernel_map with USPACE_ALIGN alignment (alignment requirement is
 * machine-dependent; see the MD definition of USPACE_ALIGN).
 */
const struct kmem_va_mode kv_uarea = {
	.kv_map = &kernel_map,
	.kv_align = USPACE_ALIGN
};
26440be1218Skettenis 
2659b40e6e1Sart /*
2660411e090Sguenther  * uvm_uarea_alloc: allocate the u-area for a new thread
2670411e090Sguenther  */
2680411e090Sguenther vaddr_t
2690411e090Sguenther uvm_uarea_alloc(void)
2700411e090Sguenther {
27140be1218Skettenis 	return (vaddr_t)km_alloc(USPACE, &kv_uarea, &kp_zero, &kd_waitok);
2720411e090Sguenther }
2730411e090Sguenther 
2740411e090Sguenther /*
275924be113Sguenther  * uvm_uarea_free: free a dead thread's stack
276cd7ee8acSart  *
277924be113Sguenther  * - the thread passed to us is a dead thread; we
278924be113Sguenther  *   are running on a different context now (the reaper).
279cd7ee8acSart  */
280cd7ee8acSart void
281924be113Sguenther uvm_uarea_free(struct proc *p)
282cd7ee8acSart {
28340be1218Skettenis 	km_free(p->p_addr, USPACE, &kv_uarea, &kp_zero);
284924be113Sguenther 	p->p_addr = NULL;
285cd7ee8acSart }
286cd7ee8acSart 
/*
 * uvm_exit: exit a virtual address space
 *
 * - drops the process's reference on its vmspace
 */
void
uvm_exit(struct process *pr)
{
	struct vmspace *vm = pr->ps_vmspace;

	/* clear the pointer before dropping the reference */
	pr->ps_vmspace = NULL;
	uvmspace_free(vm);
}
298cd7ee8acSart 
299cd7ee8acSart /*
300cd7ee8acSart  * uvm_init_limit: init per-process VM limits
301cd7ee8acSart  *
302cd7ee8acSart  * - called for process 0 and then inherited by all others.
303cd7ee8acSart  */
304cd7ee8acSart void
305edc99bcdSvisa uvm_init_limits(struct plimit *limit0)
306cd7ee8acSart {
307cd7ee8acSart 	/*
308cd7ee8acSart 	 * Set up the initial limits on process VM.  Set the maximum
309cd7ee8acSart 	 * resident set size to be all of (reasonably) available memory.
310cd7ee8acSart 	 * This causes any single, large process to start random page
311cd7ee8acSart 	 * replacement once it fills memory.
312cd7ee8acSart 	 */
313edc99bcdSvisa 	limit0->pl_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
314edc99bcdSvisa 	limit0->pl_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
315edc99bcdSvisa 	limit0->pl_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
316edc99bcdSvisa 	limit0->pl_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
317edc99bcdSvisa 	limit0->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
318cd7ee8acSart }
319cd7ee8acSart 
320f88a4ea9Smiod #ifdef __HAVE_PMAP_COLLECT
321f88a4ea9Smiod 
#ifdef DEBUG
int	enableswap = 1;		/* debug knob: 0 disables swapout entirely */
int	swapdebug = 0;		/* bitmask of SDB_* flags below */
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif
329cd7ee8acSart 
330cd7ee8acSart 
331cd7ee8acSart /*
33246718002Smiod  * swapout_threads: find threads that can be swapped
333cd7ee8acSart  *
334cd7ee8acSart  * - called by the pagedaemon
3354af3577fSjsg  * - try and swap at least one process
336cd7ee8acSart  * - processes that are sleeping or stopped for maxslp or more seconds
337cd7ee8acSart  *   are swapped... otherwise the longest-sleeping or stopped process
338cd7ee8acSart  *   is swapped, otherwise the longest resident process...
339cd7ee8acSart  */
340*e451d413Smpi int
34146718002Smiod uvm_swapout_threads(void)
342cd7ee8acSart {
343924be113Sguenther 	struct process *pr;
344924be113Sguenther 	struct proc *p, *slpp;
345924be113Sguenther 	struct process *outpr;
346*e451d413Smpi 	int free, outpri;
347cd7ee8acSart 	int didswap = 0;
348cd7ee8acSart 	extern int maxslp;
349cd7ee8acSart 	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */
350cd7ee8acSart 
351cd7ee8acSart #ifdef DEBUG
352cd7ee8acSart 	if (!enableswap)
353cd7ee8acSart 		return;
354cd7ee8acSart #endif
355cd7ee8acSart 
356*e451d413Smpi 	free = uvmexp.free;
357*e451d413Smpi 
358cd7ee8acSart 	/*
359924be113Sguenther 	 * outpr/outpri  : stop/sleep process whose most active thread has
360924be113Sguenther 	 *	the largest sleeptime < maxslp
361cd7ee8acSart 	 */
362924be113Sguenther 	outpr = NULL;
363924be113Sguenther 	outpri = 0;
364924be113Sguenther 	LIST_FOREACH(pr, &allprocess, ps_list) {
365924be113Sguenther 		if (pr->ps_flags & (PS_SYSTEM | PS_EXITING))
366cd7ee8acSart 			continue;
367924be113Sguenther 
368924be113Sguenther 		/*
369924be113Sguenther 		 * slpp: the sleeping or stopped thread in pr with
370924be113Sguenther 		 * the smallest p_slptime
371924be113Sguenther 		 */
372924be113Sguenther 		slpp = NULL;
373193f316cSmpi 		TAILQ_FOREACH(p, &pr->ps_threads, p_thr_link) {
374cd7ee8acSart 			switch (p->p_stat) {
375cd7ee8acSart 			case SRUN:
376924be113Sguenther 			case SONPROC:
377924be113Sguenther 				goto next_process;
378cd7ee8acSart 
379cd7ee8acSart 			case SSLEEP:
380cd7ee8acSart 			case SSTOP:
381924be113Sguenther 				if (slpp == NULL ||
382924be113Sguenther 				    slpp->p_slptime < p->p_slptime)
383924be113Sguenther 					slpp = p;
384cd7ee8acSart 				continue;
385cd7ee8acSart 			}
386cd7ee8acSart 		}
387cd7ee8acSart 
388924be113Sguenther 		if (slpp != NULL) {
389924be113Sguenther 			if (slpp->p_slptime >= maxslp) {
390924be113Sguenther 				pmap_collect(pr->ps_vmspace->vm_map.pmap);
391924be113Sguenther 				didswap++;
392924be113Sguenther 			} else if (slpp->p_slptime > outpri) {
393924be113Sguenther 				outpr = pr;
394924be113Sguenther 				outpri = slpp->p_slptime;
395924be113Sguenther 			}
396924be113Sguenther 		}
397924be113Sguenther next_process:	;
398924be113Sguenther 	}
399924be113Sguenther 
400cd7ee8acSart 	/*
401cd7ee8acSart 	 * If we didn't get rid of any real duds, toss out the next most
402cd7ee8acSart 	 * likely sleeping/stopped or running candidate.  We only do this
403cd7ee8acSart 	 * if we are real low on memory since we don't gain much by doing
40446718002Smiod 	 * it.
405cd7ee8acSart 	 */
406*e451d413Smpi 	if (didswap == 0 && free <= atop(round_page(USPACE)) && outpr != NULL) {
407cd7ee8acSart #ifdef DEBUG
408cd7ee8acSart 		if (swapdebug & SDB_SWAPOUT)
409924be113Sguenther 			printf("swapout_threads: no duds, try procpr %p\n",
410924be113Sguenther 			    outpr);
411cd7ee8acSart #endif
412924be113Sguenther 		pmap_collect(outpr->ps_vmspace->vm_map.pmap);
413cd7ee8acSart 	}
414*e451d413Smpi 
415*e451d413Smpi 	/*
416*e451d413Smpi 	 * XXX might return a non-0 value even if pmap_collect() didn't
417*e451d413Smpi 	 * free anything.
418*e451d413Smpi 	 */
419*e451d413Smpi 	return (uvmexp.free - free);
420cd7ee8acSart }
42159f84dd2Soga 
422f88a4ea9Smiod #endif	/* __HAVE_PMAP_COLLECT */
423f88a4ea9Smiod 
42459f84dd2Soga /*
42559f84dd2Soga  * uvm_atopg: convert KVAs back to their page structures.
42659f84dd2Soga  */
42759f84dd2Soga struct vm_page *
42859f84dd2Soga uvm_atopg(vaddr_t kva)
42959f84dd2Soga {
43059f84dd2Soga 	struct vm_page *pg;
43159f84dd2Soga 	paddr_t pa;
43259f84dd2Soga 	boolean_t rv;
43359f84dd2Soga 
43459f84dd2Soga 	rv = pmap_extract(pmap_kernel(), kva, &pa);
43559f84dd2Soga 	KASSERT(rv);
43659f84dd2Soga 	pg = PHYS_TO_VM_PAGE(pa);
43759f84dd2Soga 	KASSERT(pg != NULL);
43859f84dd2Soga 	return (pg);
43959f84dd2Soga }
440a371dcf1Stedu 
441d15bb7c9Suebayasi #ifndef SMALL_KERNEL
442d15bb7c9Suebayasi int
443d15bb7c9Suebayasi fill_vmmap(struct process *pr, struct kinfo_vmentry *kve,
444d15bb7c9Suebayasi     size_t *lenp)
445d15bb7c9Suebayasi {
446d15bb7c9Suebayasi 	struct vm_map *map;
447d15bb7c9Suebayasi 
448d15bb7c9Suebayasi 	if (pr != NULL)
449d15bb7c9Suebayasi 		map = &pr->ps_vmspace->vm_map;
450d15bb7c9Suebayasi 	else
451d15bb7c9Suebayasi 		map = kernel_map;
452d15bb7c9Suebayasi 	return uvm_map_fill_vmmap(map, kve, lenp);
453d15bb7c9Suebayasi }
454d15bb7c9Suebayasi #endif
455