/*	$OpenBSD: uvm_extern.h,v 1.119 2014/07/11 16:35:40 jsg Exp $	*/
/*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

typedef unsigned int  uvm_flag_t;
typedef int vm_fault_t;

typedef int vm_inherit_t;	/* XXX: inheritance codes */
typedef off_t voff_t;		/* XXX: offset within a uvm_object */

union vm_map_object;
typedef union vm_map_object vm_map_object_t;

struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;

struct vm_map;
typedef struct vm_map *vm_map_t;

struct vm_page;
typedef struct vm_page  *vm_page_t;

/* protection bits */
#define UVM_PROT_MASK	0x07	/* protection mask */
#define UVM_PROT_NONE	0x00	/* protection none */
#define UVM_PROT_ALL	0x07	/* everything */
#define UVM_PROT_READ	0x01	/* read */
#define UVM_PROT_WRITE  0x02	/* write */
#define UVM_PROT_EXEC	0x04	/* exec */

/* protection short codes */
#define UVM_PROT_R	0x01	/* read */
#define UVM_PROT_W	0x02	/* write */
#define UVM_PROT_RW	0x03    /* read-write */
#define UVM_PROT_X	0x04	/* exec */
#define UVM_PROT_RX	0x05	/* read-exec */
#define UVM_PROT_WX	0x06	/* write-exec */
#define UVM_PROT_RWX	0x07	/* read-write-exec */

/* 0x08: not used */

/* inherit codes */
#define UVM_INH_MASK	0x30	/* inherit mask */
#define UVM_INH_SHARE	0x00	/* "share" */
#define UVM_INH_COPY	0x10	/* "copy" */
#define UVM_INH_NONE	0x20	/* "none" */
#define UVM_INH_ZERO	0x30	/* "zero" */

/* 0x40, 0x80: not used */

/* bits 0x700: max protection, 0x800: not used */

/* bits 0x7000: advice, 0x8000: not used */

typedef int		vm_prot_t;

/*
 *	Protection values, defined as bits within the vm_prot_t type
 *
 *   These are funky definitions from old CMU VM and are kept
 *   for compatibility reasons; one day they are going to die,
 *   just like everybody else.
 */

#define	VM_PROT_NONE	((vm_prot_t) 0x00)

#define VM_PROT_READ	((vm_prot_t) 0x01)	/* read permission */
#define VM_PROT_WRITE	((vm_prot_t) 0x02)	/* write permission */
#define VM_PROT_EXECUTE	((vm_prot_t) 0x04)	/* execute permission */

/*
 *	The default protection for newly-created virtual memory
 */

#define VM_PROT_DEFAULT	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

/*
 *	The maximum privileges possible, for parameter checking.
 */

#define VM_PROT_ALL	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

/* advice: matches MADV_* from sys/mman.h */
#define UVM_ADV_NORMAL	0x0	/* 'normal' */
#define UVM_ADV_RANDOM	0x1	/* 'random' */
#define UVM_ADV_SEQUENTIAL 0x2	/* 'sequential' */
/* 0x3: will need, 0x4: dontneed */
#define UVM_ADV_MASK	0x7	/* mask */

/* mapping flags */
#define UVM_FLAG_FIXED   0x010000 /* map at the given address exactly */
#define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */
#define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */
#define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */
#define	UVM_FLAG_HOLE    0x400000 /* no backend */
#define UVM_FLAG_QUERY   0x800000 /* do everything, except actual execution */

/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & UVM_PROT_MASK)
#define UVM_INHERIT(X)		(((X) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

#define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
	((MAXPROT << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
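
/*
 * Illustrative example of how the encoding above fits together: pack a
 * read/write mapping with read/write/exec maximum protection, copy
 * inheritance, normal advice and the copy-on-write flag, then pull the
 * fields back out with the extraction macros:
 *
 *	uvm_flag_t flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX,
 *	    UVM_INH_COPY, UVM_ADV_NORMAL, UVM_FLAG_COPYONW);
 *
 *	UVM_PROTECTION(flags)    == UVM_PROT_RW		(0x03)
 *	UVM_MAXPROTECTION(flags) == UVM_PROT_RWX	(0x07)
 *	UVM_INHERIT(flags)       == UVM_INH_COPY >> 4	(0x01)
 *	UVM_ADVICE(flags)        == UVM_ADV_NORMAL	(0x00)
 */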

/* magic offset value */
#define UVM_UNKNOWN_OFFSET ((voff_t) -1)
				/* offset not known(obj) or don't care(!obj) */

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT	0x1			/* matches M_NOWAIT */
#define UVM_KMF_VALLOC	0x2			/* allocate VA only */
#define UVM_KMF_CANFAIL	0x4			/* caller handles failure */
#define UVM_KMF_ZERO	0x08			/* zero pages */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */

/*
 * flags for uvm_pagealloc()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define	UVM_PGA_ZERO		0x0002	/* returned page must be zeroed */

/*
 * flags for uvm_pglistalloc()
 */
#define UVM_PLA_WAITOK		0x0001	/* may sleep */
#define UVM_PLA_NOWAIT		0x0002	/* can't sleep (need one of the two) */
#define UVM_PLA_ZERO		0x0004	/* zero all pages before returning */
#define UVM_PLA_TRYCONTIG	0x0008	/* try to allocate contig physmem */
#define UVM_PLA_FAILOK		0x0010	/* caller can handle failure */

/*
 * lockflags that control the locking behavior of various functions.
 */
#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * flags to uvm_page_physload.
 */
#define	PHYSLOAD_DEVICE	0x01	/* don't add to the page queue */

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/lock.h>

#ifdef _KERNEL
struct buf;
struct core;
struct mount;
struct pglist;
struct vmspace;
struct pmap;
#endif

#include <uvm/uvm_param.h>

#include <uvm/uvm_pmap.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_map.h>

#ifdef _KERNEL
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>
#endif

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct	vm_map vm_map;	/* VM address map */
	int	vm_refcnt;	/* number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize; 	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_dused;	/* data segment length (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t	vm_taddr;	/* user virtual address of text XXX */
	caddr_t	vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
};

/*
 * uvm_constraint_range's:
 * MD code is allowed to set up constraint ranges for memory allocators; the
 * primary use for this is to keep allocations for certain memory consumers
 * such as mbuf pools within address ranges that are reachable by devices
 * that perform DMA.
 *
 * It is also used to discourage memory allocations from being satisfied
 * from ranges such as the ISA memory range, if they can be satisfied with
 * allocations from other ranges.
 *
 * The MD ranges are defined in arch/ARCH/ARCH/machdep.c.
 */
struct uvm_constraint_range {
	paddr_t	ucr_low;
	paddr_t ucr_high;
};
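
/*
 * Illustrative sketch (the values are hypothetical, not taken from any
 * particular port): an MD machdep.c could constrain DMA-reachable
 * allocations to the low 4GB of physical address space like this:
 *
 *	struct uvm_constraint_range dma_constraint = { 0x0, 0xffffffffUL };
 */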

#ifdef _KERNEL

#include <uvm/uvmexp.h>
extern struct uvmexp uvmexp;

/* Constraint ranges, set by MD code. */
extern struct uvm_constraint_range  isa_constraint;
extern struct uvm_constraint_range  dma_constraint;
extern struct uvm_constraint_range  no_constraint;
extern struct uvm_constraint_range *uvm_md_constraints[];

extern struct pool *uvm_aiobuf_pool;

/*
 * used to keep state while iterating over the map for a core dump.
 */
struct uvm_coredump_state {
	void *cookie;		/* opaque for the caller */
	vaddr_t start;		/* start of region */
	vaddr_t realend;	/* real end of region */
	vaddr_t end;		/* virtual end of region */
	vm_prot_t prot;		/* protection of region */
	int flags;		/* flags; see below */
};

#define	UVM_COREDUMP_STACK	0x01	/* region is user stack */

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *phys_map;


/* zalloc zeros memory, alloc does not */
#define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,0,TRUE)
#define uvm_km_alloc(MAP,SIZE)  uvm_km_alloc1(MAP,SIZE,0,FALSE)

#ifdef	pmap_resident_count
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
#else
#define vm_resident_count(vm) ((vm)->vm_rssize)
#endif

void			vmapbuf(struct buf *, vsize_t);
void			vunmapbuf(struct buf *, vsize_t);
void			cpu_fork(struct proc *, struct proc *, void *,
			    size_t, void (*)(void *), void *);
struct uvm_object	*uao_create(vsize_t, int);
void			uao_detach(struct uvm_object *);
void			uao_detach_locked(struct uvm_object *);
void			uao_reference(struct uvm_object *);
void			uao_reference_locked(struct uvm_object *);
int			uvm_fault(vm_map_t, vaddr_t, vm_fault_t, vm_prot_t);

#if defined(KGDB)
void			uvm_chgkprot(caddr_t, size_t, int);
#endif
vaddr_t			uvm_uarea_alloc(void);
void			uvm_uarea_free(struct proc *);
void			uvm_exit(struct process *);
void			uvm_init_limits(struct proc *);
boolean_t		uvm_kernacc(caddr_t, size_t, int);

int			uvm_vslock(struct proc *, caddr_t, size_t,
			    vm_prot_t);
void			uvm_vsunlock(struct proc *, caddr_t, size_t);
int			uvm_vslock_device(struct proc *, void *, size_t,
			    vm_prot_t, void **);
void			uvm_vsunlock_device(struct proc *, void *, size_t,
			    void *);
void			uvm_pause(void);
void			uvm_init(void);
int			uvm_io(vm_map_t, struct uio *, int);

#define	UVM_IO_FIXPROT	0x01

vaddr_t			uvm_km_alloc1(vm_map_t, vsize_t, vsize_t, boolean_t);
void			uvm_km_free(vm_map_t, vaddr_t, vsize_t);
void			uvm_km_free_wakeup(vm_map_t, vaddr_t, vsize_t);
vaddr_t			uvm_km_kmemalloc_pla(struct vm_map *,
			    struct uvm_object *, vsize_t, vsize_t, int,
			    paddr_t, paddr_t, paddr_t, paddr_t, int);
#define uvm_km_kmemalloc(map, obj, sz, flags)				\
	uvm_km_kmemalloc_pla(map, obj, sz, 0, flags, 0, (paddr_t)-1, 0, 0, 0)
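
/*
 * Illustrative sketch only (whether a NULL object and kmem_map are
 * appropriate depends on the caller; a return of 0 is taken to mean
 * failure here): allocate a page of kernel memory without sleeping and
 * release it again with uvm_km_free().
 *
 *	vaddr_t va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE,
 *	    UVM_KMF_NOWAIT | UVM_KMF_CANFAIL);
 *	if (va != 0)
 *		uvm_km_free(kmem_map, va, PAGE_SIZE);
 */
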
vaddr_t			uvm_km_valloc(vm_map_t, vsize_t);
vaddr_t			uvm_km_valloc_try(vm_map_t, vsize_t);
vaddr_t			uvm_km_valloc_wait(vm_map_t, vsize_t);
vaddr_t			uvm_km_valloc_align(struct vm_map *, vsize_t,
			    vsize_t, int);
vaddr_t			uvm_km_valloc_prefer_wait(vm_map_t, vsize_t, voff_t);
struct vm_map		*uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
			    vsize_t, int, boolean_t, vm_map_t);
/*
 * Allocation mode for virtual space.
 *
 *  kv_map - pointer to the pointer to the map we're allocating from.
 *  kv_align - alignment.
 *  kv_wait - wait for free space in the map if it's full. The default
 *   allocators don't wait since running out of space in kernel_map and
 *   kmem_map is usually fatal. Special maps like exec_map are specifically
 *   limited, so waiting for space in them is necessary.
 *  kv_singlepage - use the single page allocator.
 *  kv_executable - map the physical pages with PROT_EXEC.
 */
struct kmem_va_mode {
	struct vm_map **kv_map;
	vsize_t kv_align;
	char kv_wait;
	char kv_singlepage;
	char kv_executable;
};

/*
 * Allocation mode for physical pages.
 *
 *  kp_constraint - allocation constraint for physical pages.
 *  kp_object - if the pages should be allocated from an object.
 *  kp_align - physical alignment of the first page in the allocation.
 *  kp_boundary - boundary that the physical addresses can't cross if
 *   the allocation is contiguous.
 *  kp_nomem - don't allocate any backing pages.
 *  kp_maxseg - maximum number of contiguous segments.
 *  kp_zero - zero the returned memory.
 *  kp_pageable - allocate pageable memory.
 */
struct kmem_pa_mode {
	struct uvm_constraint_range *kp_constraint;
	struct uvm_object **kp_object;
	paddr_t kp_align;
	paddr_t kp_boundary;
	int kp_maxseg;
	char kp_nomem;
	char kp_zero;
	char kp_pageable;
};

/*
 * Dynamic allocation parameters. Stuff that changes too often or too much
 * to create separate va and pa modes for.
 *
 * kd_waitok - is it ok to sleep?
 * kd_trylock - don't sleep on map locks.
 * kd_prefer - offset to feed to PMAP_PREFER
 * kd_slowdown - special parameter for the singlepage va allocator
 *  that tells the caller to sleep if possible to let the singlepage
 *  allocator catch up.
 */
struct kmem_dyn_mode {
	voff_t kd_prefer;
	int *kd_slowdown;
	char kd_waitok;
	char kd_trylock;
};

#define KMEM_DYN_INITIALIZER { UVM_UNKNOWN_OFFSET, NULL, 0, 0 }
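
/*
 * Illustrative sketch (some_offset is a hypothetical value): callers that
 * do not fit the canned kd_* modes declared below can start from the
 * initializer above, e.g. to pass a preferred offset to PMAP_PREFER while
 * still being allowed to sleep:
 *
 *	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
 *	kd.kd_waitok = 1;
 *	kd.kd_prefer = some_offset;
 */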

/*
 * Notice that for the kv_ modes, waiting has a different meaning: it is
 * only supposed to be used for very space-constrained maps where waiting
 * is a way to throttle some other operation.
 * The exception is kv_page, which needs to wait relatively often.
 * All kv_ modes except kv_intrsafe will potentially sleep.
 */
extern const struct kmem_va_mode kv_any;
extern const struct kmem_va_mode kv_intrsafe;
extern const struct kmem_va_mode kv_page;

extern const struct kmem_pa_mode kp_dirty;
extern const struct kmem_pa_mode kp_zero;
extern const struct kmem_pa_mode kp_dma;
extern const struct kmem_pa_mode kp_dma_contig;
extern const struct kmem_pa_mode kp_dma_zero;
extern const struct kmem_pa_mode kp_pageable;
extern const struct kmem_pa_mode kp_none;

extern const struct kmem_dyn_mode kd_waitok;
extern const struct kmem_dyn_mode kd_nowait;
extern const struct kmem_dyn_mode kd_trylock;

void			*km_alloc(size_t, const struct kmem_va_mode *,
			    const struct kmem_pa_mode *,
			    const struct kmem_dyn_mode *);
void			km_free(void *, size_t, const struct kmem_va_mode *,
			    const struct kmem_pa_mode *);
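
/*
 * Illustrative sketch using the constant modes declared above (assuming
 * the usual convention that a NULL return signals failure): allocate one
 * zeroed, DMA-reachable page of kernel memory and free it again.
 *
 *	void *va = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero, &kd_waitok);
 *	if (va != NULL)
 *		km_free(va, PAGE_SIZE, &kv_any, &kp_dma_zero);
 */
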
int			uvm_map(vm_map_t, vaddr_t *, vsize_t,
			    struct uvm_object *, voff_t, vsize_t, uvm_flag_t);
int			uvm_map_pageable(vm_map_t, vaddr_t,
			    vaddr_t, boolean_t, int);
int			uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t		uvm_map_checkprot(vm_map_t, vaddr_t,
			    vaddr_t, vm_prot_t);
int			uvm_map_protect(vm_map_t, vaddr_t,
			    vaddr_t, vm_prot_t, boolean_t);
struct vmspace		*uvmspace_alloc(vaddr_t, vaddr_t,
			    boolean_t, boolean_t);
void			uvmspace_init(struct vmspace *, struct pmap *,
			    vaddr_t, vaddr_t, boolean_t, boolean_t);
void			uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
struct vmspace		*uvmspace_fork(struct process *);
void			uvmspace_free(struct vmspace *);
struct vmspace		*uvmspace_share(struct process *);
void			uvm_meter(void);
int			uvm_sysctl(int *, u_int, void *, size_t *,
			    void *, size_t, struct proc *);
int			uvm_mmap(vm_map_t, vaddr_t *, vsize_t,
			    vm_prot_t, vm_prot_t, int,
			    caddr_t, voff_t, vsize_t, struct proc *);
struct vm_page		*uvm_pagealloc(struct uvm_object *,
			    voff_t, struct vm_anon *, int);
vaddr_t			uvm_pagealloc_contig(vaddr_t, vaddr_t,
			    vaddr_t, vaddr_t);
void			uvm_pagealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int);
void			uvm_pagerealloc(struct vm_page *,
			    struct uvm_object *, voff_t);
void			uvm_pagerealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int, struct uvm_constraint_range *);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void			uvm_page_physload(paddr_t, paddr_t, paddr_t,
			    paddr_t, int);
void			uvm_setpagesize(void);
void			uvm_shutdown(void);
void			uvm_aio_biodone1(struct buf *);
void			uvm_aio_biodone(struct buf *);
void			uvm_aio_aiodone(struct buf *);
void			uvm_pageout(void *);
void			uvm_aiodone_daemon(void *);
void			uvm_wait(const char *);
int			uvm_pglistalloc(psize_t, paddr_t, paddr_t,
			    paddr_t, paddr_t, struct pglist *, int, int);
void			uvm_pglistfree(struct pglist *);
void			uvm_pmr_use_inc(paddr_t, paddr_t);
void			uvm_swap_init(void);
int			uvm_coredump(struct proc *, struct vnode *,
			    struct ucred *, struct core *);
int			uvm_coredump_walkmap(struct proc *,
			    void *, int (*)(struct proc *, void *,
			    struct uvm_coredump_state *), void *);
void			uvm_grow(struct proc *, vaddr_t);
void			uvm_deallocate(vm_map_t, vaddr_t, vsize_t);
void			uvm_vnp_setsize(struct vnode *, voff_t);
void			uvm_vnp_sync(struct mount *);
void 			uvm_vnp_terminate(struct vnode *);
boolean_t		uvm_vnp_uncache(struct vnode *);
struct uvm_object	*uvn_attach(struct vnode *, vm_prot_t);
void			kmeminit_nkmempages(void);
void			kmeminit(void);
extern u_int		nkmempages;

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */