/*	$OpenBSD: uvm_extern.h,v 1.180 2024/11/19 06:18:26 jsg Exp $	*/
/*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

typedef int vm_fault_t;

typedef int vm_inherit_t;	/* XXX: inheritance codes */
typedef off_t voff_t;		/* XXX: offset within a uvm_object */

struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;

struct vm_map;
typedef struct vm_map *vm_map_t;

struct vm_page;
typedef struct vm_page  *vm_page_t;
/*
 * Bits assigned by UVM_MAPFLAG() and extracted by
 * UVM_{PROTECTION,INHERIT,MAXPROTECTION,ADVICE}():
 * bits 0-2	protection
 *  bit 3	 unused
 * bits 4-5	inheritance
 *  bits 6-7	 unused
 * bits 8-10	max protection
 *  bit 11	 unused
 * bits 12-14	advice
 *  bit 15	 unused
 * bits 16-N	flags
 */

/* protection bits */
#define PROT_MASK	(PROT_READ | PROT_WRITE | PROT_EXEC)

/* inherit codes */
#define MAP_INHERIT_MASK	0x3	/* inherit mask */

typedef int		vm_prot_t;

#define MADV_MASK	0x7	/* advice mask */

/* mapping flags */
#define UVM_FLAG_FIXED   0x0010000 /* map at the given address; don't search */
#define UVM_FLAG_OVERLAY 0x0020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x0040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x0080000 /* set copy_on_write flag */
#define UVM_FLAG_TRYLOCK 0x0100000 /* fail if we cannot lock map */
#define UVM_FLAG_HOLE    0x0200000 /* no backend */
#define UVM_FLAG_QUERY   0x0400000 /* do everything, except actual execution */
#define UVM_FLAG_NOFAULT 0x0800000 /* don't fault */
#define UVM_FLAG_UNMAP   0x1000000 /* unmap to make space */
#define UVM_FLAG_STACK   0x2000000 /* page may contain a stack */
#define UVM_FLAG_WC      0x4000000 /* write combining */
#define UVM_FLAG_CONCEAL 0x8000000 /* omit from dumps */
#define UVM_FLAG_SIGALTSTACK 0x20000000 /* sigaltstack validation required */

/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & PROT_MASK)
#define UVM_INHERIT(X)		(((X) >> 4) & MAP_INHERIT_MASK)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & MADV_MASK)

#define UVM_MAPFLAG(prot, maxprot, inh, advice, flags) \
	((prot) | ((maxprot) << 8) | ((inh) << 4) | ((advice) << 12) | (flags))
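
/*
 * Editor's sketch (not part of the original header): packing a map-flags
 * word and extracting the fields again.  MAP_INHERIT_COPY and MADV_NORMAL
 * are assumed to come from <sys/mman.h>.
 *
 *	unsigned int flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
 *	    PROT_MASK, MAP_INHERIT_COPY, MADV_NORMAL, UVM_FLAG_COPYONW);
 *
 *	UVM_PROTECTION(flags)    == (PROT_READ | PROT_WRITE)
 *	UVM_MAXPROTECTION(flags) == PROT_MASK
 *	UVM_INHERIT(flags)       == MAP_INHERIT_COPY
 *	UVM_ADVICE(flags)        == MADV_NORMAL
 */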

/* magic offset value */
#define UVM_UNKNOWN_OFFSET ((voff_t) -1)
				/* offset not known (obj) or don't care (!obj) */

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT	0x1			/* matches M_NOWAIT */
#define UVM_KMF_VALLOC	0x2			/* allocate VA only */
#define UVM_KMF_CANFAIL	0x4			/* caller handles failure */
#define UVM_KMF_ZERO	0x08			/* zero pages */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */
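
/*
 * Editor's sketch (an assumption, not from this file): a non-sleeping,
 * zero-filled allocation from kmem_map via the uvm_km_kmemalloc() wrapper
 * macro declared further below; a return value of 0 is assumed here to
 * signal failure.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE,
 *	    UVM_KMF_NOWAIT | UVM_KMF_ZERO);
 *	if (va == 0)
 *		return (ENOMEM);
 */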

/*
 * flags for uvm_pagealloc()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define	UVM_PGA_ZERO		0x0002	/* returned page must be zeroed */
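
/*
 * Editor's sketch (an illustration, not from this file): asking
 * uvm_pagealloc(), declared below, for a single zeroed page that is not
 * backed by any object or anon, sleeping via uvm_wait() if none is free.
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL)
 *		uvm_wait("pgalloc");	/* sleep until pages are freed */
 */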

/*
 * flags for uvm_pglistalloc(), also used by uvm_pmr_getpages()
 */
#define UVM_PLA_WAITOK		0x0001	/* may sleep */
#define UVM_PLA_NOWAIT		0x0002	/* can't sleep (one of these two is required) */
#define UVM_PLA_ZERO		0x0004	/* zero all pages before returning */
#define UVM_PLA_TRYCONTIG	0x0008	/* try to allocate contig physmem */
#define UVM_PLA_FAILOK		0x0010	/* caller can handle failure */
#define UVM_PLA_NOWAKE		0x0020	/* don't wake page daemon on failure */
#define UVM_PLA_USERESERVE	0x0040	/* can allocate from kernel reserve */
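
/*
 * Editor's sketch: one physically contiguous, zeroed page below 4 GB
 * (the bounds are an illustrative assumption).  The argument order for
 * uvm_pglistalloc(), declared below, is size, low, high, alignment,
 * boundary, list, nsegs, flags.
 *
 *	struct pglist pgl;
 *	int error;
 *
 *	TAILQ_INIT(&pgl);
 *	error = uvm_pglistalloc(PAGE_SIZE, 0, 0xffffffff, 0, 0, &pgl, 1,
 *	    UVM_PLA_WAITOK | UVM_PLA_ZERO);
 *	...
 *	uvm_pglistfree(&pgl);
 */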

/*
 * lockflags that control the locking behavior of various functions.
 */
#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * flags to uvm_page_physload.
 */
#define	PHYSLOAD_DEVICE	0x01	/* don't add to the page queue */

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/mman.h>

#ifdef _KERNEL
struct buf;
struct pglist;
struct vmspace;
struct pmap;
#endif

#include <uvm/uvm_param.h>

#include <uvm/uvm_pmap.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_map.h>

#ifdef _KERNEL
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>
#endif

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 *
 *  Locks used to protect struct members in this file:
 *	K	kernel lock
 *	I	immutable after creation
 *	a	atomic operations
 *	v	vm_map's lock
 */
struct vmspace {
	struct	vm_map vm_map;	/* VM address map */
	int	vm_refcnt;	/* [a] number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_dused;	/* data segment length (pages) XXX */
	segsz_t vm_ssize;	/* [v] stack size (pages) */
	caddr_t	vm_taddr;	/* [I] user virtual address of text */
	caddr_t	vm_daddr;	/* [I] user virtual address of data */
	caddr_t vm_maxsaddr;	/* [I] user VA at max stack growth */
	caddr_t vm_minsaddr;	/* [I] user VA at top of stack */
};

/*
 * uvm_constraint_range's:
 * MD code is allowed to set up constraint ranges for memory allocators;
 * the primary use for this is to keep allocations for certain memory
 * consumers, such as mbuf pools, within address ranges that are reachable
 * by devices that perform DMA.
 *
 * It is also used to discourage memory allocations from being satisfied
 * from ranges such as the ISA memory range, if they can be satisfied
 * from other ranges.
 *
 * the MD ranges are defined in arch/ARCH/ARCH/machdep.c
 */
struct uvm_constraint_range {
	paddr_t	ucr_low;
	paddr_t ucr_high;
};
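
/*
 * Editor's sketch (an assumption, not from this file): an MD machdep.c
 * might pin DMA-reachable allocations below 4 GB with something like
 *
 *	struct uvm_constraint_range dma_constraint = { 0x0, 0xffffffffUL };
 */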

#ifdef _KERNEL

#include <uvm/uvmexp.h>
extern struct uvmexp uvmexp;

/* Constraint ranges, set by MD code. */
extern struct uvm_constraint_range  isa_constraint;
extern struct uvm_constraint_range  dma_constraint;
extern struct uvm_constraint_range  no_constraint;
extern struct uvm_constraint_range *uvm_md_constraints[];

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *phys_map;

/* base of kernel virtual memory */
extern vaddr_t vm_min_kernel_address;

#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))

struct plimit;

void			vmapbuf(struct buf *, vsize_t);
void			vunmapbuf(struct buf *, vsize_t);
struct uvm_object	*uao_create(vsize_t, int);
void			uao_detach(struct uvm_object *);
void			uao_reference(struct uvm_object *);
int			uvm_fault(vm_map_t, vaddr_t, vm_fault_t, vm_prot_t);

vaddr_t			uvm_uarea_alloc(void);
void			uvm_uarea_free(struct proc *);
void			uvm_exit(struct process *);
void			uvm_init_limits(struct plimit *);
boolean_t		uvm_kernacc(caddr_t, size_t, int);

int			uvm_vslock(struct proc *, caddr_t, size_t,
			    vm_prot_t);
void			uvm_vsunlock(struct proc *, caddr_t, size_t);
int			uvm_vslock_device(struct proc *, void *, size_t,
			    vm_prot_t, void **);
void			uvm_vsunlock_device(struct proc *, void *, size_t,
			    void *);
void			uvm_init(void);
void			uvm_init_percpu(void);
int			uvm_io(vm_map_t, struct uio *, int);

#define	UVM_IO_FIXPROT	0x01

void			uvm_km_free(vm_map_t, vaddr_t, vsize_t);
vaddr_t			uvm_km_kmemalloc_pla(struct vm_map *,
			    struct uvm_object *, vsize_t, vsize_t, int,
			    paddr_t, paddr_t, paddr_t, paddr_t, int);
#define uvm_km_kmemalloc(map, obj, sz, flags)				\
	uvm_km_kmemalloc_pla(map, obj, sz, 0, flags, 0, (paddr_t)-1, 0, 0, 0)
struct vm_map		*uvm_km_suballoc(vm_map_t, vaddr_t *, vaddr_t *,
			    vsize_t, int, boolean_t, vm_map_t);
/*
 * Allocation mode for virtual space.
 *
 *  kv_map - pointer to the pointer to the map we're allocating from.
 *  kv_align - alignment.
 *  kv_wait - wait for free space in the map if it's full. The default
 *   allocators don't wait since running out of space in kernel_map and
 *   kmem_map is usually fatal. Special maps like exec_map are specifically
 *   limited, so waiting for space in them is necessary.
 *  kv_singlepage - use the single page allocator.
 */
struct kmem_va_mode {
	struct vm_map **kv_map;
	vsize_t kv_align;
	char kv_wait;
	char kv_singlepage;
};

/*
 * Allocation mode for physical pages.
 *
 *  kp_constraint - allocation constraint for physical pages.
 *  kp_object - if the pages should be allocated from an object.
 *  kp_align - physical alignment of the first page in the allocation.
 *  kp_boundary - boundary that the physical addresses can't cross if
 *   the allocation is contiguous.
 *  kp_nomem - don't allocate any backing pages.
 *  kp_maxseg - maximal number of contiguous segments.
 *  kp_zero - zero the returned memory.
 *  kp_pageable - allocate pageable memory.
 */
struct kmem_pa_mode {
	struct uvm_constraint_range *kp_constraint;
	struct uvm_object **kp_object;
	paddr_t kp_align;
	paddr_t kp_boundary;
	int kp_maxseg;
	char kp_nomem;
	char kp_zero;
	char kp_pageable;
};

/*
 * Dynamic allocation parameters. Stuff that changes too often or too much
 * to create separate va and pa modes for.
 *
 * kd_waitok - is it ok to sleep?
 * kd_trylock - don't sleep on map locks.
 * kd_prefer - offset to feed to PMAP_PREFER
 * kd_slowdown - special parameter for the singlepage va allocator
 *  that tells the caller to sleep if possible to let the singlepage
 *  allocator catch up.
 */
struct kmem_dyn_mode {
	voff_t kd_prefer;
	int *kd_slowdown;
	char kd_waitok;
	char kd_trylock;
};

#define KMEM_DYN_INITIALIZER { UVM_UNKNOWN_OFFSET, NULL, 0, 0 }
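
/*
 * Editor's sketch: callers typically start from the initializer and
 * override only the fields they care about, e.g.
 *
 *	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
 *
 *	kd.kd_waitok = 1;	 ok to sleep for memory
 */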

/*
 * Notice that for kv_, waiting has a different meaning. It's only supposed
 * to be used for very space-constrained maps where waiting is a way
 * to throttle some other operation.
 * The exception is kv_page, which needs to wait relatively often.
 * All kv_ except kv_intrsafe will potentially sleep.
 */
extern const struct kmem_va_mode kv_any;
extern const struct kmem_va_mode kv_intrsafe;
extern const struct kmem_va_mode kv_page;

extern const struct kmem_pa_mode kp_dirty;
extern const struct kmem_pa_mode kp_zero;
extern const struct kmem_pa_mode kp_dma;
extern const struct kmem_pa_mode kp_dma_contig;
extern const struct kmem_pa_mode kp_dma_zero;
extern const struct kmem_pa_mode kp_pageable;
extern const struct kmem_pa_mode kp_none;

extern const struct kmem_dyn_mode kd_waitok;
extern const struct kmem_dyn_mode kd_nowait;
extern const struct kmem_dyn_mode kd_trylock;

void			*km_alloc(size_t, const struct kmem_va_mode *,
			    const struct kmem_pa_mode *,
			    const struct kmem_dyn_mode *);
void			km_free(void *, size_t, const struct kmem_va_mode *,
			    const struct kmem_pa_mode *);
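
/*
 * Editor's sketch of the allocator pair declared above: a zeroed,
 * DMA-reachable page mapped anywhere in kernel VA, released with the
 * same modes it was allocated with.
 *
 *	void *p;
 *
 *	p = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero, &kd_waitok);
 *	if (p == NULL)
 *		return (ENOMEM);
 *	...
 *	km_free(p, PAGE_SIZE, &kv_any, &kp_dma_zero);
 */
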
int			uvm_map(vm_map_t, vaddr_t *, vsize_t,
			    struct uvm_object *, voff_t, vsize_t, unsigned int);
int			uvm_mapanon(vm_map_t, vaddr_t *, vsize_t, vsize_t, unsigned int);
int			uvm_map_pageable(vm_map_t, vaddr_t,
			    vaddr_t, boolean_t, int);
int			uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t		uvm_map_checkprot(vm_map_t, vaddr_t,
			    vaddr_t, vm_prot_t);
int			uvm_map_protect(vm_map_t, vaddr_t,
			    vaddr_t, vm_prot_t, int etype, boolean_t, boolean_t);
struct vmspace		*uvmspace_alloc(vaddr_t, vaddr_t,
			    boolean_t, boolean_t);
void			uvmspace_init(struct vmspace *, struct pmap *,
			    vaddr_t, vaddr_t, boolean_t, boolean_t);
void			uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
struct vmspace		*uvmspace_fork(struct process *);
void			uvmspace_addref(struct vmspace *);
void			uvmspace_free(struct vmspace *);
struct vmspace		*uvmspace_share(struct process *);
int			uvm_share(vm_map_t, vaddr_t, vm_prot_t,
			    vm_map_t, vaddr_t, vsize_t);
int			uvm_sysctl(int *, u_int, void *, size_t *,
			    void *, size_t, struct proc *);
struct vm_page		*uvm_pagealloc(struct uvm_object *,
			    voff_t, struct vm_anon *, int);
int			uvm_pagealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int);
void			uvm_pagerealloc(struct vm_page *,
			    struct uvm_object *, voff_t);
int			uvm_pagerealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int, struct uvm_constraint_range *);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void			uvm_page_physload(paddr_t, paddr_t, paddr_t,
			    paddr_t, int);
void			uvm_setpagesize(void);
void			uvm_shutdown(void);
void			uvm_aio_biodone(struct buf *);
void			uvm_aio_aiodone(struct buf *);
void			uvm_pageout(void *);
void			uvm_aiodone_daemon(void *);
void			uvm_wait(const char *);
int			uvm_pglistalloc(psize_t, paddr_t, paddr_t,
			    paddr_t, paddr_t, struct pglist *, int, int);
void			uvm_pglistfree(struct pglist *);
void			uvm_pmr_use_inc(paddr_t, paddr_t);
void			uvm_swap_init(void);
typedef int		uvm_coredump_setup_cb(int _nsegment, void *_cookie);
typedef int		uvm_coredump_walk_cb(vaddr_t _start, vaddr_t _realend,
			    vaddr_t _end, vm_prot_t _prot, int _isvnode,
			    int _nsegment, void *_cookie);
int			uvm_coredump_walkmap(struct proc *_p,
			    uvm_coredump_setup_cb *_setup,
			    uvm_coredump_walk_cb *_walk, void *_cookie);
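
/*
 * Editor's sketch (hypothetical callbacks, not from this file): counting
 * writable segments with uvm_coredump_walkmap() for some struct proc *p.
 * The setup callback sees the total segment count once; the walk callback
 * is invoked per segment.
 *
 *	static int
 *	count_setup(int nsegment, void *cookie)
 *	{
 *		return (0);	 nothing to prepare
 *	}
 *
 *	static int
 *	count_walk(vaddr_t start, vaddr_t realend, vaddr_t end,
 *	    vm_prot_t prot, int isvnode, int nsegment, void *cookie)
 *	{
 *		if (prot & PROT_WRITE)
 *			(*(int *)cookie)++;
 *		return (0);
 *	}
 *
 *	int count = 0, error;
 *
 *	error = uvm_coredump_walkmap(p, count_setup, count_walk, &count);
 */
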
void			uvm_grow(struct proc *, vaddr_t);
void			uvm_pagezero_thread(void *);
void			kmeminit_nkmempages(void);
void			kmeminit(void);
extern u_int		nkmempages;

struct vnode;
struct uvm_object	*uvn_attach(struct vnode *, vm_prot_t);

struct process;
struct kinfo_vmentry;
int			fill_vmmap(struct process *, struct kinfo_vmentry *,
			    size_t *);

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */