/*	$OpenBSD: uvm_extern.h,v 1.103 2011/07/08 00:10:59 tedu Exp $	*/
/*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

/*
 * uvm_extern.h: this file defines the external interface to the VM system.
 *
 * this should be the only file included by non-VM parts of the kernel
 * which need access to VM services.   if you want to know the interface
 * to the MI VM layer without knowing the details, this is the file to
 * learn.
 *
 * NOTE: vm system calls are prototyped in syscallargs.h
 */

/*
 * typedefs, necessary for standard UVM headers.
 */

typedef unsigned int  uvm_flag_t;
typedef int vm_fault_t;

typedef int vm_inherit_t;	/* XXX: inheritance codes */
typedef off_t voff_t;		/* XXX: offset within a uvm_object */

union vm_map_object;
typedef union vm_map_object vm_map_object_t;

struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;

struct vm_map;
typedef struct vm_map *vm_map_t;

struct vm_page;
typedef struct vm_page  *vm_page_t;

/*
 * defines
 */

/*
 * the following defines are for uvm_map and functions which call it.
 */

/* protection bits */
#define UVM_PROT_MASK	0x07	/* protection mask */
#define UVM_PROT_NONE	0x00	/* protection none */
#define UVM_PROT_ALL	0x07	/* everything */
#define UVM_PROT_READ	0x01	/* read */
#define UVM_PROT_WRITE  0x02	/* write */
#define UVM_PROT_EXEC	0x04	/* exec */

/* protection short codes */
#define UVM_PROT_R	0x01	/* read */
#define UVM_PROT_W	0x02	/* write */
#define UVM_PROT_RW	0x03    /* read-write */
#define UVM_PROT_X	0x04	/* exec */
#define UVM_PROT_RX	0x05	/* read-exec */
#define UVM_PROT_WX	0x06	/* write-exec */
#define UVM_PROT_RWX	0x07	/* read-write-exec */

/* 0x08: not used */

/* inherit codes */
#define UVM_INH_MASK	0x30	/* inherit mask */
#define UVM_INH_SHARE	0x00	/* "share" */
#define UVM_INH_COPY	0x10	/* "copy" */
#define UVM_INH_NONE	0x20	/* "none" */
#define UVM_INH_DONATE	0x30	/* "donate" << not used */

/* 0x40, 0x80: not used */

/* bits 0x700: max protection, 0x800: not used */

/* bits 0x7000: advice, 0x8000: not used */

typedef int		vm_prot_t;

/*
 *	Protection values, defined as bits within the vm_prot_t type
 *
 *   These are funky definitions from old CMU VM and are kept
 *   for compatibility reasons, one day they are going to die,
 *   just like everybody else.
 */

#define	VM_PROT_NONE	((vm_prot_t) 0x00)

#define VM_PROT_READ	((vm_prot_t) 0x01)	/* read permission */
#define VM_PROT_WRITE	((vm_prot_t) 0x02)	/* write permission */
#define VM_PROT_EXECUTE	((vm_prot_t) 0x04)	/* execute permission */

/*
 *	The default protection for newly-created virtual memory
 */

#define VM_PROT_DEFAULT	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

/*
 *	The maximum privileges possible, for parameter checking.
 */

#define VM_PROT_ALL	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

/* advice: matches MADV_* from sys/mman.h */
#define UVM_ADV_NORMAL	0x0	/* 'normal' */
#define UVM_ADV_RANDOM	0x1	/* 'random' */
#define UVM_ADV_SEQUENTIAL 0x2	/* 'sequential' */
/* 0x3: will need, 0x4: dontneed */
#define UVM_ADV_MASK	0x7	/* mask */

/* mapping flags */
#define UVM_FLAG_FIXED   0x010000 /* find space */
#define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */
#define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */
#define UVM_FLAG_TRYLOCK 0x200000 /* fail if we cannot lock the map */
#define	UVM_FLAG_HOLE    0x400000 /* no backend */

/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & UVM_PROT_MASK)
#define UVM_INHERIT(X)		(((X) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

#define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
	((MAXPROT << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
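
/*
 * Illustrative sketch (not part of the original header): UVM_MAPFLAG()
 * packs the protection, maximum protection, inheritance, advice and
 * mapping flags into a single uvm_flag_t, and the extraction macros
 * above recover the individual fields, e.g.:
 *
 *	uvm_flag_t flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX,
 *	    UVM_INH_COPY, UVM_ADV_NORMAL, UVM_FLAG_COPYONW);
 *
 *	UVM_PROTECTION(flags)		== UVM_PROT_RW
 *	UVM_MAXPROTECTION(flags)	== UVM_PROT_RWX
 *	UVM_INHERIT(flags)		== 1 (the "copy" inherit code)
 *	UVM_ADVICE(flags)		== UVM_ADV_NORMAL
 */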

/* magic offset value */
#define UVM_UNKNOWN_OFFSET ((voff_t) -1)
				/* offset not known (obj) or don't care (!obj) */

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT	0x1			/* matches M_NOWAIT */
#define UVM_KMF_VALLOC	0x2			/* allocate VA only */
#define UVM_KMF_CANFAIL	0x4			/* caller handles failure */
#define UVM_KMF_ZERO	0x08			/* zero pages */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */

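/*
 * Minimal sketch (an assumption, not from the original header): a typical
 * non-sleeping allocation of one zeroed page of kernel memory through the
 * uvm_km_kmemalloc() wrapper declared later in this file; kmem_map is the
 * kernel submap declared further down.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE,
 *	    UVM_KMF_NOWAIT | UVM_KMF_ZERO);
 *	if (va == 0)
 *		...the non-sleeping allocation failed; handle ENOMEM...
 */
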
/*
 * flags for uvm_pagealloc()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define	UVM_PGA_ZERO		0x0002	/* returned page must be zeroed */

/*
 * flags for uvm_pglistalloc()
 */
#define UVM_PLA_WAITOK		0x0001	/* may sleep */
#define UVM_PLA_NOWAIT		0x0002	/* can't sleep (need one of the two) */
#define UVM_PLA_ZERO		0x0004	/* zero all pages before returning */
#define UVM_PLA_TRYCONTIG	0x0008	/* try to allocate contig physmem */
#define UVM_PLA_FAILOK		0x0010	/* caller can handle failure */

/*
 * lockflags that control the locking behavior of various functions.
 */
#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * flags to uvm_page_physload.
 */
#define	PHYSLOAD_DEVICE	0x01	/* don't add to the page queue */

/*
 * structures
 */

struct buf;
struct core;
struct mount;
struct pglist;
struct proc;
struct ucred;
struct uio;
struct uvm_object;
struct vm_anon;
struct vm_aref;
struct vm_map;
struct vmspace;
struct pmap;
struct vnode;
struct pool;
struct simplelock;

/*
 * uvm_constraint_range:
 * MD code is allowed to set up constraint ranges for memory allocators; the
 * primary use for this is to keep allocations for certain memory consumers,
 * such as mbuf pools, within address ranges that are reachable by devices
 * that perform DMA.
 *
 * It is also used to discourage memory allocations from being satisfied from
 * ranges such as the ISA memory range, if they can be satisfied with
 * allocations from other ranges.
 *
 * the MD ranges are defined in arch/ARCH/ARCH/machdep.c
 */
struct uvm_constraint_range {
	paddr_t	ucr_low;
	paddr_t ucr_high;
};
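
/*
 * Illustrative sketch (hypothetical values, not from this header): an MD
 * machdep.c would typically provide ranges such as
 *
 *	struct uvm_constraint_range isa_constraint = { 0x0, 0x00ffffff };
 *	struct uvm_constraint_range dma_constraint = { 0x0, 0xffffffff };
 *
 * so that allocators can restrict e.g. mbuf cluster pages to DMA-reachable
 * physical memory.
 */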

extern struct pool *uvm_aiobuf_pool;

/*
 * uvmexp: global data structures that are exported to parts of the kernel
 * other than the vm system.
 */

struct uvmexp {
	/* vm_page constants */
	int pagesize;   /* size of a page (PAGE_SIZE): must be power of 2 */
	int pagemask;   /* page mask */
	int pageshift;  /* page shift */

	/* vm_page counters */
	int npages;     /* number of pages we manage */
	int free;       /* number of free pages */
	int active;     /* number of active pages */
	int inactive;   /* number of pages that we free'd but may want back */
	int paging;	/* number of pages in the process of being paged out */
	int wired;      /* number of wired pages */

	int zeropages;		/* number of zero'd pages */
	int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
	int reserve_kernel;	/* number of pages reserved for kernel */
	int anonpages;		/* number of pages used by anon pagers */
	int vnodepages;		/* number of pages used by vnode page cache */
	int vtextpages;		/* number of pages used by vtext vnodes */

	/* pageout params */
	int freemin;    /* min number of free pages */
	int freetarg;   /* target number of free pages */
	int inactarg;   /* target number of inactive pages */
	int wiredmax;   /* max number of wired pages */
	int anonmin;	/* min threshold for anon pages */
	int vtextmin;	/* min threshold for vtext pages */
	int vnodemin;	/* min threshold for vnode pages */
	int anonminpct;	/* min percent anon pages */
	int vtextminpct;/* min percent vtext pages */
	int vnodeminpct;/* min percent vnode pages */

	/* swap */
	int nswapdev;	/* number of configured swap devices in system */
	int swpages;	/* number of PAGE_SIZE'ed swap pages */
	int swpginuse;	/* number of swap pages in use */
	int swpgonly;	/* number of swap pages in use, not also in RAM */
	int nswget;	/* number of times fault calls uvm_swap_get() */
	int nanon;	/* total number of anons in the system */
	int nanonneeded;/* number of anons currently needed */
	int nfreeanon;	/* number of free anons */

	/* stat counters */
	int faults;		/* page fault count */
	int traps;		/* trap count */
	int intrs;		/* interrupt count */
	int swtch;		/* context switch count */
	int softs;		/* software interrupt count */
	int syscalls;		/* system calls */
	int pageins;		/* pagein operation count */
				/* pageouts are in pdpageouts below */
	int swapins;		/* swapins */
	int swapouts;		/* swapouts */
	int pgswapin;		/* pages swapped in */
	int pgswapout;		/* pages swapped out */
	int forks;  		/* forks */
	int forks_ppwait;	/* forks where parent waits */
	int forks_sharevm;	/* forks where vmspace is shared */
	int pga_zerohit;	/* pagealloc where zero wanted and zero
				   was available */
	int pga_zeromiss;	/* pagealloc where zero wanted and zero
				   not available */
	int zeroaborts;		/* number of times page zeroing was
				   aborted */

	/* fault subcounters */
	int fltnoram;	/* number of times fault was out of ram */
	int fltnoanon;	/* number of times fault was out of anons */
	int fltpgwait;	/* number of times fault had to wait on a page */
	int fltpgrele;	/* number of times fault found a released page */
	int fltrelck;	/* number of times fault relock called */
	int fltrelckok;	/* number of times fault relock is a success */
	int fltanget;	/* number of times fault gets anon page */
	int fltanretry;	/* number of times fault retries an anon get */
	int fltamcopy;	/* number of times fault clears "needs copy" */
	int fltnamap;	/* number of times fault maps a neighbor anon page */
	int fltnomap;	/* number of times fault maps a neighbor obj page */
	int fltlget;	/* number of times fault does a locked pgo_get */
	int fltget;	/* number of times fault does an unlocked get */
	int flt_anon;	/* number of times fault anon (case 1a) */
	int flt_acow;	/* number of times fault anon cow (case 1b) */
	int flt_obj;	/* number of times fault is on object page (2a) */
	int flt_prcopy;	/* number of times fault promotes with copy (2b) */
	int flt_przero;	/* number of times fault promotes with zerofill (2b) */

	/* daemon counters */
	int pdwoke;	/* number of times daemon woke up */
	int pdrevs;	/* number of times daemon rev'd clock hand */
	int pdswout;	/* number of times daemon called for swapout */
	int pdfreed;	/* number of pages daemon freed since boot */
	int pdscans;	/* number of pages daemon scanned since boot */
	int pdanscan;	/* number of anonymous pages scanned by daemon */
	int pdobscan;	/* number of object pages scanned by daemon */
	int pdreact;	/* number of pages daemon reactivated since boot */
	int pdbusy;	/* number of times daemon found a busy page */
	int pdpageouts;	/* number of times daemon started a pageout */
	int pdpending;	/* number of times daemon got a pending pageout */
	int pddeact;	/* number of pages daemon deactivates */
	int pdreanon;	/* anon pages reactivated due to min threshold */
	int pdrevnode;	/* vnode pages reactivated due to min threshold */
	int pdrevtext;	/* vtext pages reactivated due to min threshold */

	int fpswtch;	/* FPU context switches */
	int kmapent;	/* number of kernel map entries */
};

#ifdef _KERNEL
extern struct uvmexp uvmexp;
#endif

/*
 * Finally, bring in standard UVM headers.
 */
#include <sys/vmmeter.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <uvm/uvm_param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct	vm_map vm_map;	/* VM address map */
	int	vm_refcnt;	/* number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize; 	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_dused;	/* data segment length (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t	vm_taddr;	/* user virtual address of text XXX */
	caddr_t	vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
};

#ifdef _KERNEL

/*
 * used to keep state while iterating over the map for a core dump.
 */
struct uvm_coredump_state {
	void *cookie;		/* opaque for the caller */
	vaddr_t start;		/* start of region */
	vaddr_t realend;	/* real end of region */
	vaddr_t end;		/* virtual end of region */
	vm_prot_t prot;		/* protection of region */
	int flags;		/* flags; see below */
};

#define	UVM_COREDUMP_STACK	0x01	/* region is user stack */
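
/*
 * Hypothetical callback sketch (not from the original header):
 * uvm_coredump_walkmap(), declared further down, calls a function of this
 * shape once per region; the interpretation of the arguments is an
 * assumption based on the prototype.
 *
 *	int
 *	dump_region(struct proc *p, void *iocookie,
 *	    struct uvm_coredump_state *us)
 *	{
 *		...write [us->start, us->realend) to the dump file,
 *		   checking us->flags & UVM_COREDUMP_STACK as needed...
 *		return (0);
 *	}
 *
 * A nonzero return from the callback is presumably treated as an error and
 * ends the walk.
 */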

/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *phys_map;


/*
 * macros
 */

/* zalloc zeros memory, alloc does not */
#define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,0,TRUE)
#define uvm_km_alloc(MAP,SIZE)  uvm_km_alloc1(MAP,SIZE,0,FALSE)

#endif /* _KERNEL */

#ifdef	pmap_resident_count
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
#else
#define vm_resident_count(vm) ((vm)->vm_rssize)
#endif

/* XXX clean up later */
struct buf;
struct loadavg;
struct proc;
struct pmap;
struct vmspace;
struct vmtotal;
struct mount;
struct vnode;
struct core;

#ifdef _KERNEL

/* vm_machdep.c */
void		vmapbuf(struct buf *, vsize_t);
void		vunmapbuf(struct buf *, vsize_t);
void		cpu_fork(struct proc *, struct proc *, void *, size_t,
		    void (*)(void *), void *);

/* uvm_aobj.c */
struct uvm_object	*uao_create(vsize_t, int);
void			uao_detach(struct uvm_object *);
void			uao_detach_locked(struct uvm_object *);
void			uao_reference(struct uvm_object *);
void			uao_reference_locked(struct uvm_object *);

/* uvm_fault.c */
int			uvm_fault(vm_map_t, vaddr_t,
				vm_fault_t, vm_prot_t);
				/* handle a page fault */

/* uvm_glue.c */
#if defined(KGDB)
void			uvm_chgkprot(caddr_t, size_t, int);
#endif
void			uvm_fork(struct proc *, struct proc *, boolean_t,
			    void *, size_t, void (*)(void *), void *);
void			uvm_exit(struct proc *);
void			uvm_init_limits(struct proc *);
boolean_t		uvm_kernacc(caddr_t, size_t, int);

int			uvm_vslock(struct proc *, caddr_t, size_t,
			    vm_prot_t);
void			uvm_vsunlock(struct proc *, caddr_t, size_t);

int			uvm_vslock_device(struct proc *, void *, size_t,
			    vm_prot_t, void **);
void			uvm_vsunlock_device(struct proc *, void *, size_t,
			    void *);


/* uvm_init.c */
void			uvm_init(void);
				/* init the uvm system */

/* uvm_io.c */
int			uvm_io(vm_map_t, struct uio *, int);

#define	UVM_IO_FIXPROT	0x01

/* uvm_km.c */
vaddr_t			uvm_km_alloc1(vm_map_t, vsize_t, vsize_t, boolean_t);
void			uvm_km_free(vm_map_t, vaddr_t, vsize_t);
void			uvm_km_free_wakeup(vm_map_t, vaddr_t, vsize_t);
vaddr_t			uvm_km_kmemalloc_pla(struct vm_map *,
			    struct uvm_object *, vsize_t, vsize_t, int,
			    paddr_t, paddr_t, paddr_t, paddr_t, int);
#define uvm_km_kmemalloc(map, obj, sz, flags)				\
	uvm_km_kmemalloc_pla(map, obj, sz, 0, flags, 0, (paddr_t)-1, 0, 0, 0)
vaddr_t			uvm_km_valloc(vm_map_t, vsize_t);
vaddr_t			uvm_km_valloc_try(vm_map_t, vsize_t);
vaddr_t			uvm_km_valloc_wait(vm_map_t, vsize_t);
vaddr_t			uvm_km_valloc_align(struct vm_map *, vsize_t, vsize_t, int);
vaddr_t			uvm_km_valloc_prefer_wait(vm_map_t, vsize_t,
					voff_t);

struct vm_map		*uvm_km_suballoc(vm_map_t, vaddr_t *,
				vaddr_t *, vsize_t, int,
				boolean_t, vm_map_t);

/*
 * Allocation mode for virtual space.
 *
 *  kv_map - pointer to the pointer to the map we're allocating from.
 *  kv_align - alignment.
 *  kv_wait - wait for free space in the map if it's full. The default
 *   allocators don't wait since running out of space in kernel_map and
 *   kmem_map is usually fatal. Special maps like exec_map are specifically
 *   limited, so waiting for space in them is necessary.
 *  kv_singlepage - use the single page allocator.
 *  kv_executable - map the physical pages with PROT_EXEC.
 */
struct kmem_va_mode {
	struct vm_map **kv_map;
	vsize_t kv_align;
	char kv_wait;
	char kv_singlepage;
	char kv_executable;
};

/*
 * Allocation mode for physical pages.
 *
 *  kp_constraint - allocation constraint for physical pages.
 *  kp_object - if the pages should be allocated from an object.
 *  kp_align - physical alignment of the first page in the allocation.
 *  kp_boundary - boundary that the physical addresses can't cross if
 *   the allocation is contiguous.
 *  kp_nomem - don't allocate any backing pages.
 *  kp_maxseg - maximum number of contiguous segments.
 *  kp_zero - zero the returned memory.
 *  kp_pageable - allocate pageable memory.
 */
struct kmem_pa_mode {
	struct uvm_constraint_range *kp_constraint;
	struct uvm_object **kp_object;
	paddr_t kp_align;
	paddr_t kp_boundary;
	int kp_maxseg;
	char kp_nomem;
	char kp_zero;
	char kp_pageable;
};

/*
 * Dynamic allocation parameters. Stuff that changes too often or too much
 * to create separate va and pa modes for.
 *
 * kd_waitok - is it ok to sleep?
 * kd_trylock - don't sleep on map locks.
 * kd_prefer - offset to feed to PMAP_PREFER
 * kd_slowdown - special parameter for the singlepage va allocator
 *  that tells the caller to sleep if possible to let the singlepage
 *  allocator catch up.
 */
struct kmem_dyn_mode {
	voff_t kd_prefer;
	int *kd_slowdown;
	char kd_waitok;
	char kd_trylock;
};

#define KMEM_DYN_INITIALIZER { UVM_UNKNOWN_OFFSET, NULL, 0, 0 }

/*
 * Notice that for the kv_ modes, waiting has a different meaning: it's only
 * supposed to be used for very space-constrained maps where waiting is a way
 * to throttle some other operation.
 * The exception is kv_page, which needs to wait relatively often.
 * All kv_ modes except kv_intrsafe will potentially sleep.
 */
extern const struct kmem_va_mode kv_any;
extern const struct kmem_va_mode kv_intrsafe;
extern const struct kmem_va_mode kv_page;

extern const struct kmem_pa_mode kp_dirty;
extern const struct kmem_pa_mode kp_zero;
extern const struct kmem_pa_mode kp_dma;
extern const struct kmem_pa_mode kp_dma_contig;
extern const struct kmem_pa_mode kp_dma_zero;
extern const struct kmem_pa_mode kp_pageable;
extern const struct kmem_pa_mode kp_none;

extern const struct kmem_dyn_mode kd_waitok;
extern const struct kmem_dyn_mode kd_nowait;
extern const struct kmem_dyn_mode kd_trylock;


void *km_alloc(size_t, const struct kmem_va_mode *, const struct kmem_pa_mode *,
    const struct kmem_dyn_mode *);
void km_free(void *, size_t, const struct kmem_va_mode *,
    const struct kmem_pa_mode *);
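
/*
 * Minimal usage sketch (not part of the original header), using the constant
 * allocation modes declared above: grab a page of zeroed kernel memory from
 * any kernel map, sleeping until the request can be satisfied, and release
 * it again.
 *
 *	void *p;
 *
 *	p = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_waitok);
 *	...
 *	km_free(p, PAGE_SIZE, &kv_any, &kp_zero);
 */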

/* uvm_map.c */
#define	uvm_map(_m, _a, _sz, _u, _f, _al, _fl) uvm_map_p(_m, _a, _sz, _u, _f, _al, _fl, 0)
int			uvm_map_p(vm_map_t, vaddr_t *, vsize_t,
				struct uvm_object *, voff_t, vsize_t,
				uvm_flag_t, struct proc *);
int			uvm_map_pageable(vm_map_t, vaddr_t,
				vaddr_t, boolean_t, int);
int			uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t		uvm_map_checkprot(vm_map_t, vaddr_t,
				vaddr_t, vm_prot_t);
int			uvm_map_protect(vm_map_t, vaddr_t,
				vaddr_t, vm_prot_t, boolean_t);
struct vmspace		*uvmspace_alloc(vaddr_t, vaddr_t,
				boolean_t, boolean_t);
void			uvmspace_init(struct vmspace *, struct pmap *,
				vaddr_t, vaddr_t, boolean_t, boolean_t);
void			uvmspace_exec(struct proc *, vaddr_t, vaddr_t);
struct vmspace		*uvmspace_fork(struct vmspace *);
void			uvmspace_free(struct vmspace *);
void			uvmspace_share(struct proc *, struct proc *);


/* uvm_meter.c */
void			uvm_meter(void);
int			uvm_sysctl(int *, u_int, void *, size_t *,
				void *, size_t, struct proc *);
void			uvm_total(struct vmtotal *);

/* uvm_mmap.c */
int			uvm_mmap(vm_map_t, vaddr_t *, vsize_t,
				vm_prot_t, vm_prot_t, int,
				caddr_t, voff_t, vsize_t, struct proc *);

/* uvm_page.c */
struct vm_page		*uvm_pagealloc(struct uvm_object *,
				voff_t, struct vm_anon *, int);
vaddr_t			uvm_pagealloc_contig(vaddr_t, vaddr_t,
				vaddr_t, vaddr_t);
void			uvm_pagealloc_multi(struct uvm_object *, voff_t,
    			    vsize_t, int);
void			uvm_pagerealloc(struct vm_page *,
					     struct uvm_object *, voff_t);
void			uvm_pagerealloc_multi(struct uvm_object *, voff_t,
			    vsize_t, int, struct uvm_constraint_range *);
/* Actually, uvm_page_physload takes PF#s which need their own type */
void			uvm_page_physload(paddr_t, paddr_t, paddr_t,
			    paddr_t, int);
void			uvm_setpagesize(void);
void			uvm_shutdown(void);

/* uvm_pager.c */
void			uvm_aio_biodone1(struct buf *);
void			uvm_aio_biodone(struct buf *);
void			uvm_aio_aiodone(struct buf *);

/* uvm_pdaemon.c */
void			uvm_pageout(void *);
void			uvm_aiodone_daemon(void *);
void			uvm_wait(const char *);

/* uvm_pglist.c */
int			uvm_pglistalloc(psize_t, paddr_t,
				paddr_t, paddr_t, paddr_t,
				struct pglist *, int, int);
void			uvm_pglistfree(struct pglist *);
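
/*
 * Illustrative sketch (the argument interpretation is an assumption, not
 * stated in this header): allocate four zeroed pages below 16MB into a page
 * list, assuming the usual (size, low, high, alignment, boundary, rlist,
 * nsegs, flags) ordering, and give them back with uvm_pglistfree().
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	if (uvm_pglistalloc(4 * PAGE_SIZE, 0, 0x1000000, PAGE_SIZE, 0,
 *	    &pgl, 4, UVM_PLA_WAITOK | UVM_PLA_ZERO) != 0)
 *		...allocation failed; handle ENOMEM...
 *	...
 *	uvm_pglistfree(&pgl);
 */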

/* uvm_pmemrange.c */
void			uvm_pmr_use_inc(paddr_t, paddr_t);

/* uvm_swap.c */
void			uvm_swap_init(void);

/* uvm_unix.c */
int			uvm_coredump(struct proc *, struct vnode *,
				struct ucred *, struct core *);
int			uvm_coredump_walkmap(struct proc *,
			    void *,
			    int (*)(struct proc *, void *,
				    struct uvm_coredump_state *), void *);
void			uvm_grow(struct proc *, vaddr_t);

/* uvm_user.c */
void			uvm_deallocate(vm_map_t, vaddr_t, vsize_t);

/* uvm_vnode.c */
void			uvm_vnp_setsize(struct vnode *, voff_t);
void			uvm_vnp_sync(struct mount *);
void 			uvm_vnp_terminate(struct vnode *);
				/* terminate a uvm/uvn object */
boolean_t		uvm_vnp_uncache(struct vnode *);
struct uvm_object	*uvn_attach(void *, vm_prot_t);

/* kern_malloc.c */
void			kmeminit_nkmempages(void);
void			kmeminit(void);
extern u_int		nkmempages;

#endif /* _KERNEL */

#endif /* _UVM_UVM_EXTERN_H_ */