/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_map.h,v 1.30 2007/04/29 18:25:41 dillon Exp $
 */

/*
 *	Virtual memory map module definitions.
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SYSREF_H_
#include <sys/sysref.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_eflags_t;

/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* object object */
	struct vm_map *sub_map;		/* belongs to another map */
};

union vm_map_aux {
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	vpte_t master_pde;		/* virtual page table root */
};

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 *
 *	When used with MAP_STACK, avail_ssize is used to determine the
 *	limits of stack growth.
 *
 *	When used with VM_MAPTYPE_VPAGETABLE, avail_ssize stores the
 *	page directory index.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if = 0 */
};
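
/*
 * Illustrative helper, not part of the original header: wired_count is
 * documented above as "can be paged if = 0", so a non-zero count means
 * the pages backing this entry are wired and may not be paged out.  The
 * helper name is an assumption chosen for this example.
 */
static __inline int
vm_map_entry_is_wired(struct vm_map_entry *entry)
{
	return (entry->wired_count != 0);
}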

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}
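
/*
 * Usage sketch, not part of the original header: advising the VM system
 * that an entry will be accessed sequentially simply rewrites the
 * behavior bits via vm_map_entry_set_behavior().  The helper name is an
 * assumption chosen for this example.
 */
static __inline void
vm_map_entry_advise_sequential(struct vm_map_entry *entry)
{
	/* replace the current behavior bits with BEHAV_SEQUENTIAL */
	vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
}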

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	Note: the lock structure cannot be the first element of vm_map
 *	because that can produce a lockup between two or more system
 *	processes blocked in kmem_alloc_wait().  kmem_alloc_wait() and the
 *	corresponding free/wakeup path tsleep and wakeup on 'map', while
 *	the underlying lockmgr lock also sleeps and wakes up on 'map'.
 *	The lockup occurs when the map fills up (the 'exec' map, for
 *	example).
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	u_char infork;			/* Am I in fork processing? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	struct pmap *pmap;		/* Physical map */
#define	min_offset		header.start
#define max_offset		header.end
};
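
/*
 * Illustrative sketch, not part of the original header: the comment
 * above describes the map as a sorted, doubly-linked list of entries
 * anchored at the 'header' sentinel.  A simple linear walk looks like
 * the helper below, which returns the first entry ending beyond 'addr'
 * or NULL if there is none.  Real code uses vm_map_lookup_entry() and
 * the RB tree instead; the helper name is an assumption.
 */
static __inline vm_map_entry_t
vm_map_entry_scan_example(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t entry;

	for (entry = map->header.next;
	     entry != &map->header;
	     entry = entry->next) {
		if (entry->end > addr)
			return (entry);
	}
	return (NULL);
}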

/*
 * Registered upcall
 */
struct upcall;

struct vmupcall {
	struct vmupcall	*vu_next;
	void		*vu_func;	/* user upcall function */
	void		*vu_data;	/* user data */
	void		*vu_ctx;	/* user context function */
	struct lwp	*vu_lwp;	/* process that registered upcall */
	int		vu_id;		/* upcall identifier */
	int		vu_pending;	/* upcall request pending */
};

/*
 * Shareable process virtual address space.
 *
 * Refd pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_unused01;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
#define vm_endcopy	vm_exitingcnt
	int	vm_exitingcnt;	/* several processes zombied in exit1 */
	int	vm_upccount;	/* number of registered upcalls */
	int	vm_pagesupply;
	struct vmupcall *vm_upcalls;	/* registered upcalls */
	struct sysref vm_sysref;	/* sysref, refcnt, etc */
};
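
/*
 * Illustrative sketch, not part of the original header: the classic
 * per-process segment sizes above are kept in pages, so a combined
 * text + data + stack footprint can be computed directly from the
 * vmspace.  The helper name is an assumption chosen for this example.
 */
static __inline segsz_t
vmspace_segment_pages_example(struct vmspace *vm)
{
	return (vm->vm_tsize + vm->vm_dsize + vm->vm_ssize);
}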

/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;		/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* linked list of res sts */
	struct vmspace	*vr_vmspace;		/* vmspace to fork */
	intptr_t	vr_entry_addr;		/* registered entry point */
	struct sysentvec *vr_sysent;		/* system call vects */
	int		vr_id;			/* registration id */
};

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		kprintf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		kprintf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		kprintf ("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		kprintf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

static __inline__ int
vm_map_lock_upgrade(vm_map_t map) {
	int error;
#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif
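
/*
 * Usage sketch, not part of the original header: a typical pattern is to
 * take the map shared for the lookup phase and upgrade to exclusive only
 * when a modification turns out to be necessary.  If the upgrade fails
 * the shared lock is no longer held and the caller must reacquire the
 * lock (callers often simply retry the whole operation).  The helper
 * name and the simple fallback below are assumptions chosen for this
 * example.
 */
static __inline void
vm_map_modify_example(vm_map_t map)
{
	vm_map_lock_read(map);
	/* ... read-only examination of the map ... */
	if (vm_map_lock_upgrade(map) != 0) {
		/* upgrade failed; fall back to an exclusive acquisition */
		vm_map_lock(map);
	}
	/* ... modify the map while holding it exclusively ... */
	vm_map_unlock(map);
}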

#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define	MAX_MAPENT	256

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)

#ifdef _KERNEL

extern struct sysref_class vmspace_sysref_class;

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t,
		 vm_offset_t *, vm_size_t, vm_size_t,
		 boolean_t, vm_maptype_t,
		 vm_prot_t, vm_prot_t,
		 int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t,
		   vm_offset_t, vm_offset_t,
		   vm_maptype_t,
		   vm_prot_t, vm_prot_t,
		   int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vmspace_swap_count (struct vmspace *vmspace);
int vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *);

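/*
 * Usage sketch, not part of the original header: the MAP_RESERVE_COUNT
 * reserve defined earlier is intended to cover a nominal single
 * insertion, including any clipping it requires.  A caller reserves
 * entries before taking the map lock, performs the insertion, and then
 * releases whatever was not consumed.  The function below is an
 * illustrative assumption, not an existing kernel routine.
 */
static __inline void
vm_map_insert_pattern_example(vm_map_t map)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	/* ... vm_map_insert() / clipping calls go here ... */
	vm_map_unlock(map);
	vm_map_entry_release(count);
}
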
#endif
#endif				/* _VM_VM_MAP_H_ */