xref: /netbsd-src/sys/uvm/uvm_map.h (revision 9f15ed54af759217ba3a6257a3503f5b2540dfd4)
1*9f15ed54Skamil /*	$NetBSD: uvm_map.h,v 1.80 2020/05/26 00:50:53 kamil Exp $	*/
2f2caacc7Smrg 
3f2caacc7Smrg /*
4f2caacc7Smrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5f2caacc7Smrg  * Copyright (c) 1991, 1993, The Regents of the University of California.
6f2caacc7Smrg  *
7f2caacc7Smrg  * All rights reserved.
8f2caacc7Smrg  *
9f2caacc7Smrg  * This code is derived from software contributed to Berkeley by
10f2caacc7Smrg  * The Mach Operating System project at Carnegie-Mellon University.
11f2caacc7Smrg  *
12f2caacc7Smrg  * Redistribution and use in source and binary forms, with or without
13f2caacc7Smrg  * modification, are permitted provided that the following conditions
14f2caacc7Smrg  * are met:
15f2caacc7Smrg  * 1. Redistributions of source code must retain the above copyright
16f2caacc7Smrg  *    notice, this list of conditions and the following disclaimer.
17f2caacc7Smrg  * 2. Redistributions in binary form must reproduce the above copyright
18f2caacc7Smrg  *    notice, this list of conditions and the following disclaimer in the
19f2caacc7Smrg  *    documentation and/or other materials provided with the distribution.
2040ec801aSchuck  * 3. Neither the name of the University nor the names of its contributors
21f2caacc7Smrg  *    may be used to endorse or promote products derived from this software
22f2caacc7Smrg  *    without specific prior written permission.
23f2caacc7Smrg  *
24f2caacc7Smrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25f2caacc7Smrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26f2caacc7Smrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27f2caacc7Smrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28f2caacc7Smrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29f2caacc7Smrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30f2caacc7Smrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31f2caacc7Smrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32f2caacc7Smrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33f2caacc7Smrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34f2caacc7Smrg  * SUCH DAMAGE.
35f2caacc7Smrg  *
36f2caacc7Smrg  *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
371f6b921cSmrg  * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
38f2caacc7Smrg  *
39f2caacc7Smrg  *
40f2caacc7Smrg  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41f2caacc7Smrg  * All rights reserved.
42f2caacc7Smrg  *
43f2caacc7Smrg  * Permission to use, copy, modify and distribute this software and
44f2caacc7Smrg  * its documentation is hereby granted, provided that both the copyright
45f2caacc7Smrg  * notice and this permission notice appear in all copies of the
46f2caacc7Smrg  * software, derivative works or modified versions, and any portions
47f2caacc7Smrg  * thereof, and that both notices appear in supporting documentation.
48f2caacc7Smrg  *
49f2caacc7Smrg  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50f2caacc7Smrg  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51f2caacc7Smrg  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52f2caacc7Smrg  *
53f2caacc7Smrg  * Carnegie Mellon requests users of this software to return to
54f2caacc7Smrg  *
55f2caacc7Smrg  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56f2caacc7Smrg  *  School of Computer Science
57f2caacc7Smrg  *  Carnegie Mellon University
58f2caacc7Smrg  *  Pittsburgh PA 15213-3890
59f2caacc7Smrg  *
60f2caacc7Smrg  * any improvements or extensions that they make and grant Carnegie the
61f2caacc7Smrg  * rights to redistribute these changes.
62f2caacc7Smrg  */
63f2caacc7Smrg 
64021fdb64Sperry #ifndef _UVM_UVM_MAP_H_
65021fdb64Sperry #define _UVM_UVM_MAP_H_
66021fdb64Sperry 
67f2caacc7Smrg /*
68f2caacc7Smrg  * uvm_map.h
69f2caacc7Smrg  */
70f2caacc7Smrg 
71abb48c5bSthorpej #ifdef _KERNEL
72abb48c5bSthorpej 
73f2caacc7Smrg /*
74f2caacc7Smrg  * macros
75f2caacc7Smrg  */
76f2caacc7Smrg 
77f2caacc7Smrg /*
78f2caacc7Smrg  * UVM_MAP_CLIP_START: ensure that the entry begins at or after
79f2caacc7Smrg  * the starting address, if it doesn't we split the entry.
80f2caacc7Smrg  *
81f2caacc7Smrg  * => map must be locked by caller
82f2caacc7Smrg  */
83f2caacc7Smrg 
84e62ee4d4Spara #define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
85caaf3a94Schs 	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
86e62ee4d4Spara 		uvm_map_clip_start(MAP,ENTRY,VA); \
87caaf3a94Schs 	} \
88caaf3a94Schs }
89f2caacc7Smrg 
90f2caacc7Smrg /*
91f2caacc7Smrg  * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 92f2caacc7Smrg  *      the ending address, if it doesn't we split the entry.
93f2caacc7Smrg  *
94f2caacc7Smrg  * => map must be locked by caller
95f2caacc7Smrg  */
96f2caacc7Smrg 
97e62ee4d4Spara #define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
98caaf3a94Schs 	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
99e62ee4d4Spara 		uvm_map_clip_end(MAP,ENTRY,VA); \
100caaf3a94Schs 	} \
101caaf3a94Schs }
102f2caacc7Smrg 
103f2caacc7Smrg /*
104f2caacc7Smrg  * extract flags
105f2caacc7Smrg  */
106e8e7ab86Syamt #define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
107e8e7ab86Syamt #define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
108e8e7ab86Syamt #define UVM_EXTRACT_QREF	0x04	/* use quick refs */
109e8e7ab86Syamt #define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
110e8e7ab86Syamt #define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
11119ea7434Schristos #define UVM_EXTRACT_PROT_ALL	0x20	/* set prot to UVM_PROT_ALL */
112f2caacc7Smrg 
1136b5536f2Smrg #endif /* _KERNEL */
1146b5536f2Smrg 
11519e6c76bSmatt #include <sys/rbtree.h>
116a880e5e2Syamt #include <sys/pool.h>
1174688843dSad #include <sys/rwlock.h>
1184688843dSad #include <sys/mutex.h>
1194688843dSad #include <sys/condvar.h>
12057e554daSyamt 
12176655997Smrg #include <uvm/uvm_anon.h>
12276655997Smrg 
12376655997Smrg /*
12476655997Smrg  * Address map entries consist of start and end addresses,
12576655997Smrg  * a VM object (or sharing map) and offset into that object,
12676655997Smrg  * and user-exported inheritance and protection information.
12776655997Smrg  * Also included is control information for virtual copy operations.
1283b6112efSad  *
1293b6112efSad  * At runtime this is aligned on a cacheline boundary, with fields
1303b6112efSad  * used during fault processing to do RB tree lookup clustered at
1313b6112efSad  * the beginning.
13276655997Smrg  */
/*
 * vm_map_entry: one contiguous virtual address range [start, end) in a
 * map, with the object (or submap) it maps, protection/inheritance
 * attributes, and linkage into both the sorted entry list and the RB
 * tree.  Fields used by the RB-tree lookup on the fault path are
 * clustered at the front for cacheline locality (see comment above).
 * Field order is ABI/layout sensitive - do not reorder.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	uint8_t			etype;		/* entry type */
	uint8_t			flags;		/* flags (UVM_MAP_* below) */
	uint8_t			advice;		/* madvise advice */
	uint8_t			unused;		/* unused (padding) */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
};
15676655997Smrg 
1573b6112efSad /* flags */
1581207308bSyamt #define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
159e62ee4d4Spara #define	UVM_MAP_STATIC		0x04		/* special static entries */
160e62ee4d4Spara #define	UVM_MAP_NOMERGE		0x08		/* this entry is not mergable */
161a78a1b07Smatt 
16276655997Smrg #define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
16376655997Smrg 
16476655997Smrg /*
16576655997Smrg  *	Maps are doubly-linked lists of map entries, kept sorted
16676655997Smrg  *	by address.  A single hint is provided to start
16776655997Smrg  *	searches again from the last successful search,
16876655997Smrg  *	insertion, or removal.
16976655997Smrg  *
17076655997Smrg  *	LOCKING PROTOCOL NOTES:
17176655997Smrg  *	-----------------------
17276655997Smrg  *
17376655997Smrg  *	VM map locking is a little complicated.  There are both shared
17476655997Smrg  *	and exclusive locks on maps.  However, it is sometimes required
17576655997Smrg  *	to downgrade an exclusive lock to a shared lock, and upgrade to
17676655997Smrg  *	an exclusive lock again (to perform error recovery).  However,
17776655997Smrg  *	another thread *must not* queue itself to receive an exclusive
17876655997Smrg  *	lock while before we upgrade back to exclusive, otherwise the
17976655997Smrg  *	error recovery becomes extremely difficult, if not impossible.
18076655997Smrg  *
18176655997Smrg  *	In order to prevent this scenario, we introduce the notion of
18276655997Smrg  *	a `busy' map.  A `busy' map is read-locked, but other threads
18376655997Smrg  *	attempting to write-lock wait for this flag to clear before
18476655997Smrg  *	entering the lock manager.  A map may only be marked busy
18576655997Smrg  *	when the map is write-locked (and then the map must be downgraded
18676655997Smrg  *	to read-locked), and may only be marked unbusy by the thread
18776655997Smrg  *	which marked it busy (holding *either* a read-lock or a
18876655997Smrg  *	write-lock, the latter being gained by an upgrade).
18976655997Smrg  *
19076655997Smrg  *	Access to the map `flags' member is controlled by the `flags_lock'
19176655997Smrg  *	simple lock.  Note that some flags are static (set once at map
19276655997Smrg  *	creation time, and never changed), and thus require no locking
19376655997Smrg  *	to check those flags.  All flags which are r/w must be set or
19476655997Smrg  *	cleared while the `flags_lock' is asserted.  Additional locking
19576655997Smrg  *	requirements are:
19676655997Smrg  *
19776655997Smrg  *		VM_MAP_PAGEABLE		r/o static flag; no locking required
19876655997Smrg  *
19976655997Smrg  *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
20076655997Smrg  *					map is write-locked.  may be tested
20176655997Smrg  *					without asserting `flags_lock'.
20276655997Smrg  *
203df0a9badSatatat  *		VM_MAP_DYING		r/o; set when a vmspace is being
204df0a9badSatatat  *					destroyed to indicate that updates
205df0a9badSatatat  *					to the pmap can be skipped.
206df0a9badSatatat  *
207df0a9badSatatat  *		VM_MAP_TOPDOWN		r/o; set when the vmspace is
208df0a9badSatatat  *					created if the unspecified map
209df0a9badSatatat  *					allocations are to be arranged in
210df0a9badSatatat  *					a "top down" manner.
21176655997Smrg  */
/*
 * vm_map: the top-level address-space map structure.  See the locking
 * protocol notes above for how `lock', `busy', `misc_lock' and `cv'
 * interact.  `header' is a sentinel vm_map_entry: it heads the
 * doubly-linked entry list, and its start/end fields are repurposed to
 * store the map's max/min addresses (see vm_map_min/vm_map_max below).
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		misc_lock;	/* Lock for cv, busy */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags (VM_MAP_* below) */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries (sentinel) */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	volatile int		ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
22876655997Smrg 
22995c82bfeSyamt #if defined(_KERNEL)
230c24f70bcSyamt 
231c24f70bcSyamt #include <sys/callback.h>
232c24f70bcSyamt 
23395c82bfeSyamt #endif /* defined(_KERNEL) */
23495c82bfeSyamt 
23595c82bfeSyamt #define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())
23695c82bfeSyamt 
23776655997Smrg /* vm_map flags */
23876655997Smrg #define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
23976655997Smrg #define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
24094a62d45Schs #define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
241df0a9badSatatat #define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
24222099ab7Syamt #define	VM_MAP_WANTVA		0x100		/* rw: want va */
24376655997Smrg 
2444fdd4095Schristos #define VM_MAP_BITS	"\177\020\
2454fdd4095Schristos b\0PAGEABLE\0\
2464fdd4095Schristos b\2WIREFUTURE\0\
2474fdd4095Schristos b\5DYING\0\
2484fdd4095Schristos b\6TOPDOWN\0\
2494fdd4095Schristos b\10WANTVA\0"
2504fdd4095Schristos 
#ifdef _KERNEL
/*
 * uvm_map_args: intermediate state handed from uvm_map_prepare() to
 * uvm_map_enter() (prototypes below), allowing a map entry to be
 * prepared and inserted as two separate steps.  Callers treat this as
 * opaque; fields are filled in by uvm_map_prepare().
 */
struct uvm_map_args {
	struct uvm_map_entry *uma_prev;		/* NOTE(review): presumably the
						 * entry preceding the insertion
						 * point - confirm in uvm_map.c */

	vaddr_t uma_start;			/* start of new mapping */
	vsize_t uma_size;			/* size of new mapping */

	struct uvm_object *uma_uobj;		/* backing object (may be NULL) */
	voff_t uma_uoffset;			/* offset into backing object */

	uvm_flag_t uma_flags;			/* UVM_FLAG_* / mapping flags */
};
#endif /* _KERNEL */
26476655997Smrg 
26576655997Smrg /*
266f311a1c3Sthorpej  * globals:
267f311a1c3Sthorpej  */
268f311a1c3Sthorpej 
2696b5536f2Smrg #ifdef _KERNEL
2706b5536f2Smrg 
271e7a46bb8She #include <sys/proc.h>
272e7a46bb8She 
273f311a1c3Sthorpej #ifdef PMAP_GROWKERNEL
274f311a1c3Sthorpej extern vaddr_t	uvm_maxkaddr;
275f311a1c3Sthorpej #endif
276f311a1c3Sthorpej 
277f311a1c3Sthorpej /*
278f2caacc7Smrg  * protos: the following prototypes define the interface to vm_map
279f2caacc7Smrg  */
280f2caacc7Smrg 
281aa87bee0Senami void		uvm_map_deallocate(struct vm_map *);
282f2caacc7Smrg 
28316babfa6Syamt int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
284aa87bee0Senami int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
285aa87bee0Senami void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
286e62ee4d4Spara 		    vaddr_t);
287aa87bee0Senami void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
288e62ee4d4Spara 		    vaddr_t);
289aa87bee0Senami int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
290aa87bee0Senami 		    struct vm_map *, vaddr_t *, int);
291aa87bee0Senami struct vm_map_entry *
292aa87bee0Senami 		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
293aa87bee0Senami 		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
294aa87bee0Senami int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
295aa87bee0Senami 		    vm_inherit_t);
296aa87bee0Senami int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
297aa87bee0Senami void		uvm_map_init(void);
298e62ee4d4Spara void		uvm_map_init_caches(void);
299712239e3Sthorpej bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
300aa87bee0Senami 		    struct vm_map_entry **);
301aa87bee0Senami void		uvm_map_reference(struct vm_map *);
302aa87bee0Senami int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
303e8e7ab86Syamt 		    vaddr_t *, uvm_flag_t);
304aa87bee0Senami void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
305aa87bee0Senami int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
306aa87bee0Senami 		    struct vm_map *);
307e4666bf7Syamt void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
308e4666bf7Syamt #define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
309aa87bee0Senami void		uvm_unmap_detach(struct vm_map_entry *,int);
310aa87bee0Senami void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
311e62ee4d4Spara 		    struct vm_map_entry **, int);
3121207308bSyamt 
3131207308bSyamt int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
3141207308bSyamt 		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
3151207308bSyamt 		    struct uvm_map_args *);
3161207308bSyamt int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
3171207308bSyamt 		    struct vm_map_entry *);
3181207308bSyamt 
31950a25accSyamt int		uvm_mapent_trymerge(struct vm_map *,
32050a25accSyamt 		    struct vm_map_entry *, int);
32150a25accSyamt #define	UVM_MERGE_COPYING	1
322f2caacc7Smrg 
32376655997Smrg /*
3244688843dSad  * VM map locking operations.
32576655997Smrg  */
32676655997Smrg 
3274688843dSad bool		vm_map_lock_try(struct vm_map *);
3284688843dSad void		vm_map_lock(struct vm_map *);
3294688843dSad void		vm_map_unlock(struct vm_map *);
3304688843dSad void		vm_map_unbusy(struct vm_map *);
3314a780c9aSad void		vm_map_lock_read(struct vm_map *);
3324a780c9aSad void		vm_map_unlock_read(struct vm_map *);
3334a780c9aSad void		vm_map_busy(struct vm_map *);
3344a780c9aSad bool		vm_map_locked_p(struct vm_map *);
33576655997Smrg 
336d2a0ebb6Sad void		uvm_map_lock_entry(struct vm_map_entry *, krw_t);
337e225b7bdSrmind void		uvm_map_unlock_entry(struct vm_map_entry *);
338e225b7bdSrmind 
33976655997Smrg #endif /* _KERNEL */
34076655997Smrg 
34176655997Smrg /*
34276655997Smrg  *	Functions implemented as macros
34376655997Smrg  */
/*
 * Note the apparent inversion: the sentinel `header' entry's `end'
 * field stores the map's minimum address and its `start' field stores
 * the maximum, as established by vm_map_setmin()/vm_map_setmax().
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))

#define		vm_map_pmap(map)	((map)->pmap)
35076655997Smrg 
351021fdb64Sperry #endif /* _UVM_UVM_MAP_H_ */
352