/*	$NetBSD: uvm_map.h,v 1.58 2007/07/22 21:07:47 he Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }
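
/*
 * Example (illustrative sketch; `map', `entry', `start', `end' and
 * `umr' are hypothetical): a typical caller clips an entry against
 * both ends of a range before modifying it, so that the entry is
 * trimmed to the portion inside [start, end) and the modification
 * cannot spill outside the range:
 *
 *	UVM_MAP_CLIP_START(map, entry, start, umr);
 *	UVM_MAP_CLIP_END(map, entry, end, umr);
 *	... modify `entry', which now lies entirely inside [start, end) ...
 */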

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
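
/*
 * Example (illustrative sketch; all variable names are hypothetical):
 * extracting a range from one map into another with uvm_map_extract(),
 * declared below.  UVM_EXTRACT_REMOVE unmaps the source range as it
 * goes; UVM_EXTRACT_CONTIG asks for a contiguous destination:
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcaddr, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 *	if (error == 0)
 *		... `len' bytes are now mapped at `dstaddr' in `dstmap' ...
 */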

#endif /* _KERNEL */

#include <sys/tree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02		/* contains map entries */
#define	UVM_MAP_FIRST		0x04		/* the first special entry */
#define	UVM_MAP_QUANTUM		0x08		/* allocated with
						 * UVM_FLAG_QUANTUM */
#define	UVM_MAP_NOMERGE		0x10		/* this entry is not mergeable */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
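
/*
 * Example (illustrative sketch; `map' and `entry' are hypothetical):
 * the entries of a map form a circular doubly-linked list rooted at
 * the map's `header' sentinel (see struct vm_map below), so a caller
 * holding the map lock can walk them like this:
 *
 *	struct vm_map_entry *entry;
 *
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		if (VM_MAPENT_ISWIRED(entry))
 *			... `entry' is wired and cannot be paged out ...
 *	}
 */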

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  It is sometimes necessary to
 *	downgrade an exclusive lock to a shared lock, and later to
 *	upgrade back to an exclusive lock (to perform error recovery).
 *	However, another thread *must not* queue itself to receive an
 *	exclusive lock before we upgrade back to exclusive; otherwise
 *	the error recovery becomes extremely difficult, if not impossible.
 *
 *	In order to prevent this scenario, we introduce the notion of
 *	a `busy' map.  A `busy' map is read-locked, but other threads
 *	attempting to write-lock wait for this flag to clear before
 *	entering the lock manager.  A map may only be marked busy
 *	when the map is write-locked (and then the map must be downgraded
 *	to read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).
 *
 *	Access to the map `flags' member is controlled by the `flags_lock'
 *	simple lock.  Note that some flags are static (set once at map
 *	creation time, and never changed), and thus require no locking
 *	to check those flags.  All flags which are r/w must be set or
 *	cleared while the `flags_lock' is asserted.  Additional locking
 *	requirements are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  May be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_DYING		r/o; set when a vmspace is being
 *					destroyed to indicate that updates
 *					to the pmap can be skipped.
 *
 *		VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *					created if map allocations with no
 *					address specified are to be arranged
 *					in a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		mutex;		/* INTRSAFE lock */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kmutex_t		hint_lock;	/* lock for hint storage */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
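
/*
 * Example (illustrative sketch) of the busy-map protocol described
 * above, using the locking operations declared later in this file.
 * Marking the map busy holds off would-be writers across the
 * downgrade/upgrade window, so no other thread can take the exclusive
 * lock before we upgrade back to it:
 *
 *	vm_map_lock(map);	write-lock the map
 *	vm_map_busy(map);	mark busy while write-locked
 *	vm_map_downgrade(map);	now read-locked, still busy
 *	... do work that may need error recovery ...
 *	vm_map_upgrade(map);	back to the write lock
 *	vm_map_unbusy(map);	only the marking thread may unmark
 *	vm_map_unlock(map);
 */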

#if defined(_KERNEL)

#include <sys/callback.h>

struct vm_map_kernel {
	struct vm_map vmk_map;
	LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free;
			/* Freelist of map entries */
	struct vm_map_entry	*vmk_merged_entries;
			/* Merged entries, kept for later splitting */

	struct callback_head vmk_reclaim_callback;
#if !defined(PMAP_MAP_POOLPAGE)
	struct pool vmk_vacache; /* kva cache */
	struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
#endif
};
#endif /* defined(_KERNEL) */

#define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_VACACHE		0x80		/* ro: use kva cache */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */

#ifdef _KERNEL
struct uvm_mapent_reservation {
	struct vm_map_entry *umr_entries[2];
	int umr_nentries;
};
#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
#define	UMR_PUTENTRY(umr, ent)	\
	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
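
/*
 * Example (illustrative sketch; `map', `entry' and `va' are
 * hypothetical): clipping can require allocating new map entries,
 * which is not safe at every call site, so callers pre-reserve
 * entries with uvm_mapent_reserve() (declared below) and pass the
 * reservation down through the UMR arguments:
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	if (uvm_mapent_reserve(map, &umr, 2, 0) == 0) {
 *		vm_map_lock(map);
 *		UVM_MAP_CLIP_START(map, entry, va, &umr);
 *		vm_map_unlock(map);
 *		uvm_mapent_unreserve(map, &umr);
 *	}
 */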

struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_setup_kernel(struct vm_map_kernel *,
		    vaddr_t, vaddr_t, int);
struct vm_map_kernel *
		vm_map_to_kernel(struct vm_map *);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, struct uvm_mapent_reservation *,
		    int);
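
/*
 * Example (illustrative sketch; `map', `start' and `end' are
 * hypothetical): uvm_unmap_remove() detaches dead entries under the
 * map lock and hands them back to the caller, who drops their backing
 * references with uvm_unmap_detach() after unlocking.  This is
 * roughly what the uvm_unmap() macro ends up doing via uvm_unmap1():
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */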

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);
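
/*
 * Example (illustrative sketch): uvm_map_prepare() and uvm_map_enter()
 * split a mapping operation into an argument-checking phase and a
 * commit phase, so the caller can allocate the new map entry in
 * between.  The entry allocator shown is hypothetical; the real one
 * is private to uvm_map.c:
 *
 *	struct uvm_map_args args;
 *	struct vm_map_entry *new_entry;
 *	int error;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset,
 *	    0, flags, &args);
 *	if (error == 0) {
 *		new_entry = alloc_map_entry();	hypothetical allocator
 *		error = uvm_map_enter(map, &args, new_entry);
 *	}
 */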

int		uvm_mapent_reserve(struct vm_map *,
		    struct uvm_mapent_reservation *, int, int);
void		uvm_mapent_unreserve(struct vm_map *,
		    struct uvm_mapent_reservation *);

vsize_t		uvm_mapent_overhead(vsize_t, int);

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool		vm_map_starved_p(struct vm_map *);

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_upgrade(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

static inline void
vm_map_lock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

static inline void
vm_map_unlock_read(struct vm_map *map)
{

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	rw_exit(&map->lock);
}

/*
 * vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 */

static inline void
vm_map_downgrade(struct vm_map *map)
{

	rw_downgrade(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

static inline void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))
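
/*
 * Note that the macros above read "reversed": the `header' sentinel
 * stores the map's minimum address in its `end' field and the maximum
 * in its `start' field.  This appears intentional: viewed as an entry
 * in the circular list, the sentinel then sits before the first real
 * entry and after the last one without overlapping either.
 */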

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */