/*	$OpenBSD: uvm_map.h,v 1.51 2014/07/11 16:35:40 jsg Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * Internal functions.
 *
 * Required by clipping macros.
 */
void		uvm_map_clip_end(struct vm_map*, struct vm_map_entry*,
		    vaddr_t);
void		uvm_map_clip_start(struct vm_map*,
		    struct vm_map_entry*, vaddr_t);

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(_map, _entry, _addr)				\
do {									\
	KASSERT((_entry)->end + (_entry)->fspace > (_addr));		\
	if ((_entry)->start < (_addr))					\
		uvm_map_clip_start((_map), (_entry), (_addr));		\
} while (0)

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(_map, _entry, _addr)				\
do {									\
	KASSERT((_entry)->start < (_addr));				\
	if ((_entry)->end > (_addr))					\
		uvm_map_clip_end((_map), (_entry), (_addr));		\
} while (0)
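
/*
 * Illustrative sketch (not part of this header's API): a typical caller
 * clips the entries covering a range [start, end) before modifying them,
 * assuming here that the whole range lies within a single entry.  The
 * lookup and locking calls are declared later in this file; the
 * protection change is a hypothetical example of a modification.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *		entry->protection = new_prot;
 *	}
 *	vm_map_unlock(map);
 */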

/*
 * extract flags
 */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 */

/*
 * Objects which live in maps may be either VM objects, or another map
 * (called a "sharing map") which denotes read-write sharing with other maps.
 *
 * XXXCDC: private pager data goes here now
 */

union vm_map_object {
	struct uvm_object	*uvm_obj;	/* UVM OBJECT */
	struct vm_map		*sub_map;	/* belongs to another map */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	union {
		RB_ENTRY(vm_map_entry)	addr_entry; /* address tree */
	} daddrs;

	union {
		RB_ENTRY(vm_map_entry)	rbtree;	/* Link freespace tree. */
		TAILQ_ENTRY(vm_map_entry) tailq;/* Link freespace queue. */
		TAILQ_ENTRY(vm_map_entry) deadq;/* dead entry queue */
	} dfree;

#define uvm_map_entry_start_copy start
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */

	vsize_t			guard;		/* bytes in guard */
	vsize_t			fspace;		/* free space */

	union vm_map_object	object;		/* object I point to */
	voff_t			offset;		/* offset into object */
	struct vm_aref		aref;		/* anonymous overlay */

	int			etype;		/* entry type */

	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */

	int			wired_count;	/* can be paged if == 0 */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_STATIC		0x01		/* static map entry */
#define	UVM_MAP_KMEM		0x02		/* from kmem entry pool */

	vsize_t			fspace_augment;	/* max(fspace) in subtree */
};

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

TAILQ_HEAD(uvm_map_deadq, vm_map_entry);	/* dead entry queue */
RB_HEAD(uvm_map_addr, vm_map_entry);
RB_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
    uvm_mapentry_addrcmp);
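
/*
 * Illustrative sketch: entries can be walked in address order with the
 * generated red-black tree iterator.  "map->addr" refers to the entry
 * tree in struct vm_map, defined below; the map must be at least
 * read-locked for the traversal to be safe.
 *
 *	struct vm_map_entry *entry;
 *
 *	RB_FOREACH(entry, uvm_map_addr, &map->addr) {
 *		if (VM_MAPENT_ISWIRED(entry))
 *			continue;	(wired entries are not pageable)
 *		...
 *	}
 */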

/*
 * A map is an rbtree of map entries, kept sorted by address.
 * In addition, free space entries are also kept in an rbtree,
 * indexed by free size.
 *
 *
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_GUARDPAGES	r/o; must be specified at map
 *				initialization time.
 *				If set, guards will appear between
 *				automatic allocations.
 *				No locking required.
 *
 *	VM_MAP_ISVMSPACE	r/o; set by uvmspace_alloc.
 *				Signifies that this map is a vmspace.
 *				(The implementation treats all maps
 *				without this bit as kernel maps.)
 *				No locking required.
 *
 *
 * All automatic allocations (uvm_map without MAP_FIXED) will allocate
 * from vm_map.free.
 * If that allocation fails:
 * - vmspace maps will spill over into vm_map.bfree,
 * - all other maps will call uvm_map_kmem_grow() to increase the arena.
 *
 * vmspace maps have their data, brk() and stack arenas automatically
 * updated when uvm_map() is invoked without MAP_FIXED.
 * The spill-over arena (vm_map.bfree) will contain the space in the brk()
 * and stack ranges.
 * Kernel maps never have a bfree arena and this tree will always be empty.
 *
 *
 * read_locks and write_locks are used in lock debugging code.
 */
struct vm_map {
	struct pmap		*pmap;		/* Physical map */
	struct rwlock		lock;		/* Lock for map data */

	struct uvm_map_addr	addr;		/* Entry tree, by addr */

	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	int			flags;		/* flags */
	unsigned int		timestamp;	/* Version number */

	vaddr_t			min_offset;	/* First address in map. */
	vaddr_t			max_offset;	/* Last address in map. */

	/*
	 * Allocation overflow regions.
	 */
	vaddr_t			b_start;	/* Start for brk() alloc. */
	vaddr_t			b_end;		/* End for brk() alloc. */
	vaddr_t			s_start;	/* Start for stack alloc. */
	vaddr_t			s_end;		/* End for stack alloc. */

	/*
	 * Special address selectors.
	 *
	 * The uaddr_exe mapping is used if:
	 * - protX is selected
	 * - the pointer is not NULL
	 *
	 * If uaddr_exe is not used, the other mappings are checked in
	 * order of appearance.
	 * If a hint is given, the selection will only be used if the hint
	 * falls in the range described by the mapping.
	 *
	 * The states are pointers because:
	 * - they may not all be in use
	 * - the struct size for different schemes is variable
	 *
	 * The uaddr_brk_stack selector will select addresses that are in
	 * the brk/stack area of the map.
	 */
	struct uvm_addr_state	*uaddr_exe;	/* Executable selector. */
	struct uvm_addr_state	*uaddr_any[4];	/* More selectors. */
	struct uvm_addr_state	*uaddr_brk_stack; /* Brk/stack selector. */
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define	VM_MAP_GUARDPAGES	0x20		/* rw: add guard pgs to map */
#define	VM_MAP_ISVMSPACE	0x40		/* ro: map is a vmspace */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#define	MAX_KMAPENT	1024	/* Sufficient to make it to the scheduler. */
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
} while (0)
#endif /* _KERNEL */
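
/*
 * Illustrative sketch: vm_map_modflags() applies a set and a clear in one
 * step.  Per the locking notes above, r/w flags such as VM_MAP_WIREFUTURE
 * may only be changed with the map write-locked (hypothetical caller):
 *
 *	vm_map_lock(map);
 *	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	set the flag
 *	...
 *	vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);	clear it again
 *	vm_map_unlock(map);
 */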

/*
 * Interrupt-safe maps must also be kept on a special list,
 * to assist uvm_fault() in avoiding locking problems.
 */
struct vm_map_intrsafe {
	struct vm_map		vmi_map;
	LIST_ENTRY(vm_map_intrsafe) vmi_list;
};

/*
 * globals:
 */

#ifdef _KERNEL

extern vaddr_t	uvm_maxkaddr;

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(vm_map_t);

int		uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
vm_map_t	uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map*, vaddr_t, vsize_t, vaddr_t*,
		    int);
vaddr_t		uvm_map_pie(vaddr_t);
vaddr_t		uvm_map_hint(struct vmspace *, vm_prot_t);
int		uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
int		uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
int		uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
		    vm_map_entry_t, int);
int		uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
void		uvm_unmap(vm_map_t, vaddr_t, vaddr_t);
void		uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
		    struct uvm_addr_state*);
int		uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);

void		uvm_unmap_detach(struct uvm_map_deadq*, int);
void		uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
		    struct uvm_map_deadq*, boolean_t, boolean_t);

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 */
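
/*
 * Illustrative sketch of the busy-map protocol described earlier: a thread
 * that must drop to a read lock for a long operation, but later needs the
 * write lock back for error recovery, marks the map busy first
 * (hypothetical caller; error handling omitted):
 *
 *	vm_map_lock(map);		write lock
 *	vm_map_busy(map);		keep new writers out while downgraded
 *	vm_map_downgrade(map);		now read-locked, still busy
 *	...				long-running work
 *	vm_map_upgrade(map);		regain the write lock
 *	vm_map_unbusy(map);		allow other writers again
 *	vm_map_unlock(map);
 */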

#ifdef _KERNEL
/*
 * XXX: clean up later
 * Half the kernel seems to depend on them being included here.
 */
#include <sys/time.h>
#include <sys/systm.h>	/* for panic() */

boolean_t	vm_map_lock_try_ln(struct vm_map*, char*, int);
void		vm_map_lock_ln(struct vm_map*, char*, int);
void		vm_map_lock_read_ln(struct vm_map*, char*, int);
void		vm_map_unlock_ln(struct vm_map*, char*, int);
void		vm_map_unlock_read_ln(struct vm_map*, char*, int);
void		vm_map_downgrade_ln(struct vm_map*, char*, int);
void		vm_map_upgrade_ln(struct vm_map*, char*, int);
void		vm_map_busy_ln(struct vm_map*, char*, int);
void		vm_map_unbusy_ln(struct vm_map*, char*, int);

#ifdef DIAGNOSTIC
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_lock(map)	vm_map_lock_ln(map, __FILE__, __LINE__)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, __FILE__, __LINE__)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, __FILE__, __LINE__)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, __FILE__, __LINE__)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, __FILE__, __LINE__)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map)	vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map)	vm_map_lock_ln(map, NULL, 0)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, NULL, 0)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, NULL, 0)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, NULL, 0)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, NULL, 0)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map)	vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, NULL, 0)
#endif

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */