/*	$OpenBSD: uvm_map.h,v 1.59 2016/09/16 03:39:25 dlg Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/mutex.h>
#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * Internal functions.
 *
 * Required by clipping macros.
 */
void		uvm_map_clip_end(struct vm_map*, struct vm_map_entry*,
		    vaddr_t);
void		uvm_map_clip_start(struct vm_map*,
		    struct vm_map_entry*, vaddr_t);

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->end + (_entry)->fspace > (_addr));	\
		if ((_entry)->start < (_addr))				\
			uvm_map_clip_start((_map), (_entry), (_addr));	\
	} while (0)

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->start < (_addr));			\
		if ((_entry)->end > (_addr))				\
			uvm_map_clip_end((_map), (_entry), (_addr));	\
	} while (0)
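
/*
 * Illustrative sketch (not in-tree code): a typical caller that operates
 * on an address range [start, end) first clips the entries covering the
 * range so that the range falls exactly on entry boundaries.  The entry
 * lookup step below is hypothetical; only the clipping mirrors the macros
 * above, and the map must be write-locked throughout:
 *
 *	vm_map_lock(map);
 *	... find the first entry overlapping [start, end) ...
 *	UVM_MAP_CLIP_START(map, entry, start);
 *	... for the last overlapping entry ...
 *	UVM_MAP_CLIP_END(map, entry, end);
 *	... every affected entry now lies entirely inside [start, end) ...
 *	vm_map_unlock(map);
 */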

/*
 * extract flags
 */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 */

/*
 * Objects which live in maps may be either VM objects, or another map
 * (called a "sharing map") which denotes read-write sharing with other maps.
 *
 * XXXCDC: private pager data goes here now
 */

union vm_map_object {
	struct uvm_object	*uvm_obj;	/* UVM OBJECT */
	struct vm_map		*sub_map;	/* belongs to another map */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	union {
		RBT_ENTRY(vm_map_entry)	addr_entry; /* address tree */
		SLIST_ENTRY(vm_map_entry) addr_kentry;
	} daddrs;

	union {
		RBT_ENTRY(vm_map_entry)	rbtree;	/* Link freespace tree. */
		TAILQ_ENTRY(vm_map_entry) tailq;/* Link freespace queue. */
		TAILQ_ENTRY(vm_map_entry) deadq;/* dead entry queue */
	} dfree;

#define uvm_map_entry_start_copy start
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */

	vsize_t			guard;		/* bytes in guard */
	vsize_t			fspace;		/* free space */

	union vm_map_object	object;		/* object I point to */
	voff_t			offset;		/* offset into object */
	struct vm_aref		aref;		/* anonymous overlay */

	int			etype;		/* entry type */

	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */

	int			wired_count;	/* can be paged if == 0 */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_STATIC		0x01		/* static map entry */
#define	UVM_MAP_KMEM		0x02		/* from kmem entry pool */

	vsize_t			fspace_augment;	/* max(fspace) in subtree */
};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

TAILQ_HEAD(uvm_map_deadq, vm_map_entry);	/* dead entry queue */
RBT_HEAD(uvm_map_addr, vm_map_entry);
#ifdef _KERNEL
RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
    uvm_mapentry_addrcmp);
#endif
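
/*
 * Illustrative sketch (not in-tree code): the address tree can be walked
 * with the RBT iterators from <sys/tree.h>.  The helper below is purely
 * hypothetical; it counts the wired entries of a map while holding a
 * shared (read) lock, since it only inspects the tree:
 *
 *	int
 *	example_count_wired(struct vm_map *map)
 *	{
 *		struct vm_map_entry	*entry;
 *		int			 count = 0;
 *
 *		vm_map_lock_read(map);
 *		RBT_FOREACH(entry, uvm_map_addr, &map->addr)
 *			if (VM_MAPENT_ISWIRED(entry))
 *				count++;
 *		vm_map_unlock_read(map);
 *		return (count);
 *	}
 */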

/*
 * A map is an rbtree of map entries, kept sorted by address.
 * In addition, free space entries are also kept in an rbtree,
 * indexed by free size.
 *
 *
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_GUARDPAGES	r/o; must be specified at map
 *				initialization time.
 *				If set, guards will appear between
 *				automatic allocations.
 *				No locking required.
 *
 *	VM_MAP_ISVMSPACE	r/o; set by uvmspace_alloc.
 *				Signifies that this map is a vmspace.
 *				(The implementation treats all maps
 *				without this bit as kernel maps.)
 *				No locking required.
 *
 *
 * All automatic allocations (uvm_map without MAP_FIXED) will allocate
 * from vm_map.free.
 * If that allocation fails:
 * - vmspace maps will spill over into vm_map.bfree,
 * - all other maps will call uvm_map_kmem_grow() to increase the arena.
 *
 * vmspace maps have their data, brk() and stack arenas automatically
 * updated when uvm_map() is invoked without MAP_FIXED.
 * The spill-over arena (vm_map.bfree) will contain the space in the brk()
 * and stack ranges.
 * Kernel maps never have a bfree arena and this tree will always be empty.
 *
 *
 * read_locks and write_locks are used in lock debugging code.
 */
struct vm_map {
	struct pmap		*pmap;		/* Physical map */
	struct rwlock		lock;		/* Lock for map data */
	struct mutex		mtx;

	struct uvm_map_addr	addr;		/* Entry tree, by addr */

	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	int			flags;		/* flags */
	struct mutex		flags_lock;	/* flags lock */
	unsigned int		timestamp;	/* Version number */

	vaddr_t			min_offset;	/* First address in map. */
	vaddr_t			max_offset;	/* Last address in map. */

	/*
	 * Allocation overflow regions.
	 */
	vaddr_t			b_start;	/* Start for brk() alloc. */
	vaddr_t			b_end;		/* End for brk() alloc. */
	vaddr_t			s_start;	/* Start for stack alloc. */
	vaddr_t			s_end;		/* End for stack alloc. */

	/*
	 * Special address selectors.
	 *
	 * The uaddr_exe mapping is used if:
	 * - protX is selected
	 * - the pointer is not NULL
	 *
	 * If uaddr_exe is not used, the other mappings are checked in
	 * order of appearance.
	 * If a hint is given, the selection will only be used if the hint
	 * falls in the range described by the mapping.
	 *
	 * The states are pointers because:
	 * - they may not all be in use
	 * - the struct size for different schemes is variable
	 *
	 * The uaddr_brk_stack selector will select addresses that are in
	 * the brk/stack area of the map.
	 */
	struct uvm_addr_state	*uaddr_exe;	/* Executable selector. */
	struct uvm_addr_state	*uaddr_any[4];	/* More selectors. */
	struct uvm_addr_state	*uaddr_brk_stack; /* Brk/stack selector. */
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define	VM_MAP_GUARDPAGES	0x20		/* rw: add guard pgs to map */
#define	VM_MAP_ISVMSPACE	0x40		/* ro: map is a vmspace */
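
/*
 * Illustrative sketch (not in-tree code): the read-only flags are set at
 * map creation time and may therefore be tested without any locking, e.g.
 *
 *	if (map->flags & VM_MAP_ISVMSPACE)
 *		... map belongs to a process vmspace ...
 *	else
 *		... kernel map ...
 *
 * Read/write flags such as VM_MAP_WIREFUTURE may only be set or cleared
 * with the map write-locked; a hypothetical caller would do
 *
 *	vm_map_lock(map);
 *	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
 *	vm_map_unlock(map);
 *
 * using the vm_map_modflags() helper defined below.
 */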

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#define	MAX_KMAPENT	1024	/* Sufficient to make it to the scheduler. */
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
	do {								\
		mtx_enter(&(map)->flags_lock);				\
		(map)->flags = ((map)->flags | (set)) & ~(clear);	\
		mtx_leave(&(map)->flags_lock);				\
	} while (0)
#endif	/* _KERNEL */

/*
 * Interrupt-safe maps must also be kept on a special list,
 * to assist uvm_fault() in avoiding locking problems.
 */
struct vm_map_intrsafe {
	struct vm_map	vmi_map;
	LIST_ENTRY(vm_map_intrsafe) vmi_list;
};

/*
 * globals:
 */

#ifdef _KERNEL

extern vaddr_t	uvm_maxkaddr;

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(vm_map_t);

int		uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
vm_map_t	uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map*, vaddr_t, vsize_t, vaddr_t*,
		    int);
vaddr_t		uvm_map_pie(vaddr_t);
vaddr_t		uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
int		uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
int		uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
int		uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
		    vm_map_entry_t, int);
int		uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
void		uvm_unmap(vm_map_t, vaddr_t, vaddr_t);
void		uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
		    struct uvm_addr_state*);
int		uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);

void		uvm_unmap_detach(struct uvm_map_deadq*, int);
void		uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
		    struct uvm_map_deadq*, boolean_t, boolean_t);

struct kinfo_vmentry;

int		uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
		    size_t *);

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 * These operations perform locking on the data portion of the
 * map.
 *
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 * vm_map_unlock: release an exclusive lock on a map.
 *
 * vm_map_unlock_read: release a shared lock on a map.
 *
 * vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 * vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 * vm_map_busy: mark a map as busy.
 *
 * vm_map_unbusy: clear busy status on a map.
 *
 */
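
/*
 * Illustrative sketch (not in-tree code) of the busy/downgrade/upgrade
 * dance described in the locking protocol notes above.  The thread that
 * marks the map busy is the only one allowed to clear that state again:
 *
 *	vm_map_lock(map);		exclusive (write) lock
 *	vm_map_busy(map);		mark busy while write-locked
 *	vm_map_downgrade(map);		keep only a shared (read) lock
 *	... do work that may need error recovery ...
 *	vm_map_upgrade(map);		back to an exclusive lock
 *	vm_map_unbusy(map);		clear busy (same thread)
 *	vm_map_unlock(map);
 */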

#ifdef _KERNEL
/*
 * XXX: clean up later
 * Half the kernel seems to depend on them being included here.
 */
#include <sys/time.h>
#include <sys/systm.h>	/* for panic() */

boolean_t	vm_map_lock_try_ln(struct vm_map*, char*, int);
void		vm_map_lock_ln(struct vm_map*, char*, int);
void		vm_map_lock_read_ln(struct vm_map*, char*, int);
void		vm_map_unlock_ln(struct vm_map*, char*, int);
void		vm_map_unlock_read_ln(struct vm_map*, char*, int);
void		vm_map_downgrade_ln(struct vm_map*, char*, int);
void		vm_map_upgrade_ln(struct vm_map*, char*, int);
void		vm_map_busy_ln(struct vm_map*, char*, int);
void		vm_map_unbusy_ln(struct vm_map*, char*, int);

#ifdef DIAGNOSTIC
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_lock(map)	vm_map_lock_ln(map, __FILE__, __LINE__)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, __FILE__, __LINE__)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, __FILE__, __LINE__)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, __FILE__, __LINE__)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, __FILE__, __LINE__)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map)	vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map)	vm_map_lock_ln(map, NULL, 0)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, NULL, 0)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, NULL, 0)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, NULL, 0)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, NULL, 0)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map)	vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, NULL, 0)
#endif

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */