/*	$OpenBSD: uvm_map.h,v 1.68 2020/10/09 08:16:28 mpi Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/mutex.h>
#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->end + (_entry)->fspace > (_addr));	\
		if ((_entry)->start < (_addr))				\
			uvm_map_clip_start((_map), (_entry), (_addr));	\
	} while (0)

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->start < (_addr));			\
		if ((_entry)->end > (_addr))				\
			uvm_map_clip_end((_map), (_entry), (_addr));	\
	} while (0)
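
/*
 * Illustrative sketch (not taken from the kernel sources): a caller that
 * holds the map write-locked and wants an entry to cover exactly the range
 * [start, end) might combine uvm_map_lookup_entry() with the clip macros
 * roughly as follows.  The variables map, entry, start and end are
 * hypothetical.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *		... operate on the clipped entry ...
 *	}
 *	vm_map_unlock(map);
 */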

/*
 * extract flags
 */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	union {
		RBT_ENTRY(vm_map_entry)	addr_entry; /* address tree */
		SLIST_ENTRY(vm_map_entry) addr_kentry;
	} daddrs;

	union {
		RBT_ENTRY(vm_map_entry)	rbtree;	/* Link freespace tree. */
		TAILQ_ENTRY(vm_map_entry) tailq;/* Link freespace queue. */
		TAILQ_ENTRY(vm_map_entry) deadq;/* dead entry queue */
	} dfree;

#define uvm_map_entry_start_copy start
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */

	vsize_t			guard;		/* bytes in guard */
	vsize_t			fspace;		/* free space */

	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	struct vm_aref		aref;		/* anonymous overlay */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_STATIC		0x01		/* static map entry */
#define	UVM_MAP_KMEM		0x02		/* from kmem entry pool */

	vsize_t			fspace_augment;	/* max(fspace) in subtree */
};

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

TAILQ_HEAD(uvm_map_deadq, vm_map_entry);	/* dead entry queue */
RBT_HEAD(uvm_map_addr, vm_map_entry);
#ifdef _KERNEL
RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
    uvm_mapentry_addrcmp);
#endif
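
/*
 * Illustrative sketch (assumed usage, not a verbatim excerpt from the
 * kernel): with the map at least read-locked, the address-sorted entry
 * tree (the `addr' member of struct vm_map, below) can be walked with the
 * generic RBT_FOREACH() iterator from <sys/tree.h>, using the uvm_map_addr
 * tree type prototyped above.  The variables map and entry are
 * hypothetical.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
 *		... inspect entry->start, entry->end, entry->protection ...
 *	}
 *	vm_map_unlock_read(map);
 */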

/*
 * A map is an rbtree of map entries, kept sorted by address.
 * In addition, free space entries are also kept in an rbtree,
 * indexed by free size.
 *
 *
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and then upgrade
 * to an exclusive lock again (to perform error recovery).  In that
 * window, another thread *must not* queue itself to receive an
 * exclusive lock before we upgrade back to exclusive, otherwise
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check.  All flags which are r/w must be set or cleared while
 * the `flags_lock' is asserted.  Additional locking requirements
 * are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_GUARDPAGES	r/o; must be specified at map
 *				initialization time.
 *				If set, guards will appear between
 *				automatic allocations.
 *				No locking required.
 *
 *	VM_MAP_ISVMSPACE	r/o; set by uvmspace_alloc.
 *				Signifies that this map is a vmspace.
 *				(The implementation treats all maps
 *				without this bit as kernel maps.)
 *				No locking required.
 *
 *
 * All automatic allocations (uvm_map without MAP_FIXED) will allocate
 * from vm_map.free.
 * If that allocation fails:
 * - vmspace maps will spill over into vm_map.bfree,
 * - all other maps will call uvm_map_kmem_grow() to increase the arena.
 *
 * vmspace maps have their data, brk() and stack arenas automatically
 * updated when uvm_map() is invoked without MAP_FIXED.
 * The spill-over arena (vm_map.bfree) will contain the space in the brk()
 * and stack ranges.
 * Kernel maps never have a bfree arena and this tree will always be empty.
 *
 *
 * read_locks and write_locks are used in lock debugging code.
 *
 * Locks used to protect struct members in this file:
 *	I	immutable after creation or exec(2)
 *	v	`vm_map_lock' (this map `lock' or `mtx')
 */
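
/*
 * Illustrative sketch of the busy protocol described above (assumed usage,
 * not a verbatim excerpt from the kernel): a thread that must drop to a
 * shared lock for a long operation, yet keep other writers out until it
 * has recovered the exclusive lock, would do roughly:
 *
 *	vm_map_lock(map);		exclusive (write) lock
 *	vm_map_busy(map);		writers now wait for unbusy
 *	vm_map_downgrade(map);		keep only a shared lock
 *	... long operation, possibly sleeping ...
 *	vm_map_upgrade(map);		regain the exclusive lock
 *	vm_map_unbusy(map);		let writers through again
 *	vm_map_unlock(map);
 */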

struct vm_map {
	struct pmap		*pmap;		/* [I] Physical map */
	struct rwlock		lock;		/* Non-intrsafe lock */
	struct mutex		mtx;		/* Intrsafe lock */
	u_long			sserial;	/* [v] # stack changes */
	u_long			wserial;	/* [v] # PROT_WRITE increases */

	struct uvm_map_addr	addr;		/* [v] Entry tree, by addr */

	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	int			flags;		/* flags */
	struct mutex		flags_lock;	/* flags lock */
	unsigned int		timestamp;	/* Version number */

	vaddr_t			min_offset;	/* [I] First address in map. */
	vaddr_t			max_offset;	/* [I] Last address in map. */

	/*
	 * Allocation overflow regions.
	 */
	vaddr_t			b_start;	/* [v] Start for brk() alloc. */
	vaddr_t			b_end;		/* [v] End for brk() alloc. */
	vaddr_t			s_start;	/* [v] Start for stack alloc. */
	vaddr_t			s_end;		/* [v] End for stack alloc. */

	/*
	 * Special address selectors.
	 *
	 * The uaddr_exe mapping is used if:
	 * - protX is selected
	 * - the pointer is not NULL
	 *
	 * If uaddr_exe is not used, the other mappings are checked in
	 * order of appearance.
	 * If a hint is given, the selection will only be used if the hint
	 * falls in the range described by the mapping.
	 *
	 * The states are pointers because:
	 * - they may not all be in use
	 * - the struct size for different schemes is variable
	 *
	 * The uaddr_brk_stack selector will select addresses that are in
	 * the brk/stack area of the map.
	 */
	struct uvm_addr_state	*uaddr_exe;	/* Executable selector. */
	struct uvm_addr_state	*uaddr_any[4];	/* More selectors. */
	struct uvm_addr_state	*uaddr_brk_stack; /* Brk/stack selector. */
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02	/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08	/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10	/* rw: want to write-lock */
#define	VM_MAP_GUARDPAGES	0x20	/* rw: add guard pgs to map */
#define	VM_MAP_ISVMSPACE	0x40	/* ro: map is a vmspace */
#define	VM_MAP_SYSCALL_ONCE	0x80	/* rw: libc syscall registered */

/* Number of kernel maps and entries to statically allocate */
#define	MAX_KMAPENT	1024	/* Sufficient to make it to the scheduler. */

#ifdef _KERNEL
/*
 * globals:
 */

extern vaddr_t	uvm_maxkaddr;

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, int);
struct vm_map *	uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
vaddr_t		uvm_map_pie(vaddr_t);
vaddr_t		uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
int		uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t, vm_map_entry_t *);
boolean_t	uvm_map_is_stack_remappable(struct vm_map *, vaddr_t, vsize_t);
int		uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    vm_map_entry_t, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(struct vm_map *, pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct uvm_map_deadq *, int);
void		uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
		    struct uvm_map_deadq *, boolean_t, boolean_t);
void		uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
		    struct uvm_addr_state*);
int		uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);


struct p_inentry;

int		uvm_map_inentry_sp(vm_map_entry_t);
int		uvm_map_inentry_pc(vm_map_entry_t);
boolean_t	uvm_map_inentry(struct proc *, struct p_inentry *, vaddr_t addr,
		    const char *fmt, int (*fn)(vm_map_entry_t), u_long serial);

struct kinfo_vmentry;

int		uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
		    size_t *);

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 */

boolean_t	vm_map_lock_try_ln(struct vm_map*, char*, int);
void		vm_map_lock_ln(struct vm_map*, char*, int);
void		vm_map_lock_read_ln(struct vm_map*, char*, int);
void		vm_map_unlock_ln(struct vm_map*, char*, int);
void		vm_map_unlock_read_ln(struct vm_map*, char*, int);
void		vm_map_downgrade_ln(struct vm_map*, char*, int);
void		vm_map_upgrade_ln(struct vm_map*, char*, int);
void		vm_map_busy_ln(struct vm_map*, char*, int);
void		vm_map_unbusy_ln(struct vm_map*, char*, int);

#ifdef DIAGNOSTIC
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_lock(map)	vm_map_lock_ln(map, __FILE__, __LINE__)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, __FILE__, __LINE__)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, __FILE__, __LINE__)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, __FILE__, __LINE__)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, __FILE__, __LINE__)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map)	vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map)	vm_map_lock_ln(map, NULL, 0)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, NULL, 0)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, NULL, 0)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, NULL, 0)
#define vm_map_downgrade(map)	vm_map_downgrade_ln(map, NULL, 0)
#define vm_map_upgrade(map)	vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map)	vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, NULL, 0)
#endif
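
/*
 * Illustrative sketch (assumed usage, not a verbatim excerpt from the
 * kernel): a reader that only inspects the map takes the shared lock,
 * while a path that cannot afford to wait may use the non-blocking
 * write-lock attempt.  The variable map is hypothetical.
 *
 *	vm_map_lock_read(map);
 *	... look something up ...
 *	vm_map_unlock_read(map);
 *
 *	if (vm_map_lock_try(map)) {
 *		... modify the map ...
 *		vm_map_unlock(map);
 *	}
 */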

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */