/*	$NetBSD: uvm_map.h,v 1.29 2001/06/26 17:55:15 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define UVM_MAP_STATIC		0x01		/* static map entry */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

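/*
 * Example: the usual pattern for operating on every entry overlapping
 * the range [start, end) is to write-lock the map, clip the first and
 * last entries with the macros above, and then walk the doubly-linked
 * entry list.  The sketch below is illustrative only and is not part
 * of the interface; the helper name is hypothetical, it relies on
 * uvm_map_lookup_entry() and the locking macros declared later in this
 * file, and the per-entry work is left as a placeholder comment.
 */
#if 0	/* example only -- not compiled */
static void
example_walk_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *entry;

	vm_map_lock(map);
	if (uvm_map_lookup_entry(map, start, &entry)) {
		/* "start" falls inside an entry; split it at "start". */
		UVM_MAP_CLIP_START(map, entry, start);
	} else {
		/* "start" is in a hole; begin with the following entry. */
		entry = entry->next;
	}
	while (entry != &map->header && entry->start < end) {
		/* split the final entry at "end" so it fits the range */
		UVM_MAP_CLIP_END(map, entry, end);
		/* ... operate on [entry->start, entry->end) here ... */
		entry = entry->next;
	}
	vm_map_unlock(map);
}
#endif	/* example only */
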
/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  However, it is sometimes required
 *	to downgrade an exclusive lock to a shared lock, and upgrade to
 *	an exclusive lock again (to perform error recovery).  However,
 *	another thread *must not* queue itself to receive an exclusive
 *	lock before we upgrade back to exclusive, otherwise the
 *	error recovery becomes extremely difficult, if not impossible.
 *
 *	In order to prevent this scenario, we introduce the notion of
 *	a `busy' map.  A `busy' map is read-locked, but other threads
 *	attempting to write-lock wait for this flag to clear before
 *	entering the lock manager.  A map may only be marked busy
 *	when the map is write-locked (and then the map must be downgraded
 *	to read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).
 *
 *	Access to the map `flags' member is controlled by the `flags_lock'
 *	simple lock.  Note that some flags are static (set once at map
 *	creation time, and never changed), and thus require no locking
 *	to check those flags.  All flags which are r/w must be set or
 *	cleared while the `flags_lock' is asserted.  Additional locking
 *	requirements are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  may be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_BUSY		r/w; may only be set when map is
 *					write-locked, may only be cleared by
 *					thread which set it, map read-locked
 *					or write-locked.  must be tested
 *					while `flags_lock' is asserted.
 *
 *		VM_MAP_WANTLOCK		r/w; may only be set when the map
 *					is busy, and thread is attempting
 *					to write-lock.  must be tested
 *					while `flags_lock' is asserted.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
#define	min_offset		header.start
#define max_offset		header.end
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#if (50 + (2 * NPROC) > 1000)
#define MAX_KMAPENT (50 + (2 * NPROC))
#else
#define	MAX_KMAPENT	1000	/* XXXCDC: no crash */
#endif
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (0)
#endif /* _KERNEL */

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate __P((struct vm_map *));

int		uvm_map_clean __P((struct vm_map *, vaddr_t, vaddr_t, int));
void		uvm_map_clip_start __P((struct vm_map *, struct vm_map_entry *,
		    vaddr_t));
void		uvm_map_clip_end __P((struct vm_map *, struct vm_map_entry *,
		    vaddr_t));
MAP_INLINE
struct vm_map	*uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int));
int		uvm_map_extract __P((struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int));
struct vm_map_entry *uvm_map_findspace __P((struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int));
int		uvm_map_inherit __P((struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t));
int		uvm_map_advice __P((struct vm_map *, vaddr_t, vaddr_t, int));
void		uvm_map_init __P((void));
boolean_t	uvm_map_lookup_entry __P((struct vm_map *, vaddr_t,
		    struct vm_map_entry **));
MAP_INLINE
void		uvm_map_reference __P((struct vm_map *));
int		uvm_map_replace __P((struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int));
int		uvm_map_reserve __P((struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *));
void		uvm_map_setup __P((struct vm_map *, vaddr_t, vaddr_t, int));
int		uvm_map_submap __P((struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *));
MAP_INLINE
void		uvm_unmap __P((struct vm_map *, vaddr_t, vaddr_t));
void		uvm_unmap_detach __P((struct vm_map_entry *, int));
void		uvm_unmap_remove __P((struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **));

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive, spin locks.  We simply
 * use the sleep lock's interlock for this.
 */
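
/*
 * Example: the `busy' protocol described in the LOCKING PROTOCOL NOTES
 * above is used when a caller must temporarily drop to a read lock but
 * needs to regain the write lock afterwards without another writer
 * queueing in between (the map wiring code is a typical user).  The
 * sketch below is illustrative only; the helper name and the "long
 * running work" step are hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_busy_protocol(struct vm_map *map)
{
	vm_map_lock(map);	/* exclusive (write) lock */
	vm_map_busy(map);	/* keep other writers out while unlocked */
	vm_map_downgrade(map);	/* now hold only a read lock */

	/* ... long running work that needs only a read lock ... */

	vm_map_upgrade(map);	/* safe: no writer could have queued */
	vm_map_unbusy(map);	/* wake up any threads wanting the lock */
	vm_map_unlock(map);	/* release the write lock */
}
#endif	/* example only */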

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try __P((struct vm_map *));
static __inline void		vm_map_lock __P((struct vm_map *));
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(map)
	struct vm_map *map;
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	if (rv)
		map->timestamp++;

	return (rv);
}

static __inline void
vm_map_lock(map)
	struct vm_map *map;
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		simple_lock(&map->lock.lk_interlock);
		return;
	}

 try_again:
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		KASSERT(error == ENOLCK);
		goto try_again;
	}

	(map)->timestamp++;
}

#ifdef DIAGNOSTIC
#define	vm_map_lock_read(map)						\
do {									\
	if (map->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (0)
#else
#define	vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define	vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (0)

#define	vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define	vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define	vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (0)
#else
#define	vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define	vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (0)
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */