/*	$NetBSD: uvm_map.c,v 1.402 2022/06/08 16:55:00 macallan Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.402 2022/06/08 16:55:00 macallan Exp $");

#include "opt_ddb.h"
#include "opt_pax.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/pax.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>
#include <sys/sysctl.h>
#ifndef __USER_VA0_IS_SAFE
#include <sys/kauth.h>
#include "opt_user_va0_disable_default.h"
#endif

#include <sys/shm.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#ifdef UVMHIST
#ifndef UVMHIST_MAPHIST_SIZE
#define UVMHIST_MAPHIST_SIZE 100
#endif
static struct kern_history_ent maphistbuf[UVMHIST_MAPHIST_SIZE];
UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define	UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define	UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_COUNTERS) */

UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

#ifndef __USER_VA0_IS_SAFE
#ifndef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT 1
#endif
#ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
#undef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
#endif
int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
#endif

/*
 * macros
 */

/*
 * uvm_map_align_va: round down or up virtual address
 */
static __inline void
uvm_map_align_va(vaddr_t *vap, vsize_t align, int topdown)
{

	KASSERT(powerof2(align));

	if (align != 0 && (*vap & (align - 1)) != 0) {
		if (topdown)
			*vap = rounddown2(*vap, align);
		else
			*vap = roundup2(*vap, align);
	}
}
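
/*
 * Illustrative example (not from the original source): with
 * align == 0x10000, a topdown search rounds a hint of 0x12345678
 * down to 0x12340000, while a bottom-up search rounds it up to
 * 0x12350000.  An align of 0 leaves the address untouched.
 */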
/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */
extern struct vm_map *pager_map;

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
} while (/*CONSTCOND*/ 0)

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    int, struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

/*
 * These get filled in if/when SYSVSHM shared memory code is loaded
 *
 * We do this with function pointers rather than #ifdef SYSVSHM so the
 * SYSVSHM code can be loaded and unloaded
 */
void (*uvm_shmexit)(struct vmspace *) = NULL;
void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;

static int
uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
{
	const struct vm_map_entry *eparent = nparent;
	const struct vm_map_entry *ekey = nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (eparent->start < ekey->start)
		return -1;
	if (eparent->end >= ekey->start)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (eparent->start < va)
		return -1;
	if (eparent->end >= va)
		return 1;
	return 0;
}

static const rb_tree_ops_t uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_rb_gap: return the gap size between our entry and next entry.
 */
static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}
static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendents.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendents, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
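
/*
 * Worked example (illustrative numbers, not from the original source):
 * if an entry's own gap is 0x2000 and its left and right children
 * cache maxgap values of 0x8000 and 0x1000 respectively, then this
 * entry's maxgap is 0x8000 -- taken straight from the left child's
 * cached value, with no descent below the children.
 */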
static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;
		unsigned int which;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work towards the root, we know entry's maxgap
		 * value is OK, but its brothers may now be out-of-date due
		 * to rebalancing.  So refresh it.
		 */
		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *ret __diagused;

	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	ret = rb_tree_insert_node(&map->rb_tree, entry);
	KASSERTMSG(ret == entry,
	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, entry);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}
int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %#lx != %#lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, tmp->gap wraps around
		 * (it is unsigned) and will likely exceed the size of
		 * the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %#lx != %#lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: 0x%"PRIxVADDR" >= 0x%"PRIxVADDR"\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL || map->busy == curlwp) {
			break;
		}
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL) {
			cv_wait(&map->cv, &map->misc_lock);
		}
		mutex_exit(&map->misc_lock);
	}
	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if (!rw_tryenter(&map->lock, RW_WRITER)) {
		return false;
	}
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}
	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL || map->busy == curlwp);
	rw_exit(&map->lock);
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 * want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	rw_exit(&map->lock);
}
/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	return rw_write_held(&map->lock);
}
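
/*
 * Illustrative sketch (not from the original source): the busy mark
 * lets a thread drop the exclusive lock across a blocking operation
 * and still be guaranteed to re-acquire it afterwards, per the
 * "LOCKING PROTOCOL NOTES" in uvm_map.h.  A typical sequence:
 *
 *	vm_map_lock(map);	// exclusive lock
 *	vm_map_busy(map);	// other writers now wait on map->cv
 *	vm_map_unlock(map);
 *	// ... sleep for memory, do I/O, etc. ...
 *	vm_map_lock(map);	// succeeds for us: map->busy == curlwp
 *	vm_map_unbusy(map);	// clear busy, wake waiters
 *	vm_map_unlock(map);
 */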
/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	me = pool_cache_get(&uvm_map_entry_cache, pflags);
	if (__predict_false(me == NULL)) {
		return NULL;
	}
	me->flags = 0;

	UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
	    (map == kernel_map), 0, 0);
	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
	    (uintptr_t)me, me->flags, 0, 0);
	pool_cache_put(&uvm_map_entry_cache, me);
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, sizeof(*dst));
	dst->flags = 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p, line %d", __func__, entry, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC(__func__);
	UVMHIST_LINK_STATIC(maphist);
	UVMHIST_LINK_STATIC(pdhist);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
}

/*
 * uvm_map_init_caches: init mapping system caches.
 */
void
uvm_map_init_caches(void)
{
	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
	    NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it doesn't we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 * Create a new entry and insert it
	 * AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}
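
/*
 * Worked example (illustrative, not from the original source): given
 * an entry covering [0x1000, 0x4000), UVM_MAP_CLIP_START(map, entry,
 * 0x2000) leaves `entry' covering [0x2000, 0x4000) and links a new
 * entry for [0x1000, 0x2000) in front of it; UVM_MAP_CLIP_END(map,
 * entry, 0x2000) instead leaves `entry' as [0x1000, 0x2000) and links
 * a new entry for [0x2000, 0x4000) after it.
 */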
/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.  note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((size & PAGE_MASK) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
	}
	if (map == pager_map)
		flags |= UVM_FLAG_NOMERGE;

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}
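
/*
 * Call sketch (illustrative only; flags vary by caller): an anonymous,
 * copy-on-write mapping at a map-chosen address could be established
 * with something like
 *
 *	vaddr_t va = 0;
 *	int error = uvm_map(map, &va, round_page(len), NULL,
 *	    UVM_UNKNOWN_OFFSET, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW));
 *
 * which is case [2] above: no uobj, so no PMAP_PREFER hint.
 */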
/*
 * uvm_map_prepare:
 *
 * called with map unlocked.
 * on success, returns the map locked.
 */

int
uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
    struct uvm_map_args *args)
{
	struct vm_map_entry *prev_entry;
	vm_prot_t prot = UVM_PROTECTION(flags);
	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%jx, flags=%#jx)",
	    (uintptr_t)map, start, size, flags);
	UVMHIST_LOG(maphist, " uobj/offset %#jx/%jd", (uintptr_t)uobj,
	    uoffset,0,0);

	/*
	 * detect a popular device driver bug.
	 */

	KASSERT(doing_shutdown || curlwp != NULL);

	/*
	 * zero-sized mapping doesn't make any sense.
	 */
	KASSERT(size > 0);

	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);

	uvm_map_check(map, "map entry");

	/*
	 * check sanity of protection code
	 */

	if ((prot & maxprot) != prot) {
		UVMHIST_LOG(maphist, "<- prot. failure: prot=%#jx, max=%#jx",
		    prot, maxprot,0,0);
		return EACCES;
	}

	/*
	 * figure out where to put new VM range
	 */
retry:
	if (vm_map_lock_try(map) == false) {
		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
			return EAGAIN;
		}
		vm_map_lock(map); /* could sleep here */
	}
	if (flags & UVM_FLAG_UNMAP) {
		KASSERT(flags & UVM_FLAG_FIXED);
		KASSERT((flags & UVM_FLAG_NOWAIT) == 0);

		/*
		 * Set prev_entry to what it will need to be after any existing
		 * entries are removed later in uvm_map_enter().
		 */

		if (uvm_map_lookup_entry(map, start, &prev_entry)) {
			if (start == prev_entry->start)
				prev_entry = prev_entry->prev;
			else
				UVM_MAP_CLIP_END(map, prev_entry, start);
			SAVE_HINT(map, map->hint, prev_entry);
		}
	} else {
		prev_entry = uvm_map_findspace(map, start, size, &start,
		    uobj, uoffset, align, flags);
	}
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=%#jx",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		    map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}
#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif

	UVMMAP_EVCNT_INCR(map_call);

	/*
	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.  in
	 * either case we want to zero it before storing it in the map entry
	 * (because it looks strange and confusing when debugging...)
	 *
	 * if uobj is not null
	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
	 *      and we do not need to change uoffset.
	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
	 *      now (based on the starting address of the map).  this case is
	 *      for kernel object mappings where we don't know the offset until
	 *      the virtual address is found (with uvm_map_findspace).  the
	 *      offset is the distance we are from the start of the map.
	 */

	if (uobj == NULL) {
		uoffset = 0;
	} else {
		if (uoffset == UVM_UNKNOWN_OFFSET) {
			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
			uoffset = start - vm_map_min(kernel_map);
		}
	}

	args->uma_flags = flags;
	args->uma_prev = prev_entry;
	args->uma_start = start;
	args->uma_size = size;
	args->uma_uobj = uobj;
	args->uma_uoffset = uoffset;

	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
	return 0;
}
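
/*
 * Usage sketch (illustrative, not from the original source): uvm_map()
 * above is the usual interface; the prepare/enter split exists so a
 * caller can set up resources between the two steps.  The equivalent
 * two-step sequence is roughly
 *
 *	struct uvm_map_args args;
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset,
 *	    align, flags, &args);	// returns with map locked
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, NULL);  // unlocks map
 */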
/*
 * uvm_map_enter:
 *
 * called with map locked.
 * unlock the map before returning.
 */

int
uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
    struct vm_map_entry *new_entry)
{
	struct vm_map_entry *prev_entry = args->uma_prev;
	struct vm_map_entry *dead = NULL, *dead_entries = NULL;

	const uvm_flag_t flags = args->uma_flags;
	const vm_prot_t prot = UVM_PROTECTION(flags);
	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
	const vm_inherit_t inherit = UVM_INHERIT(flags);
	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
	    AMAP_EXTEND_NOWAIT : 0;
	const int advice = UVM_ADVICE(flags);

	vaddr_t start = args->uma_start;
	vsize_t size = args->uma_size;
	struct uvm_object *uobj = args->uma_uobj;
	voff_t uoffset = args->uma_uoffset;

	const int kmap = (vm_map_pmap(map) == pmap_kernel());
	int merged = 0;
	int error;
	int newetype;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
	    (uintptr_t)map, start, size, flags);
	UVMHIST_LOG(maphist, " uobj/offset %#jx/%jd", (uintptr_t)uobj,
	    uoffset,0,0);

	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
	KASSERT(vm_map_locked_p(map));
	KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP)) !=
	    (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP));

	if (uobj)
		newetype = UVM_ET_OBJ;
	else
		newetype = 0;

	if (flags & UVM_FLAG_COPYONW) {
		newetype |= UVM_ET_COPYONWRITE;
		if ((flags & UVM_FLAG_OVERLAY) == 0)
			newetype |= UVM_ET_NEEDSCOPY;
	}

	/*
	 * For mappings with unmap, remove any old entries now.  Adding the new
	 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
	 * is set, and we do not support nowait and unmap together.
	 */

	if (flags & UVM_FLAG_UNMAP) {
		KASSERT(flags & UVM_FLAG_FIXED);
		uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
#ifdef DEBUG
		struct vm_map_entry *tmp_entry __diagused;
		bool rv __diagused;

		rv = uvm_map_lookup_entry(map, start, &tmp_entry);
		KASSERT(!rv);
		KASSERTMSG(prev_entry == tmp_entry,
		    "args %p prev_entry %p tmp_entry %p",
		    args, prev_entry, tmp_entry);
#endif
		SAVE_HINT(map, map->hint, prev_entry);
	}

	/*
	 * try and insert in map by extending previous entry, if possible.
	 * XXX: we don't try and pull back the next entry.  might be useful
	 * for a stack, but we are currently allocating our stack in advance.
	 */

	if (flags & UVM_FLAG_NOMERGE)
		goto nomerge;

	if (prev_entry->end == start &&
	    prev_entry != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->offset +
		    (prev_entry->end - prev_entry->start) != uoffset)
			goto forwardmerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay
		 * there.
		 */

		if (prev_entry->aref.ar_amap &&
		    amap_refs(prev_entry->aref.ar_amap) != 1) {
			goto forwardmerge;
		}

		if (prev_entry->aref.ar_amap) {
			error = amap_extend(prev_entry, size,
			    amapwaitflag | AMAP_EXTEND_FORWARDS);
			if (error)
				goto nomerge;
		}

		if (kmap) {
			UVMMAP_EVCNT_INCR(kbackmerge);
		} else {
			UVMMAP_EVCNT_INCR(ubackmerge);
		}
		UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */

		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		/*
		 * Now that we've merged the entries, note that we've grown
		 * and our gap has shrunk.  Then fix the tree.
		 */
		prev_entry->end += size;
		prev_entry->gap -= size;
		uvm_rb_fixup(map, prev_entry);

		uvm_map_check(map, "map backmerged");

		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
		merged++;
	}
forwardmerge:
	if (prev_entry->next->start == (start + size) &&
	    prev_entry->next != &map->header &&
	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
	    prot, maxprot, inherit, advice, 0)) {

		if (uobj && prev_entry->next->offset != uoffset + size)
			goto nomerge;

		/*
		 * can't extend a shared amap.  note: no need to lock amap to
		 * look at refs since we don't care about its exact value.
		 * if it is one (i.e. we have the only reference) it will stay
		 * there.
		 *
		 * note that we also can't merge two amaps, so if we
		 * merged with the previous entry which has an amap,
		 * and the next entry also has an amap, we give up.
		 *
		 * Interesting cases:
		 * amap, new, amap -> give up second merge (single fwd extend)
		 * amap, new, none -> double forward extend (extend again here)
		 * none, new, amap -> double backward extend (done here)
		 * uobj, new, amap -> single backward extend (done here)
		 *
		 * XXX should we attempt to deal with someone refilling
		 * the deallocated region between two entries that are
		 * backed by the same amap (ie, arefs is 2, "prev" and
		 * "next" refer to it, and adding this allocation will
		 * close the hole, thus restoring arefs to 1 and
		 * deallocating the "next" vm_map_entry)?  -- @@@
		 */

		if (prev_entry->next->aref.ar_amap &&
		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
		    (merged && prev_entry->aref.ar_amap))) {
			goto nomerge;
		}

		if (merged) {
			/*
			 * Try to extend the amap of the previous entry to
			 * cover the next entry as well.  If it doesn't work
			 * just skip on, don't actually give up, since we've
			 * already completed the back merge.
			 */
			if (prev_entry->aref.ar_amap) {
				if (amap_extend(prev_entry,
				    prev_entry->next->end -
				    prev_entry->next->start,
				    amapwaitflag | AMAP_EXTEND_FORWARDS))
					goto nomerge;
			}

			/*
			 * Try to extend the amap of the *next* entry
			 * back to cover the new allocation *and* the
			 * previous entry as well (the previous merge
			 * didn't have an amap already otherwise we
			 * wouldn't be checking here for an amap).  If
			 * it doesn't work just skip on, again, don't
			 * actually give up, since we've already
			 * completed the back merge.
			 */
			else if (prev_entry->next->aref.ar_amap) {
				if (amap_extend(prev_entry->next,
				    prev_entry->end -
				    prev_entry->start,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
					goto nomerge;
			}
		} else {
			/*
			 * Pull the next entry's amap backwards to cover this
			 * new allocation.
			 */
			if (prev_entry->next->aref.ar_amap) {
				error = amap_extend(prev_entry->next, size,
				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
				if (error)
					goto nomerge;
			}
		}

		if (merged) {
			if (kmap) {
				UVMMAP_EVCNT_DECR(kbackmerge);
				UVMMAP_EVCNT_INCR(kbimerge);
			} else {
				UVMMAP_EVCNT_DECR(ubackmerge);
				UVMMAP_EVCNT_INCR(ubimerge);
			}
		} else {
			if (kmap) {
				UVMMAP_EVCNT_INCR(kforwmerge);
			} else {
				UVMMAP_EVCNT_INCR(uforwmerge);
			}
		}
		UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);

		/*
		 * drop our reference to uobj since we are extending a reference
		 * that we already have (the ref count can not drop to zero).
		 */
		if (uobj && uobj->pgops->pgo_detach)
			uobj->pgops->pgo_detach(uobj);

		if (merged) {
			dead = prev_entry->next;
			prev_entry->end = dead->end;
			uvm_map_entry_unlink(map, dead);
			if (dead->aref.ar_amap != NULL) {
				prev_entry->aref = dead->aref;
				dead->aref.ar_amap = NULL;
			}
		} else {
			prev_entry->next->start -= size;
			if (prev_entry != &map->header) {
				prev_entry->gap -= size;
				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
				uvm_rb_fixup(map, prev_entry);
			}
			if (uobj)
				prev_entry->next->offset = uoffset;
		}

		uvm_map_check(map, "map forwardmerged");

		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
		merged++;
	}
nomerge:
	if (!merged) {
		UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
		if (kmap) {
			UVMMAP_EVCNT_INCR(knomerge);
		} else {
			UVMMAP_EVCNT_INCR(unomerge);
		}

		/*
		 * allocate new entry and link it in.
		 */

		if (new_entry == NULL) {
			new_entry = uvm_mapent_alloc(map,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(new_entry == NULL)) {
				error = ENOMEM;
				goto done;
			}
		}
		new_entry->start = start;
		new_entry->end = new_entry->start + size;
		new_entry->object.uvm_obj = uobj;
		new_entry->offset = uoffset;

		new_entry->etype = newetype;

		if (flags & UVM_FLAG_NOMERGE) {
			new_entry->flags |= UVM_MAP_NOMERGE;
		}

		new_entry->protection = prot;
		new_entry->max_protection = maxprot;
		new_entry->inheritance = inherit;
		new_entry->wired_count = 0;
		new_entry->advice = advice;
		if (flags & UVM_FLAG_OVERLAY) {

			/*
			 * to_add: for BSS we overallocate a little since we
			 * are likely to extend
			 */

			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
				UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
			struct vm_amap *amap = amap_alloc(size, to_add,
			    (flags & UVM_FLAG_NOWAIT));
			if (__predict_false(amap == NULL)) {
				error = ENOMEM;
				goto done;
			}
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = amap;
		} else {
			new_entry->aref.ar_pageoff = 0;
			new_entry->aref.ar_amap = NULL;
		}
		uvm_map_entry_link(map, prev_entry, new_entry);

		/*
		 * Update the free space hint
		 */

		if ((map->first_free == prev_entry) &&
		    (prev_entry->end >= new_entry->start))
			map->first_free = new_entry;

		new_entry = NULL;
	}

	map->size += size;

	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	error = 0;

done:
	vm_map_unlock(map);

	if (new_entry) {
		uvm_mapent_free(new_entry);
	}
	if (dead) {
		KDASSERT(merged);
		uvm_mapent_free(dead);
	}
	if (dead_entries)
		uvm_unmap_detach(dead_entries, 0);

	return error;
}
/*
 * uvm_map_lookup_entry_bytree: lookup an entry in tree
 */

static inline bool
uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *prev = &map->header;
	struct vm_map_entry *cur = ROOT_ENTRY(map);

	while (cur) {
		UVMMAP_EVCNT_INCR(mlk_treeloop);
		if (address >= cur->start) {
			if (address < cur->end) {
				*entry = cur;
				return true;
			}
			prev = cur;
			cur = RIGHT_ENTRY(cur);
		} else
			cur = LEFT_ENTRY(cur);
	}
	*entry = prev;
	return false;
}

/*
 * uvm_map_lookup_entry: find map entry at or before an address
 *
 * => map must at least be read-locked by caller
 * => entry is returned in "entry"
 * => return value is true if address is in the returned entry
 */

bool
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
    struct vm_map_entry **entry	/* OUT */)
{
	struct vm_map_entry *cur;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
	    (uintptr_t)map, address, (uintptr_t)entry, 0);

	/*
	 * make a quick check to see if we are already looking at
	 * the entry we want (which is usually the case).  note also
	 * that we don't need to save the hint here...  it is the
	 * same hint (unless we are at the header, in which case the
	 * hint didn't buy us anything anyway).
	 */

	cur = map->hint;
	UVMMAP_EVCNT_INCR(mlk_call);
	if (cur != &map->header &&
	    address >= cur->start && cur->end > address) {
		UVMMAP_EVCNT_INCR(mlk_hint);
		*entry = cur;
		UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
		    (uintptr_t)cur, 0, 0, 0);
		uvm_mapent_check(*entry);
		return (true);
	}
	uvm_map_check(map, __func__);

	/*
	 * lookup in the tree.
	 */

	UVMMAP_EVCNT_INCR(mlk_tree);
	if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
		SAVE_HINT(map, map->hint, *entry);
		UVMHIST_LOG(maphist,"<- search got it (%#jx)",
		    (uintptr_t)cur, 0, 0, 0);
		KDASSERT((*entry)->start <= address);
		KDASSERT(address < (*entry)->end);
		uvm_mapent_check(*entry);
		return (true);
	}

	SAVE_HINT(map, map->hint, *entry);
	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
	KDASSERT((*entry)->next == &map->header ||
	    address < (*entry)->next->start);
	return (false);
}
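
/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry)) {
 *		// va lies inside [entry->start, entry->end)
 *	} else {
 *		// entry is the entry before va, or &map->header
 *	}
 *	vm_map_unlock_read(map);
 */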
/*
 * See if the range between start and start + length fits in the gap
 * between entry->end and entry->next->start.  Returns 1 if it fits,
 * 0 if it doesn't, and -1 if the address wraps around.
 */
static int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
    vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
{
	vaddr_t end;

#ifdef PMAP_PREFER
	/*
	 * push start address forward as needed to avoid VAC alias problems.
	 * we only do this if a valid offset is specified.
	 */

	if (uoffset != UVM_UNKNOWN_OFFSET)
		PMAP_PREFER(uoffset, start, length, topdown);
#endif
	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
		KASSERT(align < uvmexp.ncolors);
		if (uvmexp.ncolors > 1) {
			const u_int colormask = uvmexp.colormask;
			const u_int colorsize = colormask + 1;
			vaddr_t hint = atop(*start);
			const u_int color = hint & colormask;
			if (color != align) {
				hint -= color;	/* adjust to color boundary */
				KASSERT((hint & colormask) == 0);
				if (topdown) {
					if (align > color)
						hint -= colorsize;
				} else {
					if (align < color)
						hint += colorsize;
				}
				*start = ptoa(hint + align); /* adjust to color */
			}
		}
	} else {
		KASSERT(powerof2(align));
		uvm_map_align_va(start, align, topdown);
		/*
		 * XXX Should we PMAP_PREFER() here again?
		 * eh...i think we're okay
		 */
	}

	/*
	 * Find the end of the proposed new region.  Be sure we didn't
	 * wrap around the address; if so, we lose.  Otherwise, if the
	 * proposed new region fits before the next entry, we win.
	 */

	end = *start + length;
	if (end < *start)
		return (-1);

	if (entry->next->start >= end && *start >= entry->end)
		return (1);

	return (0);
}
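
/*
 * Color example (illustrative numbers, not from the original source):
 * with 4 page colors (colormask == 3) and align == 2 under
 * UVM_FLAG_COLORMATCH, a bottom-up *start on page 9 (color 1) is
 * first dropped to the color boundary at page 8 and then advanced to
 * page 10, the nearest higher page of color 2.
 */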
1769 */ 1770 1771 end = *start + length; 1772 if (end < *start) 1773 return (-1); 1774 1775 if (entry->next->start >= end && *start >= entry->end) 1776 return (1); 1777 1778 return (0); 1779 } 1780 1781 static void 1782 uvm_findspace_invariants(struct vm_map *map, vaddr_t orig_hint, vaddr_t length, 1783 struct uvm_object *uobj, voff_t uoffset, vsize_t align, int flags, 1784 vaddr_t hint, struct vm_map_entry *entry, int line) 1785 { 1786 const int topdown = map->flags & VM_MAP_TOPDOWN; 1787 1788 KASSERTMSG( topdown || hint >= orig_hint, 1789 "map=%p hint=%#"PRIxVADDR" orig_hint=%#"PRIxVADDR 1790 " length=%#"PRIxVSIZE" uobj=%p uoffset=%#llx align=%"PRIxVSIZE 1791 " flags=%#x entry=%p (uvm_map_findspace line %d)", 1792 map, hint, orig_hint, 1793 length, uobj, (unsigned long long)uoffset, align, 1794 flags, entry, line); 1795 KASSERTMSG(!topdown || hint <= orig_hint, 1796 "map=%p hint=%#"PRIxVADDR" orig_hint=%#"PRIxVADDR 1797 " length=%#"PRIxVSIZE" uobj=%p uoffset=%#llx align=%"PRIxVSIZE 1798 " flags=%#x entry=%p (uvm_map_findspace line %d)", 1799 map, hint, orig_hint, 1800 length, uobj, (unsigned long long)uoffset, align, 1801 flags, entry, line); 1802 } 1803 1804 /* 1805 * uvm_map_findspace: find "length" sized space in "map". 1806 * 1807 * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is 1808 * set in "flags" (in which case we insist on using "hint"). 1809 * => "result" is VA returned 1810 * => uobj/uoffset are to be used to handle VAC alignment, if required 1811 * => if "align" is non-zero, we attempt to align to that value. 1812 * => caller must at least have read-locked map 1813 * => returns NULL on failure, or pointer to prev. map entry if success 1814 * => note this is a cross between the old vm_map_findspace and vm_map_find 1815 */ 1816 1817 struct vm_map_entry * 1818 uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length, 1819 vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset, 1820 vsize_t align, int flags) 1821 { 1822 #define INVARIANTS() \ 1823 uvm_findspace_invariants(map, orig_hint, length, uobj, uoffset, align,\ 1824 flags, hint, entry, __LINE__) 1825 struct vm_map_entry *entry = NULL; 1826 struct vm_map_entry *child, *prev, *tmp; 1827 vaddr_t orig_hint __diagused; 1828 const int topdown = map->flags & VM_MAP_TOPDOWN; 1829 int avail; 1830 UVMHIST_FUNC(__func__); 1831 UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx...", 1832 (uintptr_t)map, hint, length, flags); 1833 UVMHIST_LOG(maphist, " uobj=%#jx, uoffset=%#jx, align=%#jx)", 1834 (uintptr_t)uobj, uoffset, align, 0); 1835 1836 KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || powerof2(align)); 1837 KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors); 1838 KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0); 1839 1840 uvm_map_check(map, "map_findspace entry"); 1841 1842 /* 1843 * Clamp the hint to the VM map's min/max address, and remmeber 1844 * the clamped original hint. Remember the original hint, 1845 * clamped to the min/max address. If we are aligning, then we 1846 * may have to try again with no alignment constraint if we 1847 * fail the first time. 1848 * 1849 * We use the original hint to verify later that the search has 1850 * been monotonic -- that is, nonincreasing or nondecreasing, 1851 * according to topdown or !topdown respectively. But the 1852 * clamping is not monotonic. 1853 */ 1854 if (hint < vm_map_min(map)) { /* check ranges ... 
		if (flags & UVM_FLAG_FIXED) {
			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
			return (NULL);
		}
		hint = vm_map_min(map);
	}
	if (hint > vm_map_max(map)) {
		UVMHIST_LOG(maphist,"<- VA %#jx > range [%#jx->%#jx]",
		    hint, vm_map_min(map), vm_map_max(map), 0);
		return (NULL);
	}
	orig_hint = hint;
	INVARIANTS();

	UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
	    hint, vm_map_min(map), vm_map_max(map), 0);

	/*
	 * hint may not be aligned properly; we need to round it up or
	 * down before proceeding further.
	 */
	if ((flags & UVM_FLAG_COLORMATCH) == 0) {
		uvm_map_align_va(&hint, align, topdown);
		INVARIANTS();
	}

	UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
	    hint, vm_map_min(map), vm_map_max(map), 0);
	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */

	/*
	 * @@@: there are four, no, eight cases to consider.
	 *
	 * 0: found,     fixed,     bottom up -> fail
	 * 1: found,     fixed,     top down  -> fail
	 * 2: found,     not fixed, bottom up -> start after entry->end,
	 *	loop up
	 * 3: found,     not fixed, top down  -> start before entry->start,
	 *	loop down
	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
	 * 6: not found, not fixed, bottom up -> check entry->next->start,
	 *	loop up
	 * 7: not found, not fixed, top down  -> check entry->next->start,
	 *	loop down
	 *
	 * as you can see, it reduces to roughly five cases, and that
	 * adding top down mapping only adds one unique case (without
	 * it, there would be four cases).
	 */

	if ((flags & UVM_FLAG_FIXED) == 0 &&
	    hint == (topdown ? vm_map_max(map) : vm_map_min(map))) {
		/*
		 * The uvm_map_findspace algorithm is monotonic -- for
		 * topdown VM it starts with a high hint and returns a
		 * lower free address; for !topdown VM it starts with a
		 * low hint and returns a higher free address.  As an
		 * optimization, start with the first (highest for
		 * topdown, lowest for !topdown) free address.
		 *
		 * XXX This `optimization' probably doesn't actually do
		 * much in practice unless userland explicitly passes
		 * the VM map's minimum or maximum address, which
		 * varies from machine to machine (VM_MAX/MIN_ADDRESS,
		 * e.g. 0x7fbfdfeff000 on amd64 but 0xfffffffff000 on
		 * aarch64) and may vary according to other factors
		 * like sysctl vm.user_va0_disable.  In particular, if
		 * the user specifies 0 as a hint to mmap, then mmap
		 * will choose a default address which is usually _not_
		 * VM_MAX/MIN_ADDRESS but something else instead like
		 * VM_MAX_ADDRESS - stack size - guard page overhead,
		 * in which case this branch is never hit.
		 *
		 * In fact, this branch appears to have been broken for
		 * two decades between when topdown was introduced in
		 * ~2003 and when it was adapted to handle the topdown
		 * case without violating the monotonicity assertion in
		 * 2022.  Maybe Someone^TM should either ditch the
		 * optimization or find a better way to do it.
		 */
		entry = map->first_free;
	} else {
		if (uvm_map_lookup_entry(map, hint, &entry)) {
			/* "hint" address already in use ... */
*/ 1943 if (flags & UVM_FLAG_FIXED) { 1944 UVMHIST_LOG(maphist, "<- fixed & VA in use", 1945 0, 0, 0, 0); 1946 return (NULL); 1947 } 1948 if (topdown) 1949 /* Start from lower gap. */ 1950 entry = entry->prev; 1951 } else if (flags & UVM_FLAG_FIXED) { 1952 if (entry->next->start >= hint + length && 1953 hint + length > hint) 1954 goto found; 1955 1956 /* "hint" address is gap but too small */ 1957 UVMHIST_LOG(maphist, "<- fixed mapping failed", 1958 0, 0, 0, 0); 1959 return (NULL); /* only one shot at it ... */ 1960 } else { 1961 /* 1962 * See if given hint fits in this gap. 1963 */ 1964 avail = uvm_map_space_avail(&hint, length, 1965 uoffset, align, flags, topdown, entry); 1966 INVARIANTS(); 1967 switch (avail) { 1968 case 1: 1969 goto found; 1970 case -1: 1971 goto wraparound; 1972 } 1973 1974 if (topdown) { 1975 /* 1976 * There is still a chance to fit 1977 * if hint > entry->end. 1978 */ 1979 } else { 1980 /* Start from higher gap. */ 1981 entry = entry->next; 1982 if (entry == &map->header) 1983 goto notfound; 1984 goto nextgap; 1985 } 1986 } 1987 } 1988 1989 /* 1990 * Note that the UVM_FLAG_FIXED case has already been handled above. 1991 */ 1992 KDASSERT((flags & UVM_FLAG_FIXED) == 0); 1993 1994 /* Try to find the space in the red-black tree */ 1995 1996 /* Check slot before any entry */ 1997 hint = topdown ? entry->next->start - length : entry->end; 1998 INVARIANTS(); 1999 avail = uvm_map_space_avail(&hint, length, uoffset, align, flags, 2000 topdown, entry); 2001 INVARIANTS(); 2002 switch (avail) { 2003 case 1: 2004 goto found; 2005 case -1: 2006 goto wraparound; 2007 } 2008 2009 nextgap: 2010 KDASSERT((flags & UVM_FLAG_FIXED) == 0); 2011 /* If there is not enough space in the whole tree, we fail */ 2012 tmp = ROOT_ENTRY(map); 2013 if (tmp == NULL || tmp->maxgap < length) 2014 goto notfound; 2015 2016 prev = NULL; /* previous candidate */ 2017 2018 /* Find an entry close to hint that has enough space */ 2019 for (; tmp;) { 2020 KASSERT(tmp->next->start == tmp->end + tmp->gap); 2021 if (topdown) { 2022 if (tmp->next->start < hint + length && 2023 (prev == NULL || tmp->end > prev->end)) { 2024 if (tmp->gap >= length) 2025 prev = tmp; 2026 else if ((child = LEFT_ENTRY(tmp)) != NULL 2027 && child->maxgap >= length) 2028 prev = tmp; 2029 } 2030 } else { 2031 if (tmp->end >= hint && 2032 (prev == NULL || tmp->end < prev->end)) { 2033 if (tmp->gap >= length) 2034 prev = tmp; 2035 else if ((child = RIGHT_ENTRY(tmp)) != NULL 2036 && child->maxgap >= length) 2037 prev = tmp; 2038 } 2039 } 2040 if (tmp->next->start < hint + length) 2041 child = RIGHT_ENTRY(tmp); 2042 else if (tmp->end > hint) 2043 child = LEFT_ENTRY(tmp); 2044 else { 2045 if (tmp->gap >= length) 2046 break; 2047 if (topdown) 2048 child = LEFT_ENTRY(tmp); 2049 else 2050 child = RIGHT_ENTRY(tmp); 2051 } 2052 if (child == NULL || child->maxgap < length) 2053 break; 2054 tmp = child; 2055 } 2056 2057 if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) { 2058 /* 2059 * Check if the entry that we found satisfies the 2060 * space requirement 2061 */ 2062 if (topdown) { 2063 if (hint > tmp->next->start - length) 2064 hint = tmp->next->start - length; 2065 } else { 2066 if (hint < tmp->end) 2067 hint = tmp->end; 2068 } 2069 INVARIANTS(); 2070 avail = uvm_map_space_avail(&hint, length, uoffset, align, 2071 flags, topdown, tmp); 2072 INVARIANTS(); 2073 switch (avail) { 2074 case 1: 2075 entry = tmp; 2076 goto found; 2077 case -1: 2078 goto wraparound; 2079 } 2080 if (tmp->gap >= length) 2081 goto listsearch; 2082 } 2083 if (prev ==
NULL) 2084 goto notfound; 2085 2086 if (topdown) { 2087 KASSERT(orig_hint >= prev->next->start - length || 2088 prev->next->start - length > prev->next->start); 2089 hint = prev->next->start - length; 2090 } else { 2091 KASSERT(orig_hint <= prev->end); 2092 hint = prev->end; 2093 } 2094 INVARIANTS(); 2095 avail = uvm_map_space_avail(&hint, length, uoffset, align, 2096 flags, topdown, prev); 2097 INVARIANTS(); 2098 switch (avail) { 2099 case 1: 2100 entry = prev; 2101 goto found; 2102 case -1: 2103 goto wraparound; 2104 } 2105 if (prev->gap >= length) 2106 goto listsearch; 2107 2108 if (topdown) 2109 tmp = LEFT_ENTRY(prev); 2110 else 2111 tmp = RIGHT_ENTRY(prev); 2112 for (;;) { 2113 KASSERT(tmp && tmp->maxgap >= length); 2114 if (topdown) 2115 child = RIGHT_ENTRY(tmp); 2116 else 2117 child = LEFT_ENTRY(tmp); 2118 if (child && child->maxgap >= length) { 2119 tmp = child; 2120 continue; 2121 } 2122 if (tmp->gap >= length) 2123 break; 2124 if (topdown) 2125 tmp = LEFT_ENTRY(tmp); 2126 else 2127 tmp = RIGHT_ENTRY(tmp); 2128 } 2129 2130 if (topdown) { 2131 KASSERT(orig_hint >= tmp->next->start - length || 2132 tmp->next->start - length > tmp->next->start); 2133 hint = tmp->next->start - length; 2134 } else { 2135 KASSERT(orig_hint <= tmp->end); 2136 hint = tmp->end; 2137 } 2138 INVARIANTS(); 2139 avail = uvm_map_space_avail(&hint, length, uoffset, align, 2140 flags, topdown, tmp); 2141 INVARIANTS(); 2142 switch (avail) { 2143 case 1: 2144 entry = tmp; 2145 goto found; 2146 case -1: 2147 goto wraparound; 2148 } 2149 2150 /* 2151 * The tree fails to find an entry because of offset or alignment 2152 * restrictions. Search the list instead. 2153 */ 2154 listsearch: 2155 /* 2156 * Look through the rest of the map, trying to fit a new region in 2157 * the gap between existing regions, or after the very last region. 2158 * note: entry->end = base VA of current gap, 2159 * entry->next->start = VA of end of current gap 2160 */ 2161 2162 INVARIANTS(); 2163 for (;;) { 2164 /* Update hint for current gap. */ 2165 hint = topdown ? entry->next->start - length : entry->end; 2166 INVARIANTS(); 2167 2168 /* See if it fits. */ 2169 avail = uvm_map_space_avail(&hint, length, uoffset, align, 2170 flags, topdown, entry); 2171 INVARIANTS(); 2172 switch (avail) { 2173 case 1: 2174 goto found; 2175 case -1: 2176 goto wraparound; 2177 } 2178 2179 /* Advance to next/previous gap */ 2180 if (topdown) { 2181 if (entry == &map->header) { 2182 UVMHIST_LOG(maphist, "<- failed (off start)", 2183 0,0,0,0); 2184 goto notfound; 2185 } 2186 entry = entry->prev; 2187 } else { 2188 entry = entry->next; 2189 if (entry == &map->header) { 2190 UVMHIST_LOG(maphist, "<- failed (off end)", 2191 0,0,0,0); 2192 goto notfound; 2193 } 2194 } 2195 } 2196 2197 found: 2198 SAVE_HINT(map, map->hint, entry); 2199 *result = hint; 2200 UVMHIST_LOG(maphist,"<- got it! 
(result=%#jx)", hint, 0,0,0); 2201 INVARIANTS(); 2202 KASSERT(entry->end <= hint); 2203 KASSERT(hint + length <= entry->next->start); 2204 return (entry); 2205 2206 wraparound: 2207 UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0); 2208 2209 return (NULL); 2210 2211 notfound: 2212 UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0); 2213 2214 return (NULL); 2215 #undef INVARIANTS 2216 } 2217 2218 /* 2219 * U N M A P - m a i n h e l p e r f u n c t i o n s 2220 */ 2221 2222 /* 2223 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end") 2224 * 2225 * => caller must check alignment and size 2226 * => map must be locked by caller 2227 * => we return a list of map entries that we've removed from the map 2228 * in "entry_list" 2229 */ 2230 2231 void 2232 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end, 2233 struct vm_map_entry **entry_list /* OUT */, int flags) 2234 { 2235 struct vm_map_entry *entry, *first_entry, *next; 2236 vaddr_t len; 2237 UVMHIST_FUNC(__func__); 2238 UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)", 2239 (uintptr_t)map, start, end, 0); 2240 VM_MAP_RANGE_CHECK(map, start, end); 2241 2242 uvm_map_check(map, "unmap_remove entry"); 2243 2244 /* 2245 * find first entry 2246 */ 2247 2248 if (uvm_map_lookup_entry(map, start, &first_entry) == true) { 2249 /* clip and go... */ 2250 entry = first_entry; 2251 UVM_MAP_CLIP_START(map, entry, start); 2252 /* critical! prevents stale hint */ 2253 SAVE_HINT(map, entry, entry->prev); 2254 } else { 2255 entry = first_entry->next; 2256 } 2257 2258 /* 2259 * save the free space hint 2260 */ 2261 2262 if (map->first_free != &map->header && map->first_free->start >= start) 2263 map->first_free = entry->prev; 2264 2265 /* 2266 * note: we now re-use first_entry for a different task. we remove 2267 * a number of map entries from the map and save them in a linked 2268 * list headed by "first_entry". once we remove them from the map 2269 * the caller should unlock the map and drop the references to the 2270 * backing objects [c.f. uvm_unmap_detach]. the objective is to 2271 * separate unmapping from reference dropping. why? 2272 * [1] the map has to be locked for unmapping 2273 * [2] the map need not be locked for reference dropping 2274 * [3] dropping references may trigger pager I/O, and if we hit 2275 * a pager that does synchronous I/O we may have to wait for it. 2276 * [4] we would like all waiting for I/O to occur with maps unlocked 2277 * so that we don't block other threads. 2278 */ 2279 2280 first_entry = NULL; 2281 *entry_list = NULL; 2282 2283 /* 2284 * break up the area into map entry sized regions and unmap. note 2285 * that all mappings have to be removed before we can even consider 2286 * dropping references to amaps or VM objects (otherwise we could end 2287 * up with a mapping to a page on the free list which would be very bad) 2288 */ 2289 2290 while ((entry != &map->header) && (entry->start < end)) { 2291 KASSERT((entry->flags & UVM_MAP_STATIC) == 0); 2292 2293 UVM_MAP_CLIP_END(map, entry, end); 2294 next = entry->next; 2295 len = entry->end - entry->start; 2296 2297 /* 2298 * unwire before removing addresses from the pmap; otherwise 2299 * unwiring will put the entries back into the pmap (XXX).
2300 */ 2301 2302 if (VM_MAPENT_ISWIRED(entry)) { 2303 uvm_map_entry_unwire(map, entry); 2304 } 2305 if (flags & UVM_FLAG_VAONLY) { 2306 2307 /* nothing */ 2308 2309 } else if ((map->flags & VM_MAP_PAGEABLE) == 0) { 2310 2311 /* 2312 * if the map is non-pageable, any pages mapped there 2313 * must be wired and entered with pmap_kenter_pa(), 2314 * and we should free any such pages immediately. 2315 * this is mostly used for kmem_map. 2316 */ 2317 KASSERT(vm_map_pmap(map) == pmap_kernel()); 2318 2319 uvm_km_pgremove_intrsafe(map, entry->start, entry->end); 2320 } else if (UVM_ET_ISOBJ(entry) && 2321 UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) { 2322 panic("%s: kernel object %p %p\n", 2323 __func__, map, entry); 2324 } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) { 2325 /* 2326 * remove mappings the standard way. lock object 2327 * and/or amap to ensure vm_page state does not 2328 * change while in pmap_remove(). 2329 */ 2330 2331 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */ 2332 uvm_map_lock_entry(entry, RW_WRITER); 2333 #else 2334 uvm_map_lock_entry(entry, RW_READER); 2335 #endif 2336 pmap_remove(map->pmap, entry->start, entry->end); 2337 2338 /* 2339 * note: if map is dying, leave pmap_update() for 2340 * later. if the map is to be reused (exec) then 2341 * pmap_update() will be called. if the map is 2342 * being disposed of (exit) then pmap_destroy() 2343 * will be called. 2344 */ 2345 2346 if ((map->flags & VM_MAP_DYING) == 0) { 2347 pmap_update(vm_map_pmap(map)); 2348 } else { 2349 KASSERT(vm_map_pmap(map) != pmap_kernel()); 2350 } 2351 2352 uvm_map_unlock_entry(entry); 2353 } 2354 2355 #if defined(UVMDEBUG) 2356 /* 2357 * check for any remaining mappings; finding one 2358 * here indicates a bug in the caller. 2359 */ 2360 2361 vaddr_t va; 2362 for (va = entry->start; va < entry->end; 2363 va += PAGE_SIZE) { 2364 if (pmap_extract(vm_map_pmap(map), va, NULL)) { 2365 panic("%s: %#"PRIxVADDR" has mapping", 2366 __func__, va); 2367 } 2368 } 2369 2370 if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) { 2371 uvm_km_check_empty(map, entry->start, 2372 entry->end); 2373 } 2374 #endif /* defined(UVMDEBUG) */ 2375 2376 /* 2377 * remove entry from map and put it on our list of entries 2378 * that we've nuked. then go to next entry. 2379 */ 2380 2381 UVMHIST_LOG(maphist, " removed map entry %#jx", 2382 (uintptr_t)entry, 0, 0, 0); 2383 2384 /* critical! prevents stale hint */ 2385 SAVE_HINT(map, entry, entry->prev); 2386 2387 uvm_map_entry_unlink(map, entry); 2388 KASSERT(map->size >= len); 2389 map->size -= len; 2390 entry->prev = NULL; 2391 entry->next = first_entry; 2392 first_entry = entry; 2393 entry = next; 2394 } 2395 2396 uvm_map_check(map, "unmap_remove leave"); 2397 2398 /* 2399 * now we've cleaned up the map and are ready for the caller to drop 2400 * references to the mapped objects. 2401 */ 2402 2403 *entry_list = first_entry; 2404 UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); 2405 2406 if (map->flags & VM_MAP_WANTVA) { 2407 mutex_enter(&map->misc_lock); 2408 map->flags &= ~VM_MAP_WANTVA; 2409 cv_broadcast(&map->cv); 2410 mutex_exit(&map->misc_lock); 2411 } 2412 } 2413 2414 /* 2415 * uvm_unmap_detach: drop references in a chain of map entries 2416 * 2417 * => we will free the map entries as we traverse the list.
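 * => a sketch of the usual calling sequence, for illustration
 *    (cf. the uvm_unmap() wrapper):
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);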
2418 */ 2419 2420 void 2421 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags) 2422 { 2423 struct vm_map_entry *next_entry; 2424 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 2425 2426 while (first_entry) { 2427 KASSERT(!VM_MAPENT_ISWIRED(first_entry)); 2428 UVMHIST_LOG(maphist, 2429 " detach %#jx: amap=%#jx, obj=%#jx, submap?=%jd", 2430 (uintptr_t)first_entry, 2431 (uintptr_t)first_entry->aref.ar_amap, 2432 (uintptr_t)first_entry->object.uvm_obj, 2433 UVM_ET_ISSUBMAP(first_entry)); 2434 2435 /* 2436 * drop reference to amap, if we've got one 2437 */ 2438 2439 if (first_entry->aref.ar_amap) 2440 uvm_map_unreference_amap(first_entry, flags); 2441 2442 /* 2443 * drop reference to our backing object, if we've got one 2444 */ 2445 2446 KASSERT(!UVM_ET_ISSUBMAP(first_entry)); 2447 if (UVM_ET_ISOBJ(first_entry) && 2448 first_entry->object.uvm_obj->pgops->pgo_detach) { 2449 (*first_entry->object.uvm_obj->pgops->pgo_detach) 2450 (first_entry->object.uvm_obj); 2451 } 2452 next_entry = first_entry->next; 2453 uvm_mapent_free(first_entry); 2454 first_entry = next_entry; 2455 } 2456 UVMHIST_LOG(maphist, "<- done", 0,0,0,0); 2457 } 2458 2459 /* 2460 * E X T R A C T I O N F U N C T I O N S 2461 */ 2462 2463 /* 2464 * uvm_map_reserve: reserve space in a vm_map for future use. 2465 * 2466 * => we reserve space in a map by putting a dummy map entry in the 2467 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE) 2468 * => map should be unlocked (we will write lock it) 2469 * => we return true if we were able to reserve space 2470 * => XXXCDC: should be inline? 2471 */ 2472 2473 int 2474 uvm_map_reserve(struct vm_map *map, vsize_t size, 2475 vaddr_t offset /* hint for pmap_prefer */, 2476 vsize_t align /* alignment */, 2477 vaddr_t *raddr /* IN:hint, OUT: reserved VA */, 2478 uvm_flag_t flags /* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */) 2479 { 2480 UVMHIST_FUNC(__func__); 2481 UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)", 2482 (uintptr_t)map, size, offset, (uintptr_t)raddr); 2483 2484 size = round_page(size); 2485 2486 /* 2487 * reserve some virtual space. 2488 */ 2489 2490 if (uvm_map(map, raddr, size, NULL, offset, align, 2491 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, 2492 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) { 2493 UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); 2494 return (false); 2495 } 2496 2497 UVMHIST_LOG(maphist, "<- done (*raddr=%#jx)", *raddr,0,0,0); 2498 return (true); 2499 } 2500 2501 /* 2502 * uvm_map_replace: replace a reserved (blank) area of memory with 2503 * real mappings. 
2504 * 2505 * => caller must WRITE-LOCK the map 2506 * => we return true if replacement was a success 2507 * => we expect the newents chain to have nnewents entrys on it and 2508 * we expect newents->prev to point to the last entry on the list 2509 * => note newents is allowed to be NULL 2510 */ 2511 2512 static int 2513 uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end, 2514 struct vm_map_entry *newents, int nnewents, vsize_t nsize, 2515 struct vm_map_entry **oldentryp) 2516 { 2517 struct vm_map_entry *oldent, *last; 2518 2519 uvm_map_check(map, "map_replace entry"); 2520 2521 /* 2522 * first find the blank map entry at the specified address 2523 */ 2524 2525 if (!uvm_map_lookup_entry(map, start, &oldent)) { 2526 return (false); 2527 } 2528 2529 /* 2530 * check to make sure we have a proper blank entry 2531 */ 2532 2533 if (end < oldent->end) { 2534 UVM_MAP_CLIP_END(map, oldent, end); 2535 } 2536 if (oldent->start != start || oldent->end != end || 2537 oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) { 2538 return (false); 2539 } 2540 2541 #ifdef DIAGNOSTIC 2542 2543 /* 2544 * sanity check the newents chain 2545 */ 2546 2547 { 2548 struct vm_map_entry *tmpent = newents; 2549 int nent = 0; 2550 vsize_t sz = 0; 2551 vaddr_t cur = start; 2552 2553 while (tmpent) { 2554 nent++; 2555 sz += tmpent->end - tmpent->start; 2556 if (tmpent->start < cur) 2557 panic("uvm_map_replace1"); 2558 if (tmpent->start >= tmpent->end || tmpent->end > end) { 2559 panic("uvm_map_replace2: " 2560 "tmpent->start=%#"PRIxVADDR 2561 ", tmpent->end=%#"PRIxVADDR 2562 ", end=%#"PRIxVADDR, 2563 tmpent->start, tmpent->end, end); 2564 } 2565 cur = tmpent->end; 2566 if (tmpent->next) { 2567 if (tmpent->next->prev != tmpent) 2568 panic("uvm_map_replace3"); 2569 } else { 2570 if (newents->prev != tmpent) 2571 panic("uvm_map_replace4"); 2572 } 2573 tmpent = tmpent->next; 2574 } 2575 if (nent != nnewents) 2576 panic("uvm_map_replace5"); 2577 if (sz != nsize) 2578 panic("uvm_map_replace6"); 2579 } 2580 #endif 2581 2582 /* 2583 * map entry is a valid blank! replace it. (this does all the 2584 * work of map entry link/unlink...). 2585 */ 2586 2587 if (newents) { 2588 last = newents->prev; 2589 2590 /* critical: flush stale hints out of map */ 2591 SAVE_HINT(map, map->hint, newents); 2592 if (map->first_free == oldent) 2593 map->first_free = last; 2594 2595 last->next = oldent->next; 2596 last->next->prev = last; 2597 2598 /* Fix RB tree */ 2599 uvm_rb_remove(map, oldent); 2600 2601 newents->prev = oldent->prev; 2602 newents->prev->next = newents; 2603 map->nentries = map->nentries + (nnewents - 1); 2604 2605 /* Fixup the RB tree */ 2606 { 2607 int i; 2608 struct vm_map_entry *tmp; 2609 2610 tmp = newents; 2611 for (i = 0; i < nnewents && tmp; i++) { 2612 uvm_rb_insert(map, tmp); 2613 tmp = tmp->next; 2614 } 2615 } 2616 } else { 2617 /* NULL list of new entries: just remove the old one */ 2618 clear_hints(map, oldent); 2619 uvm_map_entry_unlink(map, oldent); 2620 } 2621 map->size -= end - start - nsize; 2622 2623 uvm_map_check(map, "map_replace leave"); 2624 2625 /* 2626 * now we can free the old blank entry and return. 
2627 */ 2628 2629 *oldentryp = oldent; 2630 return (true); 2631 } 2632 2633 /* 2634 * uvm_map_extract: extract a mapping from a map and put it somewhere 2635 * (maybe removing the old mapping) 2636 * 2637 * => maps should be unlocked (we will write lock them) 2638 * => returns 0 on success, error code otherwise 2639 * => start must be page aligned 2640 * => len must be page sized 2641 * => flags: 2642 * UVM_EXTRACT_REMOVE: remove mappings from srcmap 2643 * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only) 2644 * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs 2645 * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go 2646 * UVM_EXTRACT_PROT_ALL: set prot to UVM_PROT_ALL as we go 2647 * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<< 2648 * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only 2649 * be used from within the kernel in a kernel level map <<< 2650 */ 2651 2652 int 2653 uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len, 2654 struct vm_map *dstmap, vaddr_t *dstaddrp, int flags) 2655 { 2656 vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge; 2657 struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry, 2658 *deadentry, *oldentry; 2659 struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */ 2660 vsize_t elen __unused; 2661 int nchain, error, copy_ok; 2662 vsize_t nsize; 2663 UVMHIST_FUNC(__func__); 2664 UVMHIST_CALLARGS(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx", 2665 (uintptr_t)srcmap, start, len, 0); 2666 UVMHIST_LOG(maphist," ...,dstmap=%#jx, flags=%#jx)", 2667 (uintptr_t)dstmap, flags, 0, 0); 2668 2669 /* 2670 * step 0: sanity check: start must be on a page boundary, length 2671 * must be page sized. can't ask for CONTIG/QREF if you asked for 2672 * REMOVE. 2673 */ 2674 2675 KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0); 2676 KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 || 2677 (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0); 2678 2679 /* 2680 * step 1: reserve space in the target map for the extracted area 2681 */ 2682 2683 if ((flags & UVM_EXTRACT_RESERVED) == 0) { 2684 dstaddr = vm_map_min(dstmap); 2685 if (!uvm_map_reserve(dstmap, len, start, 2686 atop(start) & uvmexp.colormask, &dstaddr, 2687 UVM_FLAG_COLORMATCH)) 2688 return (ENOMEM); 2689 KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0); 2690 *dstaddrp = dstaddr; /* pass address back to caller */ 2691 UVMHIST_LOG(maphist, " dstaddr=%#jx", dstaddr,0,0,0); 2692 } else { 2693 dstaddr = *dstaddrp; 2694 } 2695 2696 /* 2697 * step 2: setup for the extraction process loop by init'ing the 2698 * map entry chain, locking src map, and looking up the first useful 2699 * entry in the map. 2700 */ 2701 2702 end = start + len; 2703 newend = dstaddr + len; 2704 chain = endchain = NULL; 2705 nchain = 0; 2706 nsize = 0; 2707 vm_map_lock(srcmap); 2708 2709 if (uvm_map_lookup_entry(srcmap, start, &entry)) { 2710 2711 /* "start" is within an entry */ 2712 if (flags & UVM_EXTRACT_QREF) { 2713 2714 /* 2715 * for quick references we don't clip the entry, so 2716 * the entry may map space "before" the starting 2717 * virtual address... this is the "fudge" factor 2718 * (which can be non-zero only the first time 2719 * through the "while" loop in step 3). 
2720 */ 2721 2722 fudge = start - entry->start; 2723 } else { 2724 2725 /* 2726 * normal reference: we clip the map to fit (thus 2727 * fudge is zero) 2728 */ 2729 2730 UVM_MAP_CLIP_START(srcmap, entry, start); 2731 SAVE_HINT(srcmap, srcmap->hint, entry->prev); 2732 fudge = 0; 2733 } 2734 } else { 2735 2736 /* "start" is not within an entry ... skip to next entry */ 2737 if (flags & UVM_EXTRACT_CONTIG) { 2738 error = EINVAL; 2739 goto bad; /* definite hole here ... */ 2740 } 2741 2742 entry = entry->next; 2743 fudge = 0; 2744 } 2745 2746 /* save values from srcmap for step 6 */ 2747 orig_entry = entry; 2748 orig_fudge = fudge; 2749 2750 /* 2751 * step 3: now start looping through the map entries, extracting 2752 * as we go. 2753 */ 2754 2755 while (entry->start < end && entry != &srcmap->header) { 2756 2757 /* if we are not doing a quick reference, clip it */ 2758 if ((flags & UVM_EXTRACT_QREF) == 0) 2759 UVM_MAP_CLIP_END(srcmap, entry, end); 2760 2761 /* clear needs_copy (allow chunking) */ 2762 if (UVM_ET_ISNEEDSCOPY(entry)) { 2763 amap_copy(srcmap, entry, 2764 AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end); 2765 if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */ 2766 error = ENOMEM; 2767 goto bad; 2768 } 2769 2770 /* amap_copy could clip (during chunk)! update fudge */ 2771 if (fudge) { 2772 fudge = start - entry->start; 2773 orig_fudge = fudge; 2774 } 2775 } 2776 2777 /* calculate the offset of this from "start" */ 2778 oldoffset = (entry->start + fudge) - start; 2779 2780 /* allocate a new map entry */ 2781 newentry = uvm_mapent_alloc(dstmap, 0); 2782 if (newentry == NULL) { 2783 error = ENOMEM; 2784 goto bad; 2785 } 2786 2787 /* set up new map entry */ 2788 newentry->next = NULL; 2789 newentry->prev = endchain; 2790 newentry->start = dstaddr + oldoffset; 2791 newentry->end = 2792 newentry->start + (entry->end - (entry->start + fudge)); 2793 if (newentry->end > newend || newentry->end < newentry->start) 2794 newentry->end = newend; 2795 newentry->object.uvm_obj = entry->object.uvm_obj; 2796 if (newentry->object.uvm_obj) { 2797 if (newentry->object.uvm_obj->pgops->pgo_reference) 2798 newentry->object.uvm_obj->pgops-> 2799 pgo_reference(newentry->object.uvm_obj); 2800 newentry->offset = entry->offset + fudge; 2801 } else { 2802 newentry->offset = 0; 2803 } 2804 newentry->etype = entry->etype; 2805 if (flags & UVM_EXTRACT_PROT_ALL) { 2806 newentry->protection = newentry->max_protection = 2807 UVM_PROT_ALL; 2808 } else { 2809 newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ? 2810 entry->max_protection : entry->protection; 2811 newentry->max_protection = entry->max_protection; 2812 } 2813 newentry->inheritance = entry->inheritance; 2814 newentry->wired_count = 0; 2815 newentry->aref.ar_amap = entry->aref.ar_amap; 2816 if (newentry->aref.ar_amap) { 2817 newentry->aref.ar_pageoff = 2818 entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT); 2819 uvm_map_reference_amap(newentry, AMAP_SHARED | 2820 ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0)); 2821 } else { 2822 newentry->aref.ar_pageoff = 0; 2823 } 2824 newentry->advice = entry->advice; 2825 if ((flags & UVM_EXTRACT_QREF) != 0) { 2826 newentry->flags |= UVM_MAP_NOMERGE; 2827 } 2828 2829 /* now link it on the chain */ 2830 nchain++; 2831 nsize += newentry->end - newentry->start; 2832 if (endchain == NULL) { 2833 chain = endchain = newentry; 2834 } else { 2835 endchain->next = newentry; 2836 endchain = newentry; 2837 } 2838 2839 /* end of 'while' loop! 
*/ 2840 if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end && 2841 (entry->next == &srcmap->header || 2842 entry->next->start != entry->end)) { 2843 error = EINVAL; 2844 goto bad; 2845 } 2846 entry = entry->next; 2847 fudge = 0; 2848 } 2849 2850 /* 2851 * step 4: close off chain (in format expected by uvm_map_replace) 2852 */ 2853 2854 if (chain) 2855 chain->prev = endchain; 2856 2857 /* 2858 * step 5: attempt to lock the dest map so we can pmap_copy. 2859 * note usage of copy_ok: 2860 * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5) 2861 * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7 2862 */ 2863 2864 if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) { 2865 copy_ok = 1; 2866 if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain, 2867 nchain, nsize, &resentry)) { 2868 if (srcmap != dstmap) 2869 vm_map_unlock(dstmap); 2870 error = EIO; 2871 goto bad; 2872 } 2873 } else { 2874 copy_ok = 0; 2875 /* replace deferred until step 7 */ 2876 } 2877 2878 /* 2879 * step 6: traverse the srcmap a second time to do the following: 2880 * - if we got a lock on the dstmap do pmap_copy 2881 * - if UVM_EXTRACT_REMOVE remove the entries 2882 * we make use of orig_entry and orig_fudge (saved in step 2) 2883 */ 2884 2885 if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) { 2886 2887 /* purge possible stale hints from srcmap */ 2888 if (flags & UVM_EXTRACT_REMOVE) { 2889 SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev); 2890 if (srcmap->first_free != &srcmap->header && 2891 srcmap->first_free->start >= start) 2892 srcmap->first_free = orig_entry->prev; 2893 } 2894 2895 entry = orig_entry; 2896 fudge = orig_fudge; 2897 deadentry = NULL; /* for UVM_EXTRACT_REMOVE */ 2898 2899 while (entry->start < end && entry != &srcmap->header) { 2900 if (copy_ok) { 2901 oldoffset = (entry->start + fudge) - start; 2902 elen = MIN(end, entry->end) - 2903 (entry->start + fudge); 2904 pmap_copy(dstmap->pmap, srcmap->pmap, 2905 dstaddr + oldoffset, elen, 2906 entry->start + fudge); 2907 } 2908 2909 /* we advance "entry" in the following if statement */ 2910 if (flags & UVM_EXTRACT_REMOVE) { 2911 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */ 2912 uvm_map_lock_entry(entry, RW_WRITER); 2913 #else 2914 uvm_map_lock_entry(entry, RW_READER); 2915 #endif 2916 pmap_remove(srcmap->pmap, entry->start, 2917 entry->end); 2918 uvm_map_unlock_entry(entry); 2919 oldentry = entry; /* save entry */ 2920 entry = entry->next; /* advance */ 2921 uvm_map_entry_unlink(srcmap, oldentry); 2922 /* add to dead list */ 2923 oldentry->next = deadentry; 2924 deadentry = oldentry; 2925 } else { 2926 entry = entry->next; /* advance */ 2927 } 2928 2929 /* end of 'while' loop */ 2930 fudge = 0; 2931 } 2932 pmap_update(srcmap->pmap); 2933 2934 /* 2935 * unlock dstmap. we will dispose of deadentry in 2936 * step 7 if needed 2937 */ 2938 2939 if (copy_ok && srcmap != dstmap) 2940 vm_map_unlock(dstmap); 2941 2942 } else { 2943 deadentry = NULL; 2944 } 2945 2946 /* 2947 * step 7: we are done with the source map, unlock. if copy_ok 2948 * is 0 then we have not replaced the dummy mapping in dstmap yet 2949 * and we need to do so now.
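 * (note that "error" briefly holds the boolean result of
 * uvm_map_replace() there, hence the error == false test)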
2950 */ 2951 2952 vm_map_unlock(srcmap); 2953 if ((flags & UVM_EXTRACT_REMOVE) && deadentry) 2954 uvm_unmap_detach(deadentry, 0); /* dispose of old entries */ 2955 2956 /* now do the replacement if we didn't do it in step 5 */ 2957 if (copy_ok == 0) { 2958 vm_map_lock(dstmap); 2959 error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain, 2960 nchain, nsize, &resentry); 2961 vm_map_unlock(dstmap); 2962 2963 if (error == false) { 2964 error = EIO; 2965 goto bad2; 2966 } 2967 } 2968 2969 if (resentry != NULL) 2970 uvm_mapent_free(resentry); 2971 2972 return (0); 2973 2974 /* 2975 * bad: failure recovery 2976 */ 2977 bad: 2978 vm_map_unlock(srcmap); 2979 bad2: /* src already unlocked */ 2980 if (chain) 2981 uvm_unmap_detach(chain, 2982 (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0); 2983 2984 if (resentry != NULL) 2985 uvm_mapent_free(resentry); 2986 2987 if ((flags & UVM_EXTRACT_RESERVED) == 0) { 2988 uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */ 2989 } 2990 return (error); 2991 } 2992 2993 /* end of extraction functions */ 2994 2995 /* 2996 * uvm_map_submap: punch down part of a map into a submap 2997 * 2998 * => only the kernel_map is allowed to be submapped 2999 * => the purpose of submapping is to break up the locking granularity 3000 * of a larger map 3001 * => the range specified must have been mapped previously with a uvm_map() 3002 * call [with uobj==NULL] to create a blank map entry in the main map. 3003 * [And it had better still be blank!] 3004 * => maps which contain submaps should never be copied or forked. 3005 * => to remove a submap, use uvm_unmap() on the main map 3006 * and then uvm_map_deallocate() the submap. 3007 * => main map must be unlocked. 3008 * => submap must have been init'd and have a zero reference count. 3009 * [need not be locked as we don't actually reference it] 3010 */ 3011 3012 int 3013 uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end, 3014 struct vm_map *submap) 3015 { 3016 struct vm_map_entry *entry; 3017 int error; 3018 3019 vm_map_lock(map); 3020 VM_MAP_RANGE_CHECK(map, start, end); 3021 3022 if (uvm_map_lookup_entry(map, start, &entry)) { 3023 UVM_MAP_CLIP_START(map, entry, start); 3024 UVM_MAP_CLIP_END(map, entry, end); /* to be safe */ 3025 } else { 3026 entry = NULL; 3027 } 3028 3029 if (entry != NULL && 3030 entry->start == start && entry->end == end && 3031 entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL && 3032 !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) { 3033 entry->etype |= UVM_ET_SUBMAP; 3034 entry->object.sub_map = submap; 3035 entry->offset = 0; 3036 uvm_map_reference(submap); 3037 error = 0; 3038 } else { 3039 error = EINVAL; 3040 } 3041 vm_map_unlock(map); 3042 3043 return error; 3044 } 3045 3046 /* 3047 * uvm_map_protect_user: change map protection on behalf of the user. 3048 * Enforces PAX settings as necessary. 3049 */ 3050 int 3051 uvm_map_protect_user(struct lwp *l, vaddr_t start, vaddr_t end, 3052 vm_prot_t new_prot) 3053 { 3054 int error; 3055 3056 if ((error = PAX_MPROTECT_VALIDATE(l, new_prot))) 3057 return error; 3058 3059 return uvm_map_protect(&l->l_proc->p_vmspace->vm_map, start, end, 3060 new_prot, false); 3061 } 3062 3063 3064 /* 3065 * uvm_map_protect: change map protection 3066 * 3067 * => set_max means set max_protection. 3068 * => map must be unlocked. 3069 */ 3070 3071 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? 
\ 3072 ~VM_PROT_WRITE : VM_PROT_ALL) 3073 3074 int 3075 uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, 3076 vm_prot_t new_prot, bool set_max) 3077 { 3078 struct vm_map_entry *current, *entry; 3079 int error = 0; 3080 UVMHIST_FUNC(__func__); 3081 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)", 3082 (uintptr_t)map, start, end, new_prot); 3083 3084 vm_map_lock(map); 3085 VM_MAP_RANGE_CHECK(map, start, end); 3086 if (uvm_map_lookup_entry(map, start, &entry)) { 3087 UVM_MAP_CLIP_START(map, entry, start); 3088 } else { 3089 entry = entry->next; 3090 } 3091 3092 /* 3093 * make a first pass to check for protection violations. 3094 */ 3095 3096 current = entry; 3097 while ((current != &map->header) && (current->start < end)) { 3098 if (UVM_ET_ISSUBMAP(current)) { 3099 error = EINVAL; 3100 goto out; 3101 } 3102 if ((new_prot & current->max_protection) != new_prot) { 3103 error = EACCES; 3104 goto out; 3105 } 3106 /* 3107 * Don't allow VM_PROT_EXECUTE to be set on entries that 3108 * point to vnodes that are associated with a NOEXEC file 3109 * system. 3110 */ 3111 if (UVM_ET_ISOBJ(current) && 3112 UVM_OBJ_IS_VNODE(current->object.uvm_obj)) { 3113 struct vnode *vp = 3114 (struct vnode *) current->object.uvm_obj; 3115 3116 if ((new_prot & VM_PROT_EXECUTE) != 0 && 3117 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) { 3118 error = EACCES; 3119 goto out; 3120 } 3121 } 3122 3123 current = current->next; 3124 } 3125 3126 /* go back and fix up protections (no need to clip this time). */ 3127 3128 current = entry; 3129 while ((current != &map->header) && (current->start < end)) { 3130 vm_prot_t old_prot; 3131 3132 UVM_MAP_CLIP_END(map, current, end); 3133 old_prot = current->protection; 3134 if (set_max) 3135 current->protection = 3136 (current->max_protection = new_prot) & old_prot; 3137 else 3138 current->protection = new_prot; 3139 3140 /* 3141 * update physical map if necessary. worry about copy-on-write 3142 * here -- CHECK THIS XXX 3143 */ 3144 3145 if (current->protection != old_prot) { 3146 /* update pmap! */ 3147 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */ 3148 uvm_map_lock_entry(current, RW_WRITER); 3149 #else 3150 uvm_map_lock_entry(current, RW_READER); 3151 #endif 3152 pmap_protect(map->pmap, current->start, current->end, 3153 current->protection & MASK(current)); 3154 uvm_map_unlock_entry(current); 3155 3156 /* 3157 * If this entry points at a vnode, and the 3158 * protection includes VM_PROT_EXECUTE, mark 3159 * the vnode as VEXECMAP. 3160 */ 3161 if (UVM_ET_ISOBJ(current)) { 3162 struct uvm_object *uobj = 3163 current->object.uvm_obj; 3164 3165 if (UVM_OBJ_IS_VNODE(uobj) && 3166 (current->protection & VM_PROT_EXECUTE)) { 3167 vn_markexec((struct vnode *) uobj); 3168 } 3169 } 3170 } 3171 3172 /* 3173 * If the map is configured to lock any future mappings, 3174 * wire this entry now if the old protection was VM_PROT_NONE 3175 * and the new protection is not VM_PROT_NONE. 3176 */ 3177 3178 if ((map->flags & VM_MAP_WIREFUTURE) != 0 && 3179 VM_MAPENT_ISWIRED(current) == 0 && 3180 old_prot == VM_PROT_NONE && 3181 new_prot != VM_PROT_NONE) { 3182 3183 /* 3184 * We must call pmap_update() here because the 3185 * pmap_protect() call above might have removed some 3186 * pmap entries and uvm_map_pageable() might create 3187 * some new pmap entries that rely on the prior 3188 * removals being completely finished. 
3189 */ 3190 3191 pmap_update(map->pmap); 3192 3193 if (uvm_map_pageable(map, current->start, 3194 current->end, false, 3195 UVM_LK_ENTER|UVM_LK_EXIT) != 0) { 3196 3197 /* 3198 * If locking the entry fails, remember the 3199 * error if it's the first one. Note we 3200 * still continue setting the protection in 3201 * the map, but will return the error 3202 * condition regardless. 3203 * 3204 * XXX Ignore what the actual error is, 3205 * XXX just call it a resource shortage 3206 * XXX so that it doesn't get confused 3207 * XXX what uvm_map_protect() itself would 3208 * XXX normally return. 3209 */ 3210 3211 error = ENOMEM; 3212 } 3213 } 3214 current = current->next; 3215 } 3216 pmap_update(map->pmap); 3217 3218 out: 3219 vm_map_unlock(map); 3220 3221 UVMHIST_LOG(maphist, "<- done, error=%jd",error,0,0,0); 3222 return error; 3223 } 3224 3225 #undef MASK 3226 3227 /* 3228 * uvm_map_inherit: set inheritance code for range of addrs in map. 3229 * 3230 * => map must be unlocked 3231 * => note that the inherit code is used during a "fork". see fork 3232 * code for details. 3233 */ 3234 3235 int 3236 uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end, 3237 vm_inherit_t new_inheritance) 3238 { 3239 struct vm_map_entry *entry, *temp_entry; 3240 UVMHIST_FUNC(__func__); 3241 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)", 3242 (uintptr_t)map, start, end, new_inheritance); 3243 3244 switch (new_inheritance) { 3245 case MAP_INHERIT_NONE: 3246 case MAP_INHERIT_COPY: 3247 case MAP_INHERIT_SHARE: 3248 case MAP_INHERIT_ZERO: 3249 break; 3250 default: 3251 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); 3252 return EINVAL; 3253 } 3254 3255 vm_map_lock(map); 3256 VM_MAP_RANGE_CHECK(map, start, end); 3257 if (uvm_map_lookup_entry(map, start, &temp_entry)) { 3258 entry = temp_entry; 3259 UVM_MAP_CLIP_START(map, entry, start); 3260 } else { 3261 entry = temp_entry->next; 3262 } 3263 while ((entry != &map->header) && (entry->start < end)) { 3264 UVM_MAP_CLIP_END(map, entry, end); 3265 entry->inheritance = new_inheritance; 3266 entry = entry->next; 3267 } 3268 vm_map_unlock(map); 3269 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); 3270 return 0; 3271 } 3272 3273 /* 3274 * uvm_map_advice: set advice code for range of addrs in map. 3275 * 3276 * => map must be unlocked 3277 */ 3278 3279 int 3280 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice) 3281 { 3282 struct vm_map_entry *entry, *temp_entry; 3283 UVMHIST_FUNC(__func__); 3284 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)", 3285 (uintptr_t)map, start, end, new_advice); 3286 3287 vm_map_lock(map); 3288 VM_MAP_RANGE_CHECK(map, start, end); 3289 if (uvm_map_lookup_entry(map, start, &temp_entry)) { 3290 entry = temp_entry; 3291 UVM_MAP_CLIP_START(map, entry, start); 3292 } else { 3293 entry = temp_entry->next; 3294 } 3295 3296 /* 3297 * XXXJRT: disallow holes? 
3298 */ 3299 3300 while ((entry != &map->header) && (entry->start < end)) { 3301 UVM_MAP_CLIP_END(map, entry, end); 3302 3303 switch (new_advice) { 3304 case MADV_NORMAL: 3305 case MADV_RANDOM: 3306 case MADV_SEQUENTIAL: 3307 /* nothing special here */ 3308 break; 3309 3310 default: 3311 vm_map_unlock(map); 3312 UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0); 3313 return EINVAL; 3314 } 3315 entry->advice = new_advice; 3316 entry = entry->next; 3317 } 3318 3319 vm_map_unlock(map); 3320 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); 3321 return 0; 3322 } 3323 3324 /* 3325 * uvm_map_willneed: apply MADV_WILLNEED 3326 */ 3327 3328 int 3329 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end) 3330 { 3331 struct vm_map_entry *entry; 3332 UVMHIST_FUNC(__func__); 3333 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)", 3334 (uintptr_t)map, start, end, 0); 3335 3336 vm_map_lock_read(map); 3337 VM_MAP_RANGE_CHECK(map, start, end); 3338 if (!uvm_map_lookup_entry(map, start, &entry)) { 3339 entry = entry->next; 3340 } 3341 while (entry->start < end) { 3342 struct vm_amap * const amap = entry->aref.ar_amap; 3343 struct uvm_object * const uobj = entry->object.uvm_obj; 3344 3345 KASSERT(entry != &map->header); 3346 KASSERT(start < entry->end); 3347 /* 3348 * For now, we handle only the easy but commonly-requested case. 3349 * ie. start prefetching of backing uobj pages. 3350 * 3351 * XXX It might be useful to pmap_enter() the already-in-core 3352 * pages by inventing a "weak" mode for uvm_fault() which would 3353 * only do the PGO_LOCKED pgo_get(). 3354 */ 3355 if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) { 3356 off_t offset; 3357 off_t size; 3358 3359 offset = entry->offset; 3360 if (start < entry->start) { 3361 offset += entry->start - start; 3362 } 3363 size = entry->offset + (entry->end - entry->start); 3364 if (entry->end < end) { 3365 size -= end - entry->end; 3366 } 3367 uvm_readahead(uobj, offset, size); 3368 } 3369 entry = entry->next; 3370 } 3371 vm_map_unlock_read(map); 3372 UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0); 3373 return 0; 3374 } 3375 3376 /* 3377 * uvm_map_pageable: sets the pageability of a range in a map. 3378 * 3379 * => wires map entries. should not be used for transient page locking. 3380 * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()). 3381 * => regions specified as not pageable require lock-down (wired) memory 3382 * and page tables. 3383 * => map must never be read-locked 3384 * => if islocked is true, map is already write-locked 3385 * => we always unlock the map, since we must downgrade to a read-lock 3386 * to call uvm_fault_wire() 3387 * => XXXCDC: check this and try and clean it up. 3388 */ 3389 3390 int 3391 uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, 3392 bool new_pageable, int lockflags) 3393 { 3394 struct vm_map_entry *entry, *start_entry, *failed_entry; 3395 int rv; 3396 #ifdef DIAGNOSTIC 3397 u_int timestamp_save; 3398 #endif 3399 UVMHIST_FUNC(__func__); 3400 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)", 3401 (uintptr_t)map, start, end, new_pageable); 3402 KASSERT(map->flags & VM_MAP_PAGEABLE); 3403 3404 if ((lockflags & UVM_LK_ENTER) == 0) 3405 vm_map_lock(map); 3406 VM_MAP_RANGE_CHECK(map, start, end); 3407 3408 /* 3409 * only one pageability change may take place at one time, since 3410 * uvm_fault_wire assumes it will be called only once for each 3411 * wiring/unwiring. 
therefore, we have to make sure we're actually 3412 * changing the pageability for the entire region. we do so before 3413 * making any changes. 3414 */ 3415 3416 if (uvm_map_lookup_entry(map, start, &start_entry) == false) { 3417 if ((lockflags & UVM_LK_EXIT) == 0) 3418 vm_map_unlock(map); 3419 3420 UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0); 3421 return EFAULT; 3422 } 3423 entry = start_entry; 3424 3425 if (start == end) { /* nothing required */ 3426 if ((lockflags & UVM_LK_EXIT) == 0) 3427 vm_map_unlock(map); 3428 3429 UVMHIST_LOG(maphist,"<- done (nothing)",0,0,0,0); 3430 return 0; 3431 } 3432 3433 /* 3434 * handle wiring and unwiring separately. 3435 */ 3436 3437 if (new_pageable) { /* unwire */ 3438 UVM_MAP_CLIP_START(map, entry, start); 3439 3440 /* 3441 * unwiring. first ensure that the range to be unwired is 3442 * really wired down and that there are no holes. 3443 */ 3444 3445 while ((entry != &map->header) && (entry->start < end)) { 3446 if (entry->wired_count == 0 || 3447 (entry->end < end && 3448 (entry->next == &map->header || 3449 entry->next->start > entry->end))) { 3450 if ((lockflags & UVM_LK_EXIT) == 0) 3451 vm_map_unlock(map); 3452 UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0); 3453 return EINVAL; 3454 } 3455 entry = entry->next; 3456 } 3457 3458 /* 3459 * POSIX 1003.1b - a single munlock call unlocks a region, 3460 * regardless of the number of mlock calls made on that 3461 * region. 3462 */ 3463 3464 entry = start_entry; 3465 while ((entry != &map->header) && (entry->start < end)) { 3466 UVM_MAP_CLIP_END(map, entry, end); 3467 if (VM_MAPENT_ISWIRED(entry)) 3468 uvm_map_entry_unwire(map, entry); 3469 entry = entry->next; 3470 } 3471 if ((lockflags & UVM_LK_EXIT) == 0) 3472 vm_map_unlock(map); 3473 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); 3474 return 0; 3475 } 3476 3477 /* 3478 * wire case: in two passes [XXXCDC: ugly block of code here] 3479 * 3480 * 1: holding the write lock, we create any anonymous maps that need 3481 * to be created. then we clip each map entry to the region to 3482 * be wired and increment its wiring count. 3483 * 3484 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault 3485 * in the pages for any newly wired area (wired_count == 1). 3486 * 3487 * downgrading to a read lock for uvm_fault_wire avoids a possible 3488 * deadlock with another thread that may have faulted on one of 3489 * the pages to be wired (it would mark the page busy, blocking 3490 * us, then in turn block on the map lock that we hold). because 3491 * of problems in the recursive lock package, we cannot upgrade 3492 * to a write lock in vm_map_lookup. thus, any actions that 3493 * require the write lock must be done beforehand. because we 3494 * keep the read lock on the map, the copy-on-write status of the 3495 * entries we modify here cannot change. 3496 */ 3497 3498 while ((entry != &map->header) && (entry->start < end)) { 3499 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 3500 3501 /* 3502 * perform actions of vm_map_lookup that need the 3503 * write lock on the map: create an anonymous map 3504 * for a copy-on-write region, or an anonymous map 3505 * for a zero-fill region. (XXXCDC: submap case 3506 * ok?) 3507 */ 3508 3509 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */ 3510 if (UVM_ET_ISNEEDSCOPY(entry) && 3511 ((entry->max_protection & VM_PROT_WRITE) || 3512 (entry->object.uvm_obj == NULL))) { 3513 amap_copy(map, entry, 0, start, end); 3514 /* XXXCDC: wait OK? 
*/ 3515 } 3516 } 3517 } 3518 UVM_MAP_CLIP_START(map, entry, start); 3519 UVM_MAP_CLIP_END(map, entry, end); 3520 entry->wired_count++; 3521 3522 /* 3523 * Check for holes 3524 */ 3525 3526 if (entry->protection == VM_PROT_NONE || 3527 (entry->end < end && 3528 (entry->next == &map->header || 3529 entry->next->start > entry->end))) { 3530 3531 /* 3532 * found one. amap creation actions do not need to 3533 * be undone, but the wired counts need to be restored. 3534 */ 3535 3536 while (entry != &map->header && entry->end > start) { 3537 entry->wired_count--; 3538 entry = entry->prev; 3539 } 3540 if ((lockflags & UVM_LK_EXIT) == 0) 3541 vm_map_unlock(map); 3542 UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0); 3543 return EINVAL; 3544 } 3545 entry = entry->next; 3546 } 3547 3548 /* 3549 * Pass 2. 3550 */ 3551 3552 #ifdef DIAGNOSTIC 3553 timestamp_save = map->timestamp; 3554 #endif 3555 vm_map_busy(map); 3556 vm_map_unlock(map); 3557 3558 rv = 0; 3559 entry = start_entry; 3560 while (entry != &map->header && entry->start < end) { 3561 if (entry->wired_count == 1) { 3562 rv = uvm_fault_wire(map, entry->start, entry->end, 3563 entry->max_protection, 1); 3564 if (rv) { 3565 3566 /* 3567 * wiring failed. break out of the loop. 3568 * we'll clean up the map below, once we 3569 * have a write lock again. 3570 */ 3571 3572 break; 3573 } 3574 } 3575 entry = entry->next; 3576 } 3577 3578 if (rv) { /* failed? */ 3579 3580 /* 3581 * Get back to an exclusive (write) lock. 3582 */ 3583 3584 vm_map_lock(map); 3585 vm_map_unbusy(map); 3586 3587 #ifdef DIAGNOSTIC 3588 if (timestamp_save + 1 != map->timestamp) 3589 panic("uvm_map_pageable: stale map"); 3590 #endif 3591 3592 /* 3593 * first drop the wiring count on all the entries 3594 * which haven't actually been wired yet. 3595 */ 3596 3597 failed_entry = entry; 3598 while (entry != &map->header && entry->start < end) { 3599 entry->wired_count--; 3600 entry = entry->next; 3601 } 3602 3603 /* 3604 * now, unwire all the entries that were successfully 3605 * wired above. 3606 */ 3607 3608 entry = start_entry; 3609 while (entry != failed_entry) { 3610 entry->wired_count--; 3611 if (VM_MAPENT_ISWIRED(entry) == 0) 3612 uvm_map_entry_unwire(map, entry); 3613 entry = entry->next; 3614 } 3615 if ((lockflags & UVM_LK_EXIT) == 0) 3616 vm_map_unlock(map); 3617 UVMHIST_LOG(maphist, "<- done (RV=%jd)", rv,0,0,0); 3618 return (rv); 3619 } 3620 3621 if ((lockflags & UVM_LK_EXIT) == 0) { 3622 vm_map_unbusy(map); 3623 } else { 3624 3625 /* 3626 * Get back to an exclusive (write) lock. 3627 */ 3628 3629 vm_map_lock(map); 3630 vm_map_unbusy(map); 3631 } 3632 3633 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); 3634 return 0; 3635 } 3636 3637 /* 3638 * uvm_map_pageable_all: special case of uvm_map_pageable - affects 3639 * all mapped regions. 3640 * 3641 * => map must not be locked. 3642 * => if no flags are specified, all regions are unwired. 3643 * => XXXJRT: has some of the same problems as uvm_map_pageable() above. 3644 */ 3645 3646 int 3647 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit) 3648 { 3649 struct vm_map_entry *entry, *failed_entry; 3650 vsize_t size; 3651 int rv; 3652 #ifdef DIAGNOSTIC 3653 u_int timestamp_save; 3654 #endif 3655 UVMHIST_FUNC(__func__); 3656 UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags, 3657 0, 0); 3658 3659 KASSERT(map->flags & VM_MAP_PAGEABLE); 3660 3661 vm_map_lock(map); 3662 3663 /* 3664 * handle wiring and unwiring separately. 
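 * (flags == 0 is the munlockall() case; MCL_CURRENT and/or
 * MCL_FUTURE come in from mlockall())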
3665 */ 3666 3667 if (flags == 0) { /* unwire */ 3668 3669 /* 3670 * POSIX 1003.1b -- munlockall unlocks all regions, 3671 * regardless of how many times mlockall has been called. 3672 */ 3673 3674 for (entry = map->header.next; entry != &map->header; 3675 entry = entry->next) { 3676 if (VM_MAPENT_ISWIRED(entry)) 3677 uvm_map_entry_unwire(map, entry); 3678 } 3679 map->flags &= ~VM_MAP_WIREFUTURE; 3680 vm_map_unlock(map); 3681 UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); 3682 return 0; 3683 } 3684 3685 if (flags & MCL_FUTURE) { 3686 3687 /* 3688 * must wire all future mappings; remember this. 3689 */ 3690 3691 map->flags |= VM_MAP_WIREFUTURE; 3692 } 3693 3694 if ((flags & MCL_CURRENT) == 0) { 3695 3696 /* 3697 * no more work to do! 3698 */ 3699 3700 UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0); 3701 vm_map_unlock(map); 3702 return 0; 3703 } 3704 3705 /* 3706 * wire case: in three passes [XXXCDC: ugly block of code here] 3707 * 3708 * 1: holding the write lock, count all pages mapped by non-wired 3709 * entries. if this would cause us to go over our limit, we fail. 3710 * 3711 * 2: still holding the write lock, we create any anonymous maps that 3712 * need to be created. then we increment each entry's wiring count. 3713 * 3714 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault 3715 * in the pages for any newly wired area (wired_count == 1). 3716 * 3717 * downgrading to a read lock for uvm_fault_wire avoids a possible 3718 * deadlock with another thread that may have faulted on one of 3719 * the pages to be wired (it would mark the page busy, blocking 3720 * us, then in turn block on the map lock that we hold). because 3721 * of problems in the recursive lock package, we cannot upgrade 3722 * to a write lock in vm_map_lookup. thus, any actions that 3723 * require the write lock must be done beforehand. because we 3724 * keep the read lock on the map, the copy-on-write status of the 3725 * entries we modify here cannot change. 3726 */ 3727 3728 for (size = 0, entry = map->header.next; entry != &map->header; 3729 entry = entry->next) { 3730 if (entry->protection != VM_PROT_NONE && 3731 VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 3732 size += entry->end - entry->start; 3733 } 3734 } 3735 3736 if (atop(size) + uvmexp.wired > uvmexp.wiredmax) { 3737 vm_map_unlock(map); 3738 return ENOMEM; 3739 } 3740 3741 if (limit != 0 && 3742 (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) { 3743 vm_map_unlock(map); 3744 return ENOMEM; 3745 } 3746 3747 /* 3748 * Pass 2. 3749 */ 3750 3751 for (entry = map->header.next; entry != &map->header; 3752 entry = entry->next) { 3753 if (entry->protection == VM_PROT_NONE) 3754 continue; 3755 if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */ 3756 3757 /* 3758 * perform actions of vm_map_lookup that need the 3759 * write lock on the map: create an anonymous map 3760 * for a copy-on-write region, or an anonymous map 3761 * for a zero-fill region. (XXXCDC: submap case 3762 * ok?) 3763 */ 3764 3765 if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */ 3766 if (UVM_ET_ISNEEDSCOPY(entry) && 3767 ((entry->max_protection & VM_PROT_WRITE) || 3768 (entry->object.uvm_obj == NULL))) { 3769 amap_copy(map, entry, 0, entry->start, 3770 entry->end); 3771 /* XXXCDC: wait OK? */ 3772 } 3773 } 3774 } 3775 entry->wired_count++; 3776 } 3777 3778 /* 3779 * Pass 3.
3780 */ 3781 3782 #ifdef DIAGNOSTIC 3783 timestamp_save = map->timestamp; 3784 #endif 3785 vm_map_busy(map); 3786 vm_map_unlock(map); 3787 3788 rv = 0; 3789 for (entry = map->header.next; entry != &map->header; 3790 entry = entry->next) { 3791 if (entry->wired_count == 1) { 3792 rv = uvm_fault_wire(map, entry->start, entry->end, 3793 entry->max_protection, 1); 3794 if (rv) { 3795 3796 /* 3797 * wiring failed. break out of the loop. 3798 * we'll clean up the map below, once we 3799 * have a write lock again. 3800 */ 3801 3802 break; 3803 } 3804 } 3805 } 3806 3807 if (rv) { 3808 3809 /* 3810 * Get back an exclusive (write) lock. 3811 */ 3812 3813 vm_map_lock(map); 3814 vm_map_unbusy(map); 3815 3816 #ifdef DIAGNOSTIC 3817 if (timestamp_save + 1 != map->timestamp) 3818 panic("uvm_map_pageable_all: stale map"); 3819 #endif 3820 3821 /* 3822 * first drop the wiring count on all the entries 3823 * which haven't actually been wired yet. 3824 * 3825 * Skip VM_PROT_NONE entries like we did above. 3826 */ 3827 3828 failed_entry = entry; 3829 for (/* nothing */; entry != &map->header; 3830 entry = entry->next) { 3831 if (entry->protection == VM_PROT_NONE) 3832 continue; 3833 entry->wired_count--; 3834 } 3835 3836 /* 3837 * now, unwire all the entries that were successfully 3838 * wired above. 3839 * 3840 * Skip VM_PROT_NONE entries like we did above. 3841 */ 3842 3843 for (entry = map->header.next; entry != failed_entry; 3844 entry = entry->next) { 3845 if (entry->protection == VM_PROT_NONE) 3846 continue; 3847 entry->wired_count--; 3848 if (VM_MAPENT_ISWIRED(entry)) 3849 uvm_map_entry_unwire(map, entry); 3850 } 3851 vm_map_unlock(map); 3852 UVMHIST_LOG(maphist,"<- done (RV=%jd)", rv,0,0,0); 3853 return (rv); 3854 } 3855 3856 vm_map_unbusy(map); 3857 3858 UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0); 3859 return 0; 3860 } 3861 3862 /* 3863 * uvm_map_clean: clean out a map range 3864 * 3865 * => valid flags: 3866 * if (flags & PGO_CLEANIT): dirty pages are cleaned first 3867 * if (flags & PGO_SYNCIO): dirty pages are written synchronously 3868 * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean 3869 * if (flags & PGO_FREE): any cached pages are freed after clean 3870 * => returns an error if any part of the specified range isn't mapped 3871 * => never a need to flush amap layer since the anonymous memory has 3872 * no permanent home, but may deactivate pages there 3873 * => called from sys_msync() and sys_madvise() 3874 * => caller must not write-lock map (read OK). 3875 * => we may sleep while cleaning if SYNCIO [with map read-locked] 3876 */ 3877 3878 int 3879 uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags) 3880 { 3881 struct vm_map_entry *current, *entry; 3882 struct uvm_object *uobj; 3883 struct vm_amap *amap; 3884 struct vm_anon *anon; 3885 struct vm_page *pg; 3886 vaddr_t offset; 3887 vsize_t size; 3888 voff_t uoff; 3889 int error, refs; 3890 UVMHIST_FUNC(__func__); 3891 UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)", 3892 (uintptr_t)map, start, end, flags); 3893 3894 KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) != 3895 (PGO_FREE|PGO_DEACTIVATE)); 3896 3897 vm_map_lock_read(map); 3898 VM_MAP_RANGE_CHECK(map, start, end); 3899 if (uvm_map_lookup_entry(map, start, &entry) == false) { 3900 vm_map_unlock_read(map); 3901 return EFAULT; 3902 } 3903 3904 /* 3905 * Make a first pass to check for holes and wiring problems. 
3906 */ 3907 3908 for (current = entry; current->start < end; current = current->next) { 3909 if (UVM_ET_ISSUBMAP(current)) { 3910 vm_map_unlock_read(map); 3911 return EINVAL; 3912 } 3913 if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) { 3914 vm_map_unlock_read(map); 3915 return EBUSY; 3916 } 3917 if (end <= current->end) { 3918 break; 3919 } 3920 if (current->end != current->next->start) { 3921 vm_map_unlock_read(map); 3922 return EFAULT; 3923 } 3924 } 3925 3926 error = 0; 3927 for (current = entry; start < end; current = current->next) { 3928 amap = current->aref.ar_amap; /* upper layer */ 3929 uobj = current->object.uvm_obj; /* lower layer */ 3930 KASSERT(start >= current->start); 3931 3932 /* 3933 * No amap cleaning necessary if: 3934 * 3935 * (1) There's no amap. 3936 * 3937 * (2) We're not deactivating or freeing pages. 3938 */ 3939 3940 if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) 3941 goto flush_object; 3942 3943 offset = start - current->start; 3944 size = MIN(end, current->end) - start; 3945 3946 amap_lock(amap, RW_WRITER); 3947 for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) { 3948 anon = amap_lookup(&current->aref, offset); 3949 if (anon == NULL) 3950 continue; 3951 3952 KASSERT(anon->an_lock == amap->am_lock); 3953 pg = anon->an_page; 3954 if (pg == NULL) { 3955 continue; 3956 } 3957 if (pg->flags & PG_BUSY) { 3958 continue; 3959 } 3960 3961 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) { 3962 3963 /* 3964 * In these first 3 cases, we just deactivate the page. 3965 */ 3966 3967 case PGO_CLEANIT|PGO_FREE: 3968 case PGO_CLEANIT|PGO_DEACTIVATE: 3969 case PGO_DEACTIVATE: 3970 deactivate_it: 3971 /* 3972 * skip the page if it's loaned or wired, 3973 * since it shouldn't be on a paging queue 3974 * at all in these cases. 3975 */ 3976 3977 if (pg->loan_count != 0 || 3978 pg->wire_count != 0) { 3979 continue; 3980 } 3981 KASSERT(pg->uanon == anon); 3982 uvm_pagelock(pg); 3983 uvm_pagedeactivate(pg); 3984 uvm_pageunlock(pg); 3985 continue; 3986 3987 case PGO_FREE: 3988 3989 /* 3990 * If there are multiple references to 3991 * the amap, just deactivate the page. 3992 */ 3993 3994 if (amap_refs(amap) > 1) 3995 goto deactivate_it; 3996 3997 /* skip the page if it's wired */ 3998 if (pg->wire_count != 0) { 3999 continue; 4000 } 4001 amap_unadd(&current->aref, offset); 4002 refs = --anon->an_ref; 4003 if (refs == 0) { 4004 uvm_anfree(anon); 4005 } 4006 continue; 4007 } 4008 } 4009 amap_unlock(amap); 4010 4011 flush_object: 4012 /* 4013 * flush pages if we've got a valid backing object. 4014 * note that we must always clean object pages before 4015 * freeing them since otherwise we could reveal stale 4016 * data from files. 4017 */ 4018 4019 uoff = current->offset + (start - current->start); 4020 size = MIN(end, current->end) - start; 4021 if (uobj != NULL) { 4022 rw_enter(uobj->vmobjlock, RW_WRITER); 4023 if (uobj->pgops->pgo_put != NULL) 4024 error = (uobj->pgops->pgo_put)(uobj, uoff, 4025 uoff + size, flags | PGO_CLEANIT); 4026 else 4027 error = 0; 4028 } 4029 start += size; 4030 } 4031 vm_map_unlock_read(map); 4032 return (error); 4033 } 4034 4035 4036 /* 4037 * uvm_map_checkprot: check protection in map 4038 * 4039 * => must allow specified protection in a fully allocated region. 4040 * => map must be read or write locked by caller.
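 *
 * => usage sketch (hypothetical caller, for illustration only):
 *
 *	vm_map_lock_read(map);
 *	ok = uvm_map_checkprot(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);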
4041 */ 4042 4043 bool 4044 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end, 4045 vm_prot_t protection) 4046 { 4047 struct vm_map_entry *entry; 4048 struct vm_map_entry *tmp_entry; 4049 4050 if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { 4051 return (false); 4052 } 4053 entry = tmp_entry; 4054 while (start < end) { 4055 if (entry == &map->header) { 4056 return (false); 4057 } 4058 4059 /* 4060 * no holes allowed 4061 */ 4062 4063 if (start < entry->start) { 4064 return (false); 4065 } 4066 4067 /* 4068 * check protection associated with entry 4069 */ 4070 4071 if ((entry->protection & protection) != protection) { 4072 return (false); 4073 } 4074 start = entry->end; 4075 entry = entry->next; 4076 } 4077 return (true); 4078 } 4079 4080 /* 4081 * uvmspace_alloc: allocate a vmspace structure. 4082 * 4083 * - structure includes vm_map and pmap 4084 * - XXX: no locking on this structure 4085 * - refcnt set to 1, rest must be init'd by caller 4086 */ 4087 struct vmspace * 4088 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown) 4089 { 4090 struct vmspace *vm; 4091 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 4092 4093 vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK); 4094 uvmspace_init(vm, NULL, vmin, vmax, topdown); 4095 UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0); 4096 return (vm); 4097 } 4098 4099 /* 4100 * uvmspace_init: initialize a vmspace structure. 4101 * 4102 * - XXX: no locking on this structure 4103 * - refcnt set to 1, rest must be init'd by caller 4104 */ 4105 void 4106 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, 4107 vaddr_t vmax, bool topdown) 4108 { 4109 UVMHIST_FUNC(__func__); 4110 UVMHIST_CALLARGS(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx", 4111 (uintptr_t)vm, (uintptr_t)pmap, vmin, vmax); 4112 UVMHIST_LOG(maphist, " topdown=%ju)", topdown, 0, 0, 0); 4113 4114 memset(vm, 0, sizeof(*vm)); 4115 uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE 4116 | (topdown ? VM_MAP_TOPDOWN : 0) 4117 ); 4118 if (pmap) 4119 pmap_reference(pmap); 4120 else 4121 pmap = pmap_create(); 4122 vm->vm_map.pmap = pmap; 4123 vm->vm_refcnt = 1; 4124 UVMHIST_LOG(maphist,"<- done",0,0,0,0); 4125 } 4126 4127 /* 4128 * uvmspace_share: share a vmspace between two processes 4129 * 4130 * - used for vfork, threads(?) 
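 * - a hedged sketch of the vfork(2)-style call, with p1 the parent
 *   and p2 the new child:
 *
 *	uvmspace_share(p1, p2);		(p2 now runs in p1's vmspace;
 *					 the added reference covers p2)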
4131 */ 4132 4133 void 4134 uvmspace_share(struct proc *p1, struct proc *p2) 4135 { 4136 4137 uvmspace_addref(p1->p_vmspace); 4138 p2->p_vmspace = p1->p_vmspace; 4139 } 4140 4141 #if 0 4142 4143 /* 4144 * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace 4145 * 4146 * - XXX: no locking on vmspace 4147 */ 4148 4149 void 4150 uvmspace_unshare(struct lwp *l) 4151 { 4152 struct proc *p = l->l_proc; 4153 struct vmspace *nvm, *ovm = p->p_vmspace; 4154 4155 if (ovm->vm_refcnt == 1) 4156 /* nothing to do: vmspace isn't shared in the first place */ 4157 return; 4158 4159 /* make a new vmspace, still holding old one */ 4160 nvm = uvmspace_fork(ovm); 4161 4162 kpreempt_disable(); 4163 pmap_deactivate(l); /* unbind old vmspace */ 4164 p->p_vmspace = nvm; 4165 pmap_activate(l); /* switch to new vmspace */ 4166 kpreempt_enable(); 4167 4168 uvmspace_free(ovm); /* drop reference to old vmspace */ 4169 } 4170 4171 #endif 4172 4173 4174 /* 4175 * uvmspace_spawn: a new process has been spawned and needs a vmspace 4176 */ 4177 4178 void 4179 uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown) 4180 { 4181 struct proc *p = l->l_proc; 4182 struct vmspace *nvm; 4183 4184 #ifdef __HAVE_CPU_VMSPACE_EXEC 4185 cpu_vmspace_exec(l, start, end); 4186 #endif 4187 4188 nvm = uvmspace_alloc(start, end, topdown); 4189 kpreempt_disable(); 4190 p->p_vmspace = nvm; 4191 pmap_activate(l); 4192 kpreempt_enable(); 4193 } 4194 4195 /* 4196 * uvmspace_exec: the process wants to exec a new program 4197 */ 4198 4199 void 4200 uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown) 4201 { 4202 struct proc *p = l->l_proc; 4203 struct vmspace *nvm, *ovm = p->p_vmspace; 4204 struct vm_map *map; 4205 int flags; 4206 4207 KASSERT(ovm != NULL); 4208 #ifdef __HAVE_CPU_VMSPACE_EXEC 4209 cpu_vmspace_exec(l, start, end); 4210 #endif 4211 4212 map = &ovm->vm_map; 4213 /* 4214 * see if more than one process is using this vmspace... 4215 */ 4216 4217 if (ovm->vm_refcnt == 1 4218 && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) { 4219 4220 /* 4221 * if p is the only process using its vmspace then we can safely 4222 * recycle that vmspace for the program that is being exec'd. 4223 * But only if TOPDOWN matches the requested value for the new 4224 * vm space! 4225 */ 4226 4227 /* 4228 * SYSV SHM semantics require us to kill all segments on an exec 4229 */ 4230 if (uvm_shmexit && ovm->vm_shm) 4231 (*uvm_shmexit)(ovm); 4232 4233 /* 4234 * POSIX 1003.1b -- "lock future mappings" is revoked 4235 * when a process execs another program image. 4236 */ 4237 4238 map->flags &= ~VM_MAP_WIREFUTURE; 4239 4240 /* 4241 * now unmap the old program. 4242 * 4243 * XXX set VM_MAP_DYING for the duration, so pmap_update() 4244 * is not called until the pmap has been totally cleared out 4245 * after pmap_remove_all(), or it can confuse some pmap 4246 * implementations. it would be nice to handle this by 4247 * deferring the pmap_update() while it is known the address 4248 * space is not visible to any user LWP other than curlwp, 4249 * but there isn't an elegant way of inferring that right 4250 * now. 4251 */ 4252 4253 flags = pmap_remove_all(map->pmap) ? 
UVM_FLAG_VAONLY : 0; 4254 map->flags |= VM_MAP_DYING; 4255 uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags); 4256 map->flags &= ~VM_MAP_DYING; 4257 pmap_update(map->pmap); 4258 KASSERT(map->header.prev == &map->header); 4259 KASSERT(map->nentries == 0); 4260 4261 /* 4262 * resize the map 4263 */ 4264 4265 vm_map_setmin(map, start); 4266 vm_map_setmax(map, end); 4267 } else { 4268 4269 /* 4270 * p's vmspace is being shared, so we can't reuse it for p since 4271 * it is still being used for others. allocate a new vmspace 4272 * for p 4273 */ 4274 4275 nvm = uvmspace_alloc(start, end, topdown); 4276 4277 /* 4278 * install new vmspace and drop our ref to the old one. 4279 */ 4280 4281 kpreempt_disable(); 4282 pmap_deactivate(l); 4283 p->p_vmspace = nvm; 4284 pmap_activate(l); 4285 kpreempt_enable(); 4286 4287 uvmspace_free(ovm); 4288 } 4289 } 4290 4291 /* 4292 * uvmspace_addref: add a reference to a vmspace. 4293 */ 4294 4295 void 4296 uvmspace_addref(struct vmspace *vm) 4297 { 4298 4299 KASSERT((vm->vm_map.flags & VM_MAP_DYING) == 0); 4300 KASSERT(vm->vm_refcnt > 0); 4301 atomic_inc_uint(&vm->vm_refcnt); 4302 } 4303 4304 /* 4305 * uvmspace_free: free a vmspace data structure 4306 */ 4307 4308 void 4309 uvmspace_free(struct vmspace *vm) 4310 { 4311 struct vm_map_entry *dead_entries; 4312 struct vm_map *map = &vm->vm_map; 4313 int flags; 4314 4315 UVMHIST_FUNC(__func__); 4316 UVMHIST_CALLARGS(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm, 4317 vm->vm_refcnt, 0, 0); 4318 4319 membar_release(); 4320 if (atomic_dec_uint_nv(&vm->vm_refcnt) > 0) 4321 return; 4322 membar_acquire(); 4323 4324 /* 4325 * at this point, there should be no other references to the map. 4326 * delete all of the mappings, then destroy the pmap. 4327 */ 4328 4329 map->flags |= VM_MAP_DYING; 4330 flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0; 4331 4332 /* Get rid of any SYSV shared memory segments. */ 4333 if (uvm_shmexit && vm->vm_shm != NULL) 4334 (*uvm_shmexit)(vm); 4335 4336 if (map->nentries) { 4337 uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map), 4338 &dead_entries, flags); 4339 if (dead_entries != NULL) 4340 uvm_unmap_detach(dead_entries, 0); 4341 } 4342 KASSERT(map->nentries == 0); 4343 KASSERT(map->size == 0); 4344 4345 mutex_destroy(&map->misc_lock); 4346 rw_destroy(&map->lock); 4347 cv_destroy(&map->cv); 4348 pmap_destroy(map->pmap); 4349 pool_cache_put(&uvm_vmspace_cache, vm); 4350 } 4351 4352 static struct vm_map_entry * 4353 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry, 4354 int flags) 4355 { 4356 struct vm_map_entry *new_entry; 4357 4358 new_entry = uvm_mapent_alloc(new_map, 0); 4359 /* old_entry -> new_entry */ 4360 uvm_mapent_copy(old_entry, new_entry); 4361 4362 /* new pmap has nothing wired in it */ 4363 new_entry->wired_count = 0; 4364 4365 /* 4366 * gain reference to object backing the map (can't 4367 * be a submap, already checked this case). 4368 */ 4369 4370 if (new_entry->aref.ar_amap) 4371 uvm_map_reference_amap(new_entry, flags); 4372 4373 if (new_entry->object.uvm_obj && 4374 new_entry->object.uvm_obj->pgops->pgo_reference) 4375 new_entry->object.uvm_obj->pgops->pgo_reference( 4376 new_entry->object.uvm_obj); 4377 4378 /* insert entry at end of new_map's entry list */ 4379 uvm_map_entry_link(new_map, new_map->header.prev, 4380 new_entry); 4381 4382 return new_entry; 4383 } 4384 4385 /* 4386 * share the mapping: this means we want the old and 4387 * new entries to share amaps and backing objects. 
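 *
 * this is the MAP_INHERIT_SHARE case; userland can request it ahead
 * of fork with minherit(2).  a hedged sketch of that usage:
 *
 *	minherit(addr, len, MAP_INHERIT_SHARE);
 *	fork();			(parent and child now share the
 *				 amap for [addr, addr + len))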
4388 */ 4389 static void 4390 uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map, 4391 struct vm_map_entry *old_entry) 4392 { 4393 /* 4394 * if the old_entry needs a new amap (due to prev fork) 4395 * then we need to allocate it now so that we have 4396 * something we own to share with the new_entry. [in 4397 * other words, we need to clear needs_copy] 4398 */ 4399 4400 if (UVM_ET_ISNEEDSCOPY(old_entry)) { 4401 /* get our own amap, clears needs_copy */ 4402 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK, 4403 0, 0); 4404 /* XXXCDC: WAITOK??? */ 4405 } 4406 4407 uvm_mapent_clone(new_map, old_entry, AMAP_SHARED); 4408 } 4409 4410 4411 static void 4412 uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map, 4413 struct vm_map_entry *old_entry) 4414 { 4415 struct vm_map_entry *new_entry; 4416 4417 /* 4418 * copy-on-write the mapping (using mmap's 4419 * MAP_PRIVATE semantics) 4420 * 4421 * allocate new_entry, adjust reference counts. 4422 * (note that new references are read-only). 4423 */ 4424 4425 new_entry = uvm_mapent_clone(new_map, old_entry, 0); 4426 4427 new_entry->etype |= 4428 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY); 4429 4430 /* 4431 * the new entry will need an amap. it will either 4432 * need to be copied from the old entry or created 4433 * from scratch (if the old entry does not have an 4434 * amap). can we defer this process until later 4435 * (by setting "needs_copy") or do we need to copy 4436 * the amap now? 4437 * 4438 * we must copy the amap now if any of the following 4439 * conditions hold: 4440 * 1. the old entry has an amap and that amap is 4441 * being shared. this means that the old (parent) 4442 * process is sharing the amap with another 4443 * process. if we do not clear needs_copy here 4444 * we will end up in a situation where both the 4445 * parent and child process are referring to the 4446 * same amap with "needs_copy" set. if the 4447 * parent write-faults, the fault routine will 4448 * clear "needs_copy" in the parent by allocating 4449 * a new amap. this is wrong because the 4450 * parent is supposed to be sharing the old amap 4451 * and the new amap will break that. 4452 * 4453 * 2. if the old entry has an amap and a non-zero 4454 * wire count then we are going to have to call 4455 * amap_cow_now to avoid page faults in the 4456 * parent process. since amap_cow_now requires 4457 * "needs_copy" to be clear we might as well 4458 * clear it here as well. 4459 * 4460 */ 4461 4462 if (old_entry->aref.ar_amap != NULL) { 4463 if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 || 4464 VM_MAPENT_ISWIRED(old_entry)) { 4465 4466 amap_copy(new_map, new_entry, 4467 AMAP_COPY_NOCHUNK, 0, 0); 4468 /* XXXCDC: M_WAITOK ... ok? */ 4469 } 4470 } 4471 4472 /* 4473 * if the parent's entry is wired down, then the 4474 * parent process does not want page faults on 4475 * access to that memory. this means that we 4476 * cannot do copy-on-write because we can't write 4477 * protect the old entry. in this case we 4478 * resolve all copy-on-write faults now, using 4479 * amap_cow_now. note that we have already 4480 * allocated any needed amap (above). 4481 */ 4482 4483 if (VM_MAPENT_ISWIRED(old_entry)) { 4484 4485 /* 4486 * resolve all copy-on-write faults now 4487 * (note that there is nothing to do if 4488 * the old mapping does not have an amap). 
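	 * amap_cow_now copies every page up front, so the child
	 * pays the whole copy cost at fork time rather than on
	 * demand at fault time.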
4489 */ 4490 if (old_entry->aref.ar_amap) 4491 amap_cow_now(new_map, new_entry); 4492 4493 } else { 4494 /* 4495 * setup mappings to trigger copy-on-write faults 4496 * we must write-protect the parent if it has 4497 * an amap and it is not already "needs_copy"... 4498 * if it is already "needs_copy" then the parent 4499 * has already been write-protected by a previous 4500 * fork operation. 4501 */ 4502 if (old_entry->aref.ar_amap && 4503 !UVM_ET_ISNEEDSCOPY(old_entry)) { 4504 if (old_entry->max_protection & VM_PROT_WRITE) { 4505 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */ 4506 uvm_map_lock_entry(old_entry, RW_WRITER); 4507 #else 4508 uvm_map_lock_entry(old_entry, RW_READER); 4509 #endif 4510 pmap_protect(old_map->pmap, 4511 old_entry->start, old_entry->end, 4512 old_entry->protection & ~VM_PROT_WRITE); 4513 uvm_map_unlock_entry(old_entry); 4514 } 4515 old_entry->etype |= UVM_ET_NEEDSCOPY; 4516 } 4517 } 4518 } 4519 4520 /* 4521 * zero the mapping: the new entry will be zero initialized 4522 */ 4523 static void 4524 uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map, 4525 struct vm_map_entry *old_entry) 4526 { 4527 struct vm_map_entry *new_entry; 4528 4529 new_entry = uvm_mapent_clone(new_map, old_entry, 0); 4530 4531 new_entry->etype |= 4532 (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY); 4533 4534 if (new_entry->aref.ar_amap) { 4535 uvm_map_unreference_amap(new_entry, 0); 4536 new_entry->aref.ar_pageoff = 0; 4537 new_entry->aref.ar_amap = NULL; 4538 } 4539 4540 if (UVM_ET_ISOBJ(new_entry)) { 4541 if (new_entry->object.uvm_obj->pgops->pgo_detach) 4542 new_entry->object.uvm_obj->pgops->pgo_detach( 4543 new_entry->object.uvm_obj); 4544 new_entry->object.uvm_obj = NULL; 4545 new_entry->offset = 0; 4546 new_entry->etype &= ~UVM_ET_OBJ; 4547 } 4548 } 4549 4550 /* 4551 * F O R K - m a i n e n t r y p o i n t 4552 */ 4553 /* 4554 * uvmspace_fork: fork a process' main map 4555 * 4556 * => create a new vmspace for child process from parent. 4557 * => parent's map must not be locked. 
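 * => a hedged sketch of the fork(2)-time call (names invented here):
 *
 *	vm2 = uvmspace_fork(p1->p_vmspace);
 *	p2->p_vmspace = vm2;		(child gets the new vmspace)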
4558 */ 4559 4560 struct vmspace * 4561 uvmspace_fork(struct vmspace *vm1) 4562 { 4563 struct vmspace *vm2; 4564 struct vm_map *old_map = &vm1->vm_map; 4565 struct vm_map *new_map; 4566 struct vm_map_entry *old_entry; 4567 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 4568 4569 vm_map_lock(old_map); 4570 4571 vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), 4572 vm1->vm_map.flags & VM_MAP_TOPDOWN); 4573 memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy, 4574 (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy); 4575 new_map = &vm2->vm_map; /* XXX */ 4576 4577 old_entry = old_map->header.next; 4578 new_map->size = old_map->size; 4579 4580 /* 4581 * go entry-by-entry 4582 */ 4583 4584 while (old_entry != &old_map->header) { 4585 4586 /* 4587 * first, some sanity checks on the old entry 4588 */ 4589 4590 KASSERT(!UVM_ET_ISSUBMAP(old_entry)); 4591 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) || 4592 !UVM_ET_ISNEEDSCOPY(old_entry)); 4593 4594 switch (old_entry->inheritance) { 4595 case MAP_INHERIT_NONE: 4596 /* 4597 * drop the mapping, modify size 4598 */ 4599 new_map->size -= old_entry->end - old_entry->start; 4600 break; 4601 4602 case MAP_INHERIT_SHARE: 4603 uvm_mapent_forkshared(new_map, old_map, old_entry); 4604 break; 4605 4606 case MAP_INHERIT_COPY: 4607 uvm_mapent_forkcopy(new_map, old_map, old_entry); 4608 break; 4609 4610 case MAP_INHERIT_ZERO: 4611 uvm_mapent_forkzero(new_map, old_map, old_entry); 4612 break; 4613 default: 4614 KASSERT(0); 4615 break; 4616 } 4617 old_entry = old_entry->next; 4618 } 4619 4620 pmap_update(old_map->pmap); 4621 vm_map_unlock(old_map); 4622 4623 if (uvm_shmfork && vm1->vm_shm) 4624 (*uvm_shmfork)(vm1, vm2); 4625 4626 #ifdef PMAP_FORK 4627 pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap); 4628 #endif 4629 4630 UVMHIST_LOG(maphist,"<- done",0,0,0,0); 4631 return (vm2); 4632 } 4633 4634 4635 /* 4636 * uvm_mapent_trymerge: try to merge an entry with its neighbors. 4637 * 4638 * => called with map locked. 4639 * => return non zero if successfully merged. 4640 */ 4641 4642 int 4643 uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags) 4644 { 4645 struct uvm_object *uobj; 4646 struct vm_map_entry *next; 4647 struct vm_map_entry *prev; 4648 vsize_t size; 4649 int merged = 0; 4650 bool copying; 4651 int newetype; 4652 4653 if (entry->aref.ar_amap != NULL) { 4654 return 0; 4655 } 4656 if ((entry->flags & UVM_MAP_NOMERGE) != 0) { 4657 return 0; 4658 } 4659 4660 uobj = entry->object.uvm_obj; 4661 size = entry->end - entry->start; 4662 copying = (flags & UVM_MERGE_COPYING) != 0; 4663 newetype = copying ? 
(entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype; 4664 4665 next = entry->next; 4666 if (next != &map->header && 4667 next->start == entry->end && 4668 ((copying && next->aref.ar_amap != NULL && 4669 amap_refs(next->aref.ar_amap) == 1) || 4670 (!copying && next->aref.ar_amap == NULL)) && 4671 UVM_ET_ISCOMPATIBLE(next, newetype, 4672 uobj, entry->flags, entry->protection, 4673 entry->max_protection, entry->inheritance, entry->advice, 4674 entry->wired_count) && 4675 (uobj == NULL || entry->offset + size == next->offset)) { 4676 int error; 4677 4678 if (copying) { 4679 error = amap_extend(next, size, 4680 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS); 4681 } else { 4682 error = 0; 4683 } 4684 if (error == 0) { 4685 if (uobj) { 4686 if (uobj->pgops->pgo_detach) { 4687 uobj->pgops->pgo_detach(uobj); 4688 } 4689 } 4690 4691 entry->end = next->end; 4692 clear_hints(map, next); 4693 uvm_map_entry_unlink(map, next); 4694 if (copying) { 4695 entry->aref = next->aref; 4696 entry->etype &= ~UVM_ET_NEEDSCOPY; 4697 } 4698 uvm_map_check(map, "trymerge forwardmerge"); 4699 uvm_mapent_free(next); 4700 merged++; 4701 } 4702 } 4703 4704 prev = entry->prev; 4705 if (prev != &map->header && 4706 prev->end == entry->start && 4707 ((copying && !merged && prev->aref.ar_amap != NULL && 4708 amap_refs(prev->aref.ar_amap) == 1) || 4709 (!copying && prev->aref.ar_amap == NULL)) && 4710 UVM_ET_ISCOMPATIBLE(prev, newetype, 4711 uobj, entry->flags, entry->protection, 4712 entry->max_protection, entry->inheritance, entry->advice, 4713 entry->wired_count) && 4714 (uobj == NULL || 4715 prev->offset + prev->end - prev->start == entry->offset)) { 4716 int error; 4717 4718 if (copying) { 4719 error = amap_extend(prev, size, 4720 AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS); 4721 } else { 4722 error = 0; 4723 } 4724 if (error == 0) { 4725 if (uobj) { 4726 if (uobj->pgops->pgo_detach) { 4727 uobj->pgops->pgo_detach(uobj); 4728 } 4729 entry->offset = prev->offset; 4730 } 4731 4732 entry->start = prev->start; 4733 clear_hints(map, prev); 4734 uvm_map_entry_unlink(map, prev); 4735 if (copying) { 4736 entry->aref = prev->aref; 4737 entry->etype &= ~UVM_ET_NEEDSCOPY; 4738 } 4739 uvm_map_check(map, "trymerge backmerge"); 4740 uvm_mapent_free(prev); 4741 merged++; 4742 } 4743 } 4744 4745 return merged; 4746 } 4747 4748 /* 4749 * uvm_map_setup: init map 4750 * 4751 * => map must not be in service yet. 4752 */ 4753 4754 void 4755 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags) 4756 { 4757 4758 rb_tree_init(&map->rb_tree, &uvm_map_tree_ops); 4759 map->header.next = map->header.prev = &map->header; 4760 map->nentries = 0; 4761 map->size = 0; 4762 map->ref_count = 1; 4763 vm_map_setmin(map, vmin); 4764 vm_map_setmax(map, vmax); 4765 map->flags = flags; 4766 map->first_free = &map->header; 4767 map->hint = &map->header; 4768 map->timestamp = 0; 4769 map->busy = NULL; 4770 4771 rw_init(&map->lock); 4772 cv_init(&map->cv, "vm_map"); 4773 mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE); 4774 } 4775 4776 /* 4777 * U N M A P - m a i n e n t r y p o i n t 4778 */ 4779 4780 /* 4781 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "stop") 4782 * 4783 * => caller must check alignment and size 4784 * => map must be unlocked (we will lock it) 4785 * => flags is UVM_FLAG_QUANTUM or 0. 
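 *
 * a hedged usage sketch: most callers go through the uvm_unmap()
 * wrapper (a macro in uvm_map.h that passes flags of 0):
 *
 *	uvm_unmap(map, trunc_page(start), round_page(end));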
4786  */
4787 
4788 void
4789 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
4790 {
4791 	struct vm_map_entry *dead_entries;
4792 	UVMHIST_FUNC(__func__);
4793 	UVMHIST_CALLARGS(maphist, " (map=%#jx, start=%#jx, end=%#jx)",
4794 	    (uintptr_t)map, start, end, 0);
4795 
4796 	KASSERTMSG(start < end,
4797 	    "%s: map %p: start %#jx < end %#jx", __func__, map,
4798 	    (uintmax_t)start, (uintmax_t)end);
4799 	if (map == kernel_map) {
4800 		LOCKDEBUG_MEM_CHECK((void *)start, end - start);
4801 	}
4802 
4803 	/*
4804 	 * work now done by helper functions.  wipe the pmap and then
4805 	 * detach the dead entries...
4806 	 */
4807 	vm_map_lock(map);
4808 	uvm_unmap_remove(map, start, end, &dead_entries, flags);
4809 	vm_map_unlock(map);
4810 
4811 	if (dead_entries != NULL)
4812 		uvm_unmap_detach(dead_entries, 0);
4813 
4814 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
4815 }
4816 
4817 
4818 /*
4819  * uvm_map_reference: add reference to a map
4820  *
4821  * => map need not be locked
4822  */
4823 
4824 void
4825 uvm_map_reference(struct vm_map *map)
4826 {
4827 
4828 	atomic_inc_uint(&map->ref_count);
4829 }
4830 
4831 void
4832 uvm_map_lock_entry(struct vm_map_entry *entry, krw_t op)
4833 {
4834 
4835 	if (entry->aref.ar_amap != NULL) {
4836 		amap_lock(entry->aref.ar_amap, op);
4837 	}
4838 	if (UVM_ET_ISOBJ(entry)) {
4839 		rw_enter(entry->object.uvm_obj->vmobjlock, op);
4840 	}
4841 }
4842 
4843 void
4844 uvm_map_unlock_entry(struct vm_map_entry *entry)
4845 {
4846 
4847 	if (UVM_ET_ISOBJ(entry)) {
4848 		rw_exit(entry->object.uvm_obj->vmobjlock);
4849 	}
4850 	if (entry->aref.ar_amap != NULL) {
4851 		amap_unlock(entry->aref.ar_amap);
4852 	}
4853 }
4854 
4855 #define	UVM_VOADDR_TYPE_MASK	0x3UL
4856 #define	UVM_VOADDR_TYPE_UOBJ	0x1UL
4857 #define	UVM_VOADDR_TYPE_ANON	0x2UL
4858 #define	UVM_VOADDR_OBJECT_MASK	~UVM_VOADDR_TYPE_MASK
4859 
4860 #define	UVM_VOADDR_GET_TYPE(voa)					\
4861 	((voa)->object & UVM_VOADDR_TYPE_MASK)
4862 #define	UVM_VOADDR_GET_OBJECT(voa)					\
4863 	((voa)->object & UVM_VOADDR_OBJECT_MASK)
4864 #define	UVM_VOADDR_SET_OBJECT(voa, obj, type)				\
4865 do {									\
4866 	KASSERT(((uintptr_t)(obj) & UVM_VOADDR_TYPE_MASK) == 0);	\
4867 	(voa)->object = ((uintptr_t)(obj)) | (type);			\
4868 } while (/*CONSTCOND*/0)
4869 
4870 #define	UVM_VOADDR_GET_UOBJ(voa)					\
4871 	((struct uvm_object *)UVM_VOADDR_GET_OBJECT(voa))
4872 #define	UVM_VOADDR_SET_UOBJ(voa, uobj)					\
4873 	UVM_VOADDR_SET_OBJECT(voa, uobj, UVM_VOADDR_TYPE_UOBJ)
4874 
4875 #define	UVM_VOADDR_GET_ANON(voa)					\
4876 	((struct vm_anon *)UVM_VOADDR_GET_OBJECT(voa))
4877 #define	UVM_VOADDR_SET_ANON(voa, anon)					\
4878 	UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON)
4879 
4880 /*
4881  * uvm_voaddr_acquire: returns the virtual object address corresponding
4882  * to the specified virtual address.
4883  *
4884  * => resolves COW so the true page identity is tracked.
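 * => used by e.g. the futex code, which wants a page identity that
 *    is stable across COW (a statement about callers, not something
 *    enforced here).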
4885  *
4886  * => acquires a reference on the page's owner (uvm_object or vm_anon)
4887  */
4888 bool
4889 uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
4890     struct uvm_voaddr * const voaddr)
4891 {
4892 	struct vm_map_entry *entry;
4893 	struct vm_anon *anon = NULL;
4894 	bool result = false;
4895 	bool exclusive = false;
4896 	void (*unlock_fn)(struct vm_map *);
4897 
4898 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
4899 	UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);
4900 
4901 	const vaddr_t start = trunc_page(va);
4902 	const vaddr_t end = round_page(va+1);
4903 
4904  lookup_again:
4905 	if (__predict_false(exclusive)) {
4906 		vm_map_lock(map);
4907 		unlock_fn = vm_map_unlock;
4908 	} else {
4909 		vm_map_lock_read(map);
4910 		unlock_fn = vm_map_unlock_read;
4911 	}
4912 
4913 	if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
4914 		unlock_fn(map);
4915 		UVMHIST_LOG(maphist,"<- done (no entry)",0,0,0,0);
4916 		return false;
4917 	}
4918 
4919 	if (__predict_false(entry->protection == VM_PROT_NONE)) {
4920 		unlock_fn(map);
4921 		UVMHIST_LOG(maphist,"<- done (PROT_NONE)",0,0,0,0);
4922 		return false;
4923 	}
4924 
4925 	/*
4926 	 * We have a fast path for the common case of "no COW resolution
4927 	 * needed" whereby we have taken a read lock on the map and if
4928 	 * we don't encounter any need to create a vm_anon then great!
4929 	 * But if we do, we loop around again, instead taking an exclusive
4930 	 * lock so that we can perform the fault.
4931 	 *
4932 	 * In the event that we have to resolve the fault, we do nearly the
4933 	 * same work as uvm_map_pageable() does:
4934 	 *
4935 	 * 1: holding the write lock, we create any anonymous maps that need
4936 	 *    to be created.  however, we do NOT need to clip the map entries
4937 	 *    in this case.
4938 	 *
4939 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
4940 	 *    in the page (assuming the entry is not already wired).  this
4941 	 *    is done because we need the vm_anon to be present.
4942 	 */
4943 	if (__predict_true(!VM_MAPENT_ISWIRED(entry))) {
4944 
4945 		bool need_fault = false;
4946 
4947 		/*
4948 		 * perform the action of vm_map_lookup that needs the
4949 		 * write lock on the map: create an anonymous map for
4950 		 * a copy-on-write region, or an anonymous map for
4951 		 * a zero-fill region.
4952 		 */
4953 		if (__predict_false(UVM_ET_ISSUBMAP(entry))) {
4954 			unlock_fn(map);
4955 			UVMHIST_LOG(maphist,"<- done (submap)",0,0,0,0);
4956 			return false;
4957 		}
4958 		if (__predict_false(UVM_ET_ISNEEDSCOPY(entry) &&
4959 		    ((entry->max_protection & VM_PROT_WRITE) ||
4960 		     (entry->object.uvm_obj == NULL)))) {
4961 			if (!exclusive) {
4962 				/* need to take the slow path */
4963 				KASSERT(unlock_fn == vm_map_unlock_read);
4964 				vm_map_unlock_read(map);
4965 				exclusive = true;
4966 				goto lookup_again;
4967 			}
4968 			need_fault = true;
4969 			amap_copy(map, entry, 0, start, end);
4970 			/* XXXCDC: wait OK? */
4971 		}
4972 
4973 		/*
4974 		 * do a quick check to see if the fault has already
4975 		 * been resolved to the upper layer.
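		 * (i.e. an anon already exists at this offset in the
		 * amap, in which case no fault is needed to establish
		 * the page's identity)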
4976 		 */
4977 		if (__predict_true(entry->aref.ar_amap != NULL &&
4978 		    need_fault == false)) {
4979 			amap_lock(entry->aref.ar_amap, RW_WRITER);
4980 			anon = amap_lookup(&entry->aref, start - entry->start);
4981 			if (__predict_true(anon != NULL)) {
4982 				/* amap unlocked below */
4983 				goto found_anon;
4984 			}
4985 			amap_unlock(entry->aref.ar_amap);
4986 			need_fault = true;
4987 		}
4988 
4989 		/*
4990 		 * we predict this test as false because if we reach
4991 		 * this point, then we are likely dealing with a
4992 		 * shared memory region backed by a uvm_object, in
4993 		 * which case a fault to create the vm_anon is not
4994 		 * necessary.
4995 		 */
4996 		if (__predict_false(need_fault)) {
4997 			if (exclusive) {
4998 				vm_map_busy(map);
4999 				vm_map_unlock(map);
5000 				unlock_fn = vm_map_unbusy;
5001 			}
5002 
5003 			if (uvm_fault_wire(map, start, end,
5004 			    entry->max_protection, 1)) {
5005 				/* wiring failed */
5006 				unlock_fn(map);
5007 				UVMHIST_LOG(maphist,"<- done (wire failed)",
5008 				    0,0,0,0);
5009 				return false;
5010 			}
5011 
5012 			/*
5013 			 * now that we have resolved the fault, we can unwire
5014 			 * the page.
5015 			 */
5016 			if (exclusive) {
5017 				vm_map_lock(map);
5018 				vm_map_unbusy(map);
5019 				unlock_fn = vm_map_unlock;
5020 			}
5021 
5022 			uvm_fault_unwire_locked(map, start, end);
5023 		}
5024 	}
5025 
5026 	/* check the upper layer */
5027 	if (entry->aref.ar_amap) {
5028 		amap_lock(entry->aref.ar_amap, RW_WRITER);
5029 		anon = amap_lookup(&entry->aref, start - entry->start);
5030 		if (anon) {
5031  found_anon:		KASSERT(anon->an_lock == entry->aref.ar_amap->am_lock);
5032 			anon->an_ref++;
5033 			rw_obj_hold(anon->an_lock);
5034 			KASSERT(anon->an_ref != 0);
5035 			UVM_VOADDR_SET_ANON(voaddr, anon);
5036 			voaddr->offset = va & PAGE_MASK;
5037 			result = true;
5038 		}
5039 		amap_unlock(entry->aref.ar_amap);
5040 	}
5041 
5042 	/* check the lower layer */
5043 	if (!result && UVM_ET_ISOBJ(entry)) {
5044 		struct uvm_object *uobj = entry->object.uvm_obj;
5045 
5046 		KASSERT(uobj != NULL);
5047 		(*uobj->pgops->pgo_reference)(uobj);
5048 		UVM_VOADDR_SET_UOBJ(voaddr, uobj);
5049 		voaddr->offset = entry->offset + (va - entry->start);
5050 		result = true;
5051 	}
5052 
5053 	unlock_fn(map);
5054 
5055 	if (result) {
5056 		UVMHIST_LOG(maphist,
5057 		    "<- done OK (type=%jd,owner=%#jx,offset=%#jx)",
5058 		    UVM_VOADDR_GET_TYPE(voaddr),
5059 		    UVM_VOADDR_GET_OBJECT(voaddr),
5060 		    voaddr->offset, 0);
5061 	} else {
5062 		UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0);
5063 	}
5064 
5065 	return result;
5066 }
5067 
5068 /*
5069  * uvm_voaddr_release: release the references held by the
5070  * virtual object address.
5071  */
5072 void
5073 uvm_voaddr_release(struct uvm_voaddr * const voaddr)
5074 {
5075 
5076 	switch (UVM_VOADDR_GET_TYPE(voaddr)) {
5077 	case UVM_VOADDR_TYPE_UOBJ: {
5078 		struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr);
5079 
5080 		KASSERT(uobj != NULL);
5081 		KASSERT(uobj->pgops->pgo_detach != NULL);
5082 		(*uobj->pgops->pgo_detach)(uobj);
5083 		break;
5084 	}
5085 	case UVM_VOADDR_TYPE_ANON: {
5086 		struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr);
5087 		krwlock_t *lock;
5088 
5089 		KASSERT(anon != NULL);
5090 		rw_enter((lock = anon->an_lock), RW_WRITER);
5091 		KASSERT(anon->an_ref > 0);
5092 		if (--anon->an_ref == 0) {
5093 			uvm_anfree(anon);
5094 		}
5095 		rw_exit(lock);
5096 		rw_obj_free(lock);
5097 		break;
5098 	}
5099 	default:
5100 		panic("uvm_voaddr_release: bad type");
5101 	}
5102 	memset(voaddr, 0, sizeof(*voaddr));
5103 }
5104 
5105 /*
5106  * uvm_voaddr_compare: compare two uvm_voaddr objects.
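 * (the order is: type, then owner pointer, then offset)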
5107 * 5108 * => memcmp() semantics 5109 */ 5110 int 5111 uvm_voaddr_compare(const struct uvm_voaddr * const voaddr1, 5112 const struct uvm_voaddr * const voaddr2) 5113 { 5114 const uintptr_t type1 = UVM_VOADDR_GET_TYPE(voaddr1); 5115 const uintptr_t type2 = UVM_VOADDR_GET_TYPE(voaddr2); 5116 5117 KASSERT(type1 == UVM_VOADDR_TYPE_UOBJ || 5118 type1 == UVM_VOADDR_TYPE_ANON); 5119 5120 KASSERT(type2 == UVM_VOADDR_TYPE_UOBJ || 5121 type2 == UVM_VOADDR_TYPE_ANON); 5122 5123 if (type1 < type2) 5124 return -1; 5125 if (type1 > type2) 5126 return 1; 5127 5128 const uintptr_t addr1 = UVM_VOADDR_GET_OBJECT(voaddr1); 5129 const uintptr_t addr2 = UVM_VOADDR_GET_OBJECT(voaddr2); 5130 5131 if (addr1 < addr2) 5132 return -1; 5133 if (addr1 > addr2) 5134 return 1; 5135 5136 if (voaddr1->offset < voaddr2->offset) 5137 return -1; 5138 if (voaddr1->offset > voaddr2->offset) 5139 return 1; 5140 5141 return 0; 5142 } 5143 5144 #if defined(DDB) || defined(DEBUGPRINT) 5145 5146 /* 5147 * uvm_map_printit: actually prints the map 5148 */ 5149 5150 void 5151 uvm_map_printit(struct vm_map *map, bool full, 5152 void (*pr)(const char *, ...)) 5153 { 5154 struct vm_map_entry *entry; 5155 5156 (*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map), 5157 vm_map_max(map)); 5158 (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n", 5159 map->nentries, map->size, map->ref_count, map->timestamp, 5160 map->flags); 5161 (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap, 5162 pmap_resident_count(map->pmap), pmap_wired_count(map->pmap)); 5163 if (!full) 5164 return; 5165 for (entry = map->header.next; entry != &map->header; 5166 entry = entry->next) { 5167 (*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n", 5168 entry, entry->start, entry->end, entry->object.uvm_obj, 5169 (long long)entry->offset, entry->aref.ar_amap, 5170 entry->aref.ar_pageoff); 5171 (*pr)( 5172 "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, " 5173 "wc=%d, adv=%d%s\n", 5174 (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F', 5175 (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F', 5176 (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F', 5177 entry->protection, entry->max_protection, 5178 entry->inheritance, entry->wired_count, entry->advice, 5179 entry == map->first_free ? 
" (first_free)" : ""); 5180 } 5181 } 5182 5183 void 5184 uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...)) 5185 { 5186 struct vm_map *map; 5187 5188 for (map = kernel_map;;) { 5189 struct vm_map_entry *entry; 5190 5191 if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) { 5192 break; 5193 } 5194 (*pr)("%p is %p+%zu from VMMAP %p\n", 5195 (void *)addr, (void *)entry->start, 5196 (size_t)(addr - (uintptr_t)entry->start), map); 5197 if (!UVM_ET_ISSUBMAP(entry)) { 5198 break; 5199 } 5200 map = entry->object.sub_map; 5201 } 5202 } 5203 5204 #endif /* DDB || DEBUGPRINT */ 5205 5206 #ifndef __USER_VA0_IS_SAFE 5207 static int 5208 sysctl_user_va0_disable(SYSCTLFN_ARGS) 5209 { 5210 struct sysctlnode node; 5211 int t, error; 5212 5213 node = *rnode; 5214 node.sysctl_data = &t; 5215 t = user_va0_disable; 5216 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5217 if (error || newp == NULL) 5218 return (error); 5219 5220 if (!t && user_va0_disable && 5221 kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0, 5222 NULL, NULL, NULL)) 5223 return EPERM; 5224 5225 user_va0_disable = !!t; 5226 return 0; 5227 } 5228 #endif 5229 5230 static int 5231 fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve, 5232 struct vm_map *m, struct vm_map_entry *e) 5233 { 5234 #ifndef _RUMPKERNEL 5235 int error; 5236 5237 memset(kve, 0, sizeof(*kve)); 5238 KASSERT(e != NULL); 5239 if (UVM_ET_ISOBJ(e)) { 5240 struct uvm_object *uobj = e->object.uvm_obj; 5241 KASSERT(uobj != NULL); 5242 kve->kve_ref_count = uobj->uo_refs; 5243 kve->kve_count = uobj->uo_npages; 5244 if (UVM_OBJ_IS_VNODE(uobj)) { 5245 struct vattr va; 5246 struct vnode *vp = (struct vnode *)uobj; 5247 vn_lock(vp, LK_SHARED | LK_RETRY); 5248 error = VOP_GETATTR(vp, &va, l->l_cred); 5249 VOP_UNLOCK(vp); 5250 kve->kve_type = KVME_TYPE_VNODE; 5251 if (error == 0) { 5252 kve->kve_vn_size = vp->v_size; 5253 kve->kve_vn_type = (int)vp->v_type; 5254 kve->kve_vn_mode = va.va_mode; 5255 kve->kve_vn_rdev = va.va_rdev; 5256 kve->kve_vn_fileid = va.va_fileid; 5257 kve->kve_vn_fsid = va.va_fsid; 5258 error = vnode_to_path(kve->kve_path, 5259 sizeof(kve->kve_path) / 2, vp, l, p); 5260 } 5261 } else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) { 5262 kve->kve_type = KVME_TYPE_KERN; 5263 } else if (UVM_OBJ_IS_DEVICE(uobj)) { 5264 kve->kve_type = KVME_TYPE_DEVICE; 5265 } else if (UVM_OBJ_IS_AOBJ(uobj)) { 5266 kve->kve_type = KVME_TYPE_ANON; 5267 } else { 5268 kve->kve_type = KVME_TYPE_OBJECT; 5269 } 5270 } else if (UVM_ET_ISSUBMAP(e)) { 5271 struct vm_map *map = e->object.sub_map; 5272 KASSERT(map != NULL); 5273 kve->kve_ref_count = map->ref_count; 5274 kve->kve_count = map->nentries; 5275 kve->kve_type = KVME_TYPE_SUBMAP; 5276 } else 5277 kve->kve_type = KVME_TYPE_UNKNOWN; 5278 5279 kve->kve_start = e->start; 5280 kve->kve_end = e->end; 5281 kve->kve_offset = e->offset; 5282 kve->kve_wired_count = e->wired_count; 5283 kve->kve_inheritance = e->inheritance; 5284 kve->kve_attributes = 0; /* unused */ 5285 kve->kve_advice = e->advice; 5286 #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \ 5287 (((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \ 5288 (((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0) 5289 kve->kve_protection = PROT(e->protection); 5290 kve->kve_max_protection = PROT(e->max_protection); 5291 kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE) 5292 ? KVME_FLAG_COW : 0; 5293 kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY) 5294 ? KVME_FLAG_NEEDS_COPY : 0; 5295 kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN) 5296 ? 
KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP; 5297 kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE) 5298 ? KVME_FLAG_PAGEABLE : 0; 5299 #endif 5300 return 0; 5301 } 5302 5303 static int 5304 fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp, 5305 size_t *oldlenp) 5306 { 5307 int error; 5308 struct proc *p; 5309 struct kinfo_vmentry *vme; 5310 struct vmspace *vm; 5311 struct vm_map *map; 5312 struct vm_map_entry *entry; 5313 char *dp; 5314 size_t count, vmesize; 5315 5316 if (elem_size == 0 || elem_size > 2 * sizeof(*vme)) 5317 return EINVAL; 5318 5319 if (oldp) { 5320 if (*oldlenp > 10UL * 1024UL * 1024UL) 5321 return E2BIG; 5322 count = *oldlenp / elem_size; 5323 if (count == 0) 5324 return ENOMEM; 5325 vmesize = count * sizeof(*vme); 5326 } else 5327 vmesize = 0; 5328 5329 if ((error = proc_find_locked(l, &p, pid)) != 0) 5330 return error; 5331 5332 vme = NULL; 5333 count = 0; 5334 5335 if ((error = proc_vmspace_getref(p, &vm)) != 0) 5336 goto out; 5337 5338 map = &vm->vm_map; 5339 vm_map_lock_read(map); 5340 5341 dp = oldp; 5342 if (oldp) 5343 vme = kmem_alloc(vmesize, KM_SLEEP); 5344 for (entry = map->header.next; entry != &map->header; 5345 entry = entry->next) { 5346 if (oldp && (dp - (char *)oldp) < vmesize) { 5347 error = fill_vmentry(l, p, &vme[count], map, entry); 5348 if (error) 5349 goto out; 5350 dp += elem_size; 5351 } 5352 count++; 5353 } 5354 vm_map_unlock_read(map); 5355 uvmspace_free(vm); 5356 5357 out: 5358 if (pid != -1) 5359 mutex_exit(p->p_lock); 5360 if (error == 0) { 5361 const u_int esize = uimin(sizeof(*vme), elem_size); 5362 dp = oldp; 5363 for (size_t i = 0; i < count; i++) { 5364 if (oldp && (dp - (char *)oldp) < vmesize) { 5365 error = sysctl_copyout(l, &vme[i], dp, esize); 5366 if (error) 5367 break; 5368 dp += elem_size; 5369 } else 5370 break; 5371 } 5372 count *= elem_size; 5373 if (oldp != NULL && *oldlenp < count) 5374 error = ENOSPC; 5375 *oldlenp = count; 5376 } 5377 if (vme) 5378 kmem_free(vme, vmesize); 5379 return error; 5380 } 5381 5382 static int 5383 sysctl_vmproc(SYSCTLFN_ARGS) 5384 { 5385 int error; 5386 5387 if (namelen == 1 && name[0] == CTL_QUERY) 5388 return (sysctl_query(SYSCTLFN_CALL(rnode))); 5389 5390 if (namelen == 0) 5391 return EINVAL; 5392 5393 switch (name[0]) { 5394 case VM_PROC_MAP: 5395 if (namelen != 3) 5396 return EINVAL; 5397 sysctl_unlock(); 5398 error = fill_vmentries(l, name[1], name[2], oldp, oldlenp); 5399 sysctl_relock(); 5400 return error; 5401 default: 5402 return EINVAL; 5403 } 5404 } 5405 5406 SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup") 5407 { 5408 5409 sysctl_createv(clog, 0, NULL, NULL, 5410 CTLFLAG_PERMANENT, 5411 CTLTYPE_STRUCT, "proc", 5412 SYSCTL_DESCR("Process vm information"), 5413 sysctl_vmproc, 0, NULL, 0, 5414 CTL_VM, VM_PROC, CTL_EOL); 5415 #ifndef __USER_VA0_IS_SAFE 5416 sysctl_createv(clog, 0, NULL, NULL, 5417 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 5418 CTLTYPE_INT, "user_va0_disable", 5419 SYSCTL_DESCR("Disable VA 0"), 5420 sysctl_user_va0_disable, 0, &user_va0_disable, 0, 5421 CTL_VM, CTL_CREATE, CTL_EOL); 5422 #endif 5423 } 5424
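
/*
 * A hedged sketch of the userland side of VM_PROC_MAP (the element
 * size rides along in the MIB so the kernel copies out no more than
 * the caller's idea of a struct kinfo_vmentry); error handling
 * omitted:
 *
 *	int mib[5] = { CTL_VM, VM_PROC, VM_PROC_MAP, getpid(),
 *	    sizeof(struct kinfo_vmentry) };
 *	size_t len = 0;
 *	sysctl(mib, 5, NULL, &len, NULL, 0);	(probe for the size)
 *	char *buf = malloc(len);
 *	sysctl(mib, 5, buf, &len, NULL, 0);	(fetch the entries)
 */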