1 /* $NetBSD: pmap.c,v 1.62 2021/04/17 01:53:58 mrg Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center and by Chris G. Demetriou. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1992, 1993 35 * The Regents of the University of California. All rights reserved. 36 * 37 * This code is derived from software contributed to Berkeley by 38 * the Systems Programming Group of the University of Utah Computer 39 * Science Department and Ralph Campbell. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. Neither the name of the University nor the names of its contributors 50 * may be used to endorse or promote products derived from this software 51 * without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 
64 * 65 * @(#)pmap.c 8.4 (Berkeley) 1/26/94 66 */ 67 68 #include <sys/cdefs.h> 69 70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.62 2021/04/17 01:53:58 mrg Exp $"); 71 72 /* 73 * Manages physical address maps. 74 * 75 * In addition to hardware address maps, this 76 * module is called upon to provide software-use-only 77 * maps which may or may not be stored in the same 78 * form as hardware maps. These pseudo-maps are 79 * used to store intermediate results from copy 80 * operations to and from address spaces. 81 * 82 * Since the information managed by this module is 83 * also stored by the logical address mapping module, 84 * this module may throw away valid virtual-to-physical 85 * mappings at almost any time. However, invalidations 86 * of virtual-to-physical mappings must be done as 87 * requested. 88 * 89 * In order to cope with hardware architectures which 90 * make virtual-to-physical map invalidates expensive, 91 * this module may delay invalidate or reduced protection 92 * operations until such time as they are actually 93 * necessary. This module is given full information as 94 * to which processors are currently using which maps, 95 * and to when physical maps must be made correct. 96 */ 97 98 #include "opt_modular.h" 99 #include "opt_multiprocessor.h" 100 #include "opt_sysv.h" 101 102 #define __PMAP_PRIVATE 103 104 #include <sys/param.h> 105 106 #include <sys/asan.h> 107 #include <sys/atomic.h> 108 #include <sys/buf.h> 109 #include <sys/cpu.h> 110 #include <sys/mutex.h> 111 #include <sys/pool.h> 112 113 #include <uvm/uvm.h> 114 #include <uvm/uvm_physseg.h> 115 #include <uvm/pmap/pmap_pvt.h> 116 117 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \ 118 && !defined(PMAP_NO_PV_UNCACHED) 119 #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \ 120 PMAP_NO_PV_UNCACHED to be defined 121 #endif 122 123 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls"); 124 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped"); 125 PMAP_COUNTER(remove_user_calls, "remove user calls"); 126 PMAP_COUNTER(remove_user_pages, "user pages unmapped"); 127 PMAP_COUNTER(remove_flushes, "remove cache flushes"); 128 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops"); 129 PMAP_COUNTER(remove_pvfirst, "remove pv first"); 130 PMAP_COUNTER(remove_pvsearch, "remove pv search"); 131 132 PMAP_COUNTER(prefer_requests, "prefer requests"); 133 PMAP_COUNTER(prefer_adjustments, "prefer adjustments"); 134 135 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed"); 136 137 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages"); 138 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)"); 139 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages"); 140 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages"); 141 142 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable"); 143 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable"); 144 145 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)"); 146 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)"); 147 PMAP_COUNTER(kernel_mappings, "kernel pages mapped"); 148 PMAP_COUNTER(user_mappings, "user pages mapped"); 149 PMAP_COUNTER(user_mappings_changed, "user mapping changed"); 150 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed"); 151 PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); 152 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); 153 PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped"); 154 PMAP_COUNTER(managed_mappings, 
"managed pages mapped"); 155 PMAP_COUNTER(mappings, "pages mapped"); 156 PMAP_COUNTER(remappings, "pages remapped"); 157 PMAP_COUNTER(unmappings, "pages unmapped"); 158 PMAP_COUNTER(primary_mappings, "page initial mappings"); 159 PMAP_COUNTER(primary_unmappings, "page final unmappings"); 160 PMAP_COUNTER(tlb_hit, "page mapping"); 161 162 PMAP_COUNTER(exec_mappings, "exec pages mapped"); 163 PMAP_COUNTER(exec_synced_mappings, "exec pages synced"); 164 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)"); 165 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)"); 166 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)"); 167 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)"); 168 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)"); 169 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)"); 170 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)"); 171 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)"); 172 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)"); 173 174 PMAP_COUNTER(create, "creates"); 175 PMAP_COUNTER(reference, "references"); 176 PMAP_COUNTER(dereference, "dereferences"); 177 PMAP_COUNTER(destroy, "destroyed"); 178 PMAP_COUNTER(activate, "activations"); 179 PMAP_COUNTER(deactivate, "deactivations"); 180 PMAP_COUNTER(update, "updates"); 181 #ifdef MULTIPROCESSOR 182 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs"); 183 #endif 184 PMAP_COUNTER(unwire, "unwires"); 185 PMAP_COUNTER(copy, "copies"); 186 PMAP_COUNTER(clear_modify, "clear_modifies"); 187 PMAP_COUNTER(protect, "protects"); 188 PMAP_COUNTER(page_protect, "page_protects"); 189 190 #define PMAP_ASID_RESERVED 0 191 CTASSERT(PMAP_ASID_RESERVED == 0); 192 193 #ifndef PMAP_SEGTAB_ALIGN 194 #define PMAP_SEGTAB_ALIGN /* nothing */ 195 #endif 196 #ifdef _LP64 197 pmap_segtab_t pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */ 198 #endif 199 pmap_segtab_t pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */ 200 #ifdef _LP64 201 .seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab, 202 #endif 203 }; 204 205 struct pmap_kernel kernel_pmap_store = { 206 .kernel_pmap = { 207 .pm_count = 1, 208 .pm_segtab = &pmap_kern_segtab, 209 .pm_minaddr = VM_MIN_KERNEL_ADDRESS, 210 .pm_maxaddr = VM_MAX_KERNEL_ADDRESS, 211 }, 212 }; 213 214 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap; 215 216 /* The current top of kernel VM - gets updated by pmap_growkernel */ 217 vaddr_t pmap_curmaxkvaddr; 218 219 struct pmap_limits pmap_limits = { /* VA and PA limits */ 220 .virtual_start = VM_MIN_KERNEL_ADDRESS, 221 .virtual_end = VM_MAX_KERNEL_ADDRESS, 222 }; 223 224 #ifdef UVMHIST 225 static struct kern_history_ent pmapexechistbuf[10000]; 226 static struct kern_history_ent pmaphistbuf[10000]; 227 static struct kern_history_ent pmapsegtabhistbuf[1000]; 228 UVMHIST_DEFINE(pmapexechist) = UVMHIST_INITIALIZER(pmapexechist, pmapexechistbuf); 229 UVMHIST_DEFINE(pmaphist) = UVMHIST_INITIALIZER(pmaphist, pmaphistbuf); 230 UVMHIST_DEFINE(pmapsegtabhist) = UVMHIST_INITIALIZER(pmapsegtabhist, pmapsegtabhistbuf); 231 #endif 232 233 /* 234 * The pools from which pmap structures and sub-structures are allocated. 
235 */ 236 struct pool pmap_pmap_pool; 237 struct pool pmap_pv_pool; 238 239 #ifndef PMAP_PV_LOWAT 240 #define PMAP_PV_LOWAT 16 241 #endif 242 int pmap_pv_lowat = PMAP_PV_LOWAT; 243 244 bool pmap_initialized = false; 245 #define PMAP_PAGE_COLOROK_P(a, b) \ 246 ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0) 247 u_int pmap_page_colormask; 248 249 #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa)) 250 251 #define PMAP_IS_ACTIVE(pm) \ 252 ((pm) == pmap_kernel() || \ 253 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap) 254 255 /* Forward function declarations */ 256 void pmap_page_remove(struct vm_page_md *); 257 static void pmap_pvlist_check(struct vm_page_md *); 258 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool); 259 void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int); 260 261 /* 262 * PV table management functions. 263 */ 264 void *pmap_pv_page_alloc(struct pool *, int); 265 void pmap_pv_page_free(struct pool *, void *); 266 267 struct pool_allocator pmap_pv_page_allocator = { 268 pmap_pv_page_alloc, pmap_pv_page_free, 0, 269 }; 270 271 #define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT) 272 #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) 273 274 #ifndef PMAP_NEED_TLB_MISS_LOCK 275 276 #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG) 277 #define PMAP_NEED_TLB_MISS_LOCK 278 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */ 279 280 #endif /* PMAP_NEED_TLB_MISS_LOCK */ 281 282 #ifdef PMAP_NEED_TLB_MISS_LOCK 283 284 #ifdef PMAP_MD_NEED_TLB_MISS_LOCK 285 #define pmap_tlb_miss_lock_init() __nothing /* MD code deals with this */ 286 #define pmap_tlb_miss_lock_enter() pmap_md_tlb_miss_lock_enter() 287 #define pmap_tlb_miss_lock_exit() pmap_md_tlb_miss_lock_exit() 288 #else 289 kmutex_t pmap_tlb_miss_lock __cacheline_aligned; 290 291 static void 292 pmap_tlb_miss_lock_init(void) 293 { 294 mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH); 295 } 296 297 static inline void 298 pmap_tlb_miss_lock_enter(void) 299 { 300 mutex_spin_enter(&pmap_tlb_miss_lock); 301 } 302 303 static inline void 304 pmap_tlb_miss_lock_exit(void) 305 { 306 mutex_spin_exit(&pmap_tlb_miss_lock); 307 } 308 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */ 309 310 #else 311 312 #define pmap_tlb_miss_lock_init() __nothing 313 #define pmap_tlb_miss_lock_enter() __nothing 314 #define pmap_tlb_miss_lock_exit() __nothing 315 316 #endif /* PMAP_NEED_TLB_MISS_LOCK */ 317 318 #ifndef MULTIPROCESSOR 319 kmutex_t pmap_pvlist_mutex __cacheline_aligned; 320 #endif 321 322 /* 323 * Debug functions. 324 */ 325 326 #ifdef DEBUG 327 static inline void 328 pmap_asid_check(pmap_t pm, const char *func) 329 { 330 if (!PMAP_IS_ACTIVE(pm)) 331 return; 332 333 struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu())); 334 tlb_asid_t asid = tlb_get_asid(); 335 if (asid != pai->pai_asid) 336 panic("%s: inconsistency for active TLB update: %u <-> %u", 337 func, asid, pai->pai_asid); 338 } 339 #endif 340 341 static void 342 pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func) 343 { 344 #ifdef DEBUG 345 if (pmap == pmap_kernel()) { 346 if (sva < VM_MIN_KERNEL_ADDRESS) 347 panic("%s: kva %#"PRIxVADDR" not in range", 348 func, sva); 349 if (eva >= pmap_limits.virtual_end) 350 panic("%s: kva %#"PRIxVADDR" not in range", 351 func, eva); 352 } else { 353 if (eva > VM_MAXUSER_ADDRESS) 354 panic("%s: uva %#"PRIxVADDR" not in range", 355 func, eva); 356 pmap_asid_check(pmap, func); 357 } 358 #endif 359 } 360 361 /* 362 * Misc. functions. 
363 */ 364 365 bool 366 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes) 367 { 368 volatile unsigned long * const attrp = &mdpg->mdpg_attrs; 369 #ifdef MULTIPROCESSOR 370 for (;;) { 371 u_int old_attr = *attrp; 372 if ((old_attr & clear_attributes) == 0) 373 return false; 374 u_int new_attr = old_attr & ~clear_attributes; 375 if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr)) 376 return true; 377 } 378 #else 379 unsigned long old_attr = *attrp; 380 if ((old_attr & clear_attributes) == 0) 381 return false; 382 *attrp &= ~clear_attributes; 383 return true; 384 #endif 385 } 386 387 void 388 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes) 389 { 390 #ifdef MULTIPROCESSOR 391 atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes); 392 #else 393 mdpg->mdpg_attrs |= set_attributes; 394 #endif 395 } 396 397 static void 398 pmap_page_syncicache(struct vm_page *pg) 399 { 400 UVMHIST_FUNC(__func__); 401 UVMHIST_CALLED(pmaphist); 402 #ifndef MULTIPROCESSOR 403 struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap; 404 #endif 405 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 406 pv_entry_t pv = &mdpg->mdpg_first; 407 kcpuset_t *onproc; 408 #ifdef MULTIPROCESSOR 409 kcpuset_create(&onproc, true); 410 KASSERT(onproc != NULL); 411 #else 412 onproc = NULL; 413 #endif 414 VM_PAGEMD_PVLIST_READLOCK(mdpg); 415 pmap_pvlist_check(mdpg); 416 417 UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", (uintptr_t)pv, 418 (uintptr_t)pv->pv_pmap, 0, 0); 419 420 if (pv->pv_pmap != NULL) { 421 for (; pv != NULL; pv = pv->pv_next) { 422 #ifdef MULTIPROCESSOR 423 UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", 424 (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 0, 0); 425 kcpuset_merge(onproc, pv->pv_pmap->pm_onproc); 426 if (kcpuset_match(onproc, kcpuset_running)) { 427 break; 428 } 429 #else 430 if (pv->pv_pmap == curpmap) { 431 onproc = curcpu()->ci_data.cpu_kcpuset; 432 break; 433 } 434 #endif 435 } 436 } 437 pmap_pvlist_check(mdpg); 438 VM_PAGEMD_PVLIST_UNLOCK(mdpg); 439 kpreempt_disable(); 440 pmap_md_page_syncicache(mdpg, onproc); 441 kpreempt_enable(); 442 #ifdef MULTIPROCESSOR 443 kcpuset_destroy(onproc); 444 #endif 445 } 446 447 /* 448 * Define the initial bounds of the kernel virtual address space. 449 */ 450 void 451 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) 452 { 453 454 *vstartp = pmap_limits.virtual_start; 455 *vendp = pmap_limits.virtual_end; 456 } 457 458 vaddr_t 459 pmap_growkernel(vaddr_t maxkvaddr) 460 { 461 UVMHIST_FUNC(__func__); 462 UVMHIST_CALLARGS(pmaphist, "maxkvaddr=%#jx (%#jx)", maxkvaddr, 463 pmap_curmaxkvaddr, 0, 0); 464 465 vaddr_t virtual_end = pmap_curmaxkvaddr; 466 maxkvaddr = pmap_round_seg(maxkvaddr) - 1; 467 468 /* 469 * Don't exceed VM_MAX_KERNEL_ADDRESS! 470 */ 471 if (maxkvaddr == 0 || maxkvaddr > VM_MAX_KERNEL_ADDRESS) 472 maxkvaddr = VM_MAX_KERNEL_ADDRESS; 473 474 /* 475 * Reserve PTEs for the new KVA space. 476 */ 477 for (; virtual_end < maxkvaddr; virtual_end += NBSEG) { 478 pmap_pte_reserve(pmap_kernel(), virtual_end, 0); 479 } 480 481 kasan_shadow_map((void *)pmap_curmaxkvaddr, 482 (size_t)(virtual_end - pmap_curmaxkvaddr)); 483 484 /* 485 * Update new end. 486 */ 487 pmap_curmaxkvaddr = virtual_end; 488 489 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 490 491 return virtual_end; 492 } 493 494 /* 495 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()). 496 * This function allows for early dynamic memory allocation until the virtual 497 * memory system has been bootstrapped. 
After that point, either kmem_alloc 498 * or malloc should be used. This function works by stealing pages from the 499 * (to be) managed page pool, then implicitly mapping the pages (by using 500 * their direct mapped addresses) and zeroing them. 501 * 502 * It may be used once the physical memory segments have been pre-loaded 503 * into the vm_physmem[] array. Early memory allocation MUST use this 504 * interface! This cannot be used after vm_page_startup(), and will 505 * generate a panic if tried. 506 * 507 * Note that this memory will never be freed, and in essence it is wired 508 * down. 509 * 510 * We must adjust *vstartp and/or *vendp iff we use address space 511 * from the kernel virtual address range defined by pmap_virtual_space(). 512 */ 513 vaddr_t 514 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) 515 { 516 size_t npgs; 517 paddr_t pa; 518 vaddr_t va; 519 520 uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID; 521 522 size = round_page(size); 523 npgs = atop(size); 524 525 aprint_debug("%s: need %zu pages\n", __func__, npgs); 526 527 for (uvm_physseg_t bank = uvm_physseg_get_first(); 528 uvm_physseg_valid_p(bank); 529 bank = uvm_physseg_get_next(bank)) { 530 531 if (uvm.page_init_done == true) 532 panic("pmap_steal_memory: called _after_ bootstrap"); 533 534 aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n", 535 __func__, bank, 536 uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank), 537 uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank)); 538 539 if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) 540 || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) { 541 aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank); 542 continue; 543 } 544 545 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) { 546 aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n", 547 __func__, bank, npgs); 548 continue; 549 } 550 551 if (!pmap_md_ok_to_steal_p(bank, npgs)) { 552 continue; 553 } 554 555 /* 556 * Always try to allocate from the segment with the least 557 * amount of space left. 558 */ 559 #define VM_PHYSMEM_SPACE(b) ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b))) 560 if (uvm_physseg_valid_p(maybe_bank) == false 561 || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) { 562 maybe_bank = bank; 563 } 564 } 565 566 if (uvm_physseg_valid_p(maybe_bank)) { 567 const uvm_physseg_t bank = maybe_bank; 568 569 /* 570 * There are enough pages here; steal them! 571 */ 572 pa = ptoa(uvm_physseg_get_start(bank)); 573 uvm_physseg_unplug(atop(pa), npgs); 574 575 aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n", 576 __func__, bank, npgs, VM_PHYSMEM_SPACE(bank)); 577 578 va = pmap_md_map_poolpage(pa, size); 579 memset((void *)va, 0, size); 580 return va; 581 } 582 583 /* 584 * If we got here, there was no memory left. 585 */ 586 panic("pmap_steal_memory: no memory to steal %zu pages", npgs); 587 } 588 589 /* 590 * Bootstrap the system enough to run with virtual memory. 591 * (Common routine called by machine-dependent bootstrap code.) 592 */ 593 void 594 pmap_bootstrap_common(void) 595 { 596 pmap_tlb_miss_lock_init(); 597 } 598 599 /* 600 * Initialize the pmap module. 601 * Called by vm_init, to initialize any structures that the pmap 602 * system needs to map virtual memory. 
603 */ 604 void 605 pmap_init(void) 606 { 607 UVMHIST_LINK_STATIC(pmapexechist); 608 UVMHIST_LINK_STATIC(pmaphist); 609 UVMHIST_LINK_STATIC(pmapsegtabhist); 610 611 UVMHIST_FUNC(__func__); 612 UVMHIST_CALLED(pmaphist); 613 614 /* 615 * Initialize the segtab lock. 616 */ 617 mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH); 618 619 /* 620 * Set a low water mark on the pv_entry pool, so that we are 621 * more likely to have these around even in extreme memory 622 * starvation. 623 */ 624 pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); 625 626 /* 627 * Set the page colormask but allow pmap_md_init to override it. 628 */ 629 pmap_page_colormask = ptoa(uvmexp.colormask); 630 631 pmap_md_init(); 632 633 /* 634 * Now it is safe to enable pv entry recording. 635 */ 636 pmap_initialized = true; 637 } 638 639 /* 640 * Create and return a physical map. 641 * 642 * If the size specified for the map 643 * is zero, the map is an actual physical 644 * map, and may be referenced by the 645 * hardware. 646 * 647 * If the size specified is non-zero, 648 * the map will be used in software only, and 649 * is bounded by that size. 650 */ 651 pmap_t 652 pmap_create(void) 653 { 654 UVMHIST_FUNC(__func__); 655 UVMHIST_CALLED(pmaphist); 656 PMAP_COUNT(create); 657 658 pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); 659 memset(pmap, 0, PMAP_SIZE); 660 661 KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL); 662 663 pmap->pm_count = 1; 664 pmap->pm_minaddr = VM_MIN_ADDRESS; 665 pmap->pm_maxaddr = VM_MAXUSER_ADDRESS; 666 667 pmap_segtab_init(pmap); 668 669 #ifdef MULTIPROCESSOR 670 kcpuset_create(&pmap->pm_active, true); 671 kcpuset_create(&pmap->pm_onproc, true); 672 KASSERT(pmap->pm_active != NULL); 673 KASSERT(pmap->pm_onproc != NULL); 674 #endif 675 676 UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap, 677 0, 0, 0); 678 679 return pmap; 680 } 681 682 /* 683 * Retire the given physical map from service. 684 * Should only be called if the map contains 685 * no valid mappings. 686 */ 687 void 688 pmap_destroy(pmap_t pmap) 689 { 690 UVMHIST_FUNC(__func__); 691 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); 692 693 if (atomic_dec_uint_nv(&pmap->pm_count) > 0) { 694 PMAP_COUNT(dereference); 695 UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0); 696 return; 697 } 698 699 PMAP_COUNT(destroy); 700 KASSERT(pmap->pm_count == 0); 701 kpreempt_disable(); 702 pmap_tlb_miss_lock_enter(); 703 pmap_tlb_asid_release_all(pmap); 704 pmap_segtab_destroy(pmap, NULL, 0); 705 pmap_tlb_miss_lock_exit(); 706 707 #ifdef MULTIPROCESSOR 708 kcpuset_destroy(pmap->pm_active); 709 kcpuset_destroy(pmap->pm_onproc); 710 pmap->pm_active = NULL; 711 pmap->pm_onproc = NULL; 712 #endif 713 714 pool_put(&pmap_pmap_pool, pmap); 715 kpreempt_enable(); 716 717 UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0); 718 } 719 720 /* 721 * Add a reference to the specified pmap. 722 */ 723 void 724 pmap_reference(pmap_t pmap) 725 { 726 UVMHIST_FUNC(__func__); 727 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); 728 PMAP_COUNT(reference); 729 730 if (pmap != NULL) { 731 atomic_inc_uint(&pmap->pm_count); 732 } 733 734 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 735 } 736 737 /* 738 * Make a new pmap (vmspace) active for the given process. 
739 */ 740 void 741 pmap_activate(struct lwp *l) 742 { 743 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 744 745 UVMHIST_FUNC(__func__); 746 UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l, 747 (uintptr_t)pmap, 0, 0); 748 PMAP_COUNT(activate); 749 750 kpreempt_disable(); 751 pmap_tlb_miss_lock_enter(); 752 pmap_tlb_asid_acquire(pmap, l); 753 pmap_segtab_activate(pmap, l); 754 pmap_tlb_miss_lock_exit(); 755 kpreempt_enable(); 756 757 UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid, 758 l->l_lid, 0, 0); 759 } 760 761 /* 762 * Remove this page from all physical maps in which it resides. 763 * Reflects back modify bits to the pager. 764 */ 765 void 766 pmap_page_remove(struct vm_page_md *mdpg) 767 { 768 kpreempt_disable(); 769 VM_PAGEMD_PVLIST_LOCK(mdpg); 770 pmap_pvlist_check(mdpg); 771 772 struct vm_page * const pg = 773 VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL; 774 775 UVMHIST_FUNC(__func__); 776 if (pg) { 777 UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): " 778 "execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg, 779 VM_PAGE_TO_PHYS(pg), 0); 780 } else { 781 UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0, 782 0, 0); 783 } 784 785 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 786 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED); 787 #else 788 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); 789 #endif 790 PMAP_COUNT(exec_uncached_remove); 791 792 pv_entry_t pv = &mdpg->mdpg_first; 793 if (pv->pv_pmap == NULL) { 794 VM_PAGEMD_PVLIST_UNLOCK(mdpg); 795 kpreempt_enable(); 796 UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0); 797 return; 798 } 799 800 pv_entry_t npv; 801 pv_entry_t pvp = NULL; 802 803 for (; pv != NULL; pv = npv) { 804 npv = pv->pv_next; 805 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 806 if (PV_ISKENTER_P(pv)) { 807 UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx" 808 " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 809 pv->pv_va, 0); 810 811 KASSERT(pv->pv_pmap == pmap_kernel()); 812 813 /* Assume no more - it'll get fixed if there are */ 814 pv->pv_next = NULL; 815 816 /* 817 * pvp is non-null when we already have a PV_KENTER 818 * pv in pvh_first; otherwise we haven't seen a 819 * PV_KENTER pv and we need to copy this one to 820 * pvh_first 821 */ 822 if (pvp) { 823 /* 824 * The previous PV_KENTER pv needs to point to 825 * this PV_KENTER pv 826 */ 827 pvp->pv_next = pv; 828 } else { 829 pv_entry_t fpv = &mdpg->mdpg_first; 830 *fpv = *pv; 831 KASSERT(fpv->pv_pmap == pmap_kernel()); 832 } 833 pvp = pv; 834 continue; 835 } 836 #endif 837 const pmap_t pmap = pv->pv_pmap; 838 vaddr_t va = trunc_page(pv->pv_va); 839 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); 840 KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, 841 pmap_limits.virtual_end); 842 pt_entry_t pte = *ptep; 843 UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx" 844 " pte %#jx", (uintptr_t)pv, (uintptr_t)pmap, va, 845 pte_value(pte)); 846 if (!pte_valid_p(pte)) 847 continue; 848 const bool is_kernel_pmap_p = (pmap == pmap_kernel()); 849 if (is_kernel_pmap_p) { 850 PMAP_COUNT(remove_kernel_pages); 851 } else { 852 PMAP_COUNT(remove_user_pages); 853 } 854 if (pte_wired_p(pte)) 855 pmap->pm_stats.wired_count--; 856 pmap->pm_stats.resident_count--; 857 858 pmap_tlb_miss_lock_enter(); 859 const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p); 860 pte_set(ptep, npte); 861 if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) { 862 /* 863 * Flush the TLB for the given address. 
864 */ 865 pmap_tlb_invalidate_addr(pmap, va); 866 } 867 pmap_tlb_miss_lock_exit(); 868 869 /* 870 * non-null means this is a non-pvh_first pv, so we should 871 * free it. 872 */ 873 if (pvp) { 874 KASSERT(pvp->pv_pmap == pmap_kernel()); 875 KASSERT(pvp->pv_next == NULL); 876 pmap_pv_free(pv); 877 } else { 878 pv->pv_pmap = NULL; 879 pv->pv_next = NULL; 880 } 881 } 882 883 pmap_pvlist_check(mdpg); 884 VM_PAGEMD_PVLIST_UNLOCK(mdpg); 885 kpreempt_enable(); 886 887 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 888 } 889 890 #ifdef __HAVE_PMAP_PV_TRACK 891 /* 892 * pmap_pv_protect: change protection of an unmanaged pv-tracked page from 893 * all pmaps that map it 894 */ 895 void 896 pmap_pv_protect(paddr_t pa, vm_prot_t prot) 897 { 898 899 /* the only case is remove at the moment */ 900 KASSERT(prot == VM_PROT_NONE); 901 struct pmap_page *pp; 902 903 pp = pmap_pv_tracked(pa); 904 if (pp == NULL) 905 panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR, 906 pa); 907 908 struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp); 909 pmap_page_remove(mdpg); 910 } 911 #endif 912 913 /* 914 * Make a previously active pmap (vmspace) inactive. 915 */ 916 void 917 pmap_deactivate(struct lwp *l) 918 { 919 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 920 921 UVMHIST_FUNC(__func__); 922 UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l, 923 (uintptr_t)pmap, 0, 0); 924 PMAP_COUNT(deactivate); 925 926 kpreempt_disable(); 927 KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu); 928 pmap_tlb_miss_lock_enter(); 929 pmap_tlb_asid_deactivate(pmap); 930 pmap_segtab_deactivate(pmap); 931 pmap_tlb_miss_lock_exit(); 932 kpreempt_enable(); 933 934 UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid, 935 l->l_lid, 0, 0); 936 } 937 938 void 939 pmap_update(struct pmap *pmap) 940 { 941 UVMHIST_FUNC(__func__); 942 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0); 943 PMAP_COUNT(update); 944 945 kpreempt_disable(); 946 #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN) 947 u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0); 948 if (pending && pmap_tlb_shootdown_bystanders(pmap)) 949 PMAP_COUNT(shootdown_ipis); 950 #endif 951 pmap_tlb_miss_lock_enter(); 952 #if defined(DEBUG) && !defined(MULTIPROCESSOR) 953 pmap_tlb_check(pmap, pmap_md_tlb_check_entry); 954 #endif /* DEBUG */ 955 956 /* 957 * If pmap_remove_all was called, we deactivated ourselves and nuked 958 * our ASID. Now we have to reactivate ourselves. 959 */ 960 if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) { 961 pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE; 962 pmap_tlb_asid_acquire(pmap, curlwp); 963 pmap_segtab_activate(pmap, curlwp); 964 } 965 pmap_tlb_miss_lock_exit(); 966 kpreempt_enable(); 967 968 UVMHIST_LOG(pmaphist, " <-- done (kernel=%jd)", 969 (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0); 970 } 971 972 /* 973 * Remove the given range of addresses from the specified map. 974 * 975 * It is assumed that the start and end are properly 976 * rounded to the page size. 977 */ 978 979 static bool 980 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, 981 uintptr_t flags) 982 { 983 const pt_entry_t npte = flags; 984 const bool is_kernel_pmap_p = (pmap == pmap_kernel()); 985 986 UVMHIST_FUNC(__func__); 987 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)", 988 (uintptr_t)pmap, (pmap == pmap_kernel() ? 
1 : 0), sva, eva); 989 UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)", 990 (uintptr_t)ptep, flags, 0, 0); 991 992 KASSERT(kpreempt_disabled()); 993 994 for (; sva < eva; sva += NBPG, ptep++) { 995 const pt_entry_t pte = *ptep; 996 if (!pte_valid_p(pte)) 997 continue; 998 if (is_kernel_pmap_p) { 999 PMAP_COUNT(remove_kernel_pages); 1000 } else { 1001 PMAP_COUNT(remove_user_pages); 1002 } 1003 if (pte_wired_p(pte)) 1004 pmap->pm_stats.wired_count--; 1005 pmap->pm_stats.resident_count--; 1006 struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); 1007 if (__predict_true(pg != NULL)) { 1008 pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte)); 1009 } 1010 pmap_tlb_miss_lock_enter(); 1011 pte_set(ptep, npte); 1012 if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) { 1013 1014 /* 1015 * Flush the TLB for the given address. 1016 */ 1017 pmap_tlb_invalidate_addr(pmap, sva); 1018 } 1019 pmap_tlb_miss_lock_exit(); 1020 } 1021 1022 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1023 1024 return false; 1025 } 1026 1027 void 1028 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 1029 { 1030 const bool is_kernel_pmap_p = (pmap == pmap_kernel()); 1031 const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p); 1032 1033 UVMHIST_FUNC(__func__); 1034 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)", 1035 (uintptr_t)pmap, sva, eva, 0); 1036 1037 if (is_kernel_pmap_p) { 1038 PMAP_COUNT(remove_kernel_calls); 1039 } else { 1040 PMAP_COUNT(remove_user_calls); 1041 } 1042 #ifdef PMAP_FAULTINFO 1043 curpcb->pcb_faultinfo.pfi_faultaddr = 0; 1044 curpcb->pcb_faultinfo.pfi_repeats = 0; 1045 curpcb->pcb_faultinfo.pfi_faultptep = NULL; 1046 #endif 1047 kpreempt_disable(); 1048 pmap_addr_range_check(pmap, sva, eva, __func__); 1049 pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte); 1050 kpreempt_enable(); 1051 1052 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1053 } 1054 1055 /* 1056 * pmap_page_protect: 1057 * 1058 * Lower the permission for all mappings to a given page. 1059 */ 1060 void 1061 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1062 { 1063 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 1064 pv_entry_t pv; 1065 vaddr_t va; 1066 1067 UVMHIST_FUNC(__func__); 1068 UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)", 1069 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0); 1070 PMAP_COUNT(page_protect); 1071 1072 switch (prot) { 1073 case VM_PROT_READ|VM_PROT_WRITE: 1074 case VM_PROT_ALL: 1075 break; 1076 1077 /* copy_on_write */ 1078 case VM_PROT_READ: 1079 case VM_PROT_READ|VM_PROT_EXECUTE: 1080 pv = &mdpg->mdpg_first; 1081 kpreempt_disable(); 1082 VM_PAGEMD_PVLIST_READLOCK(mdpg); 1083 pmap_pvlist_check(mdpg); 1084 /* 1085 * Loop over all current mappings setting/clearing as 1086 * appropriate. 
1087 */ 1088 if (pv->pv_pmap != NULL) { 1089 while (pv != NULL) { 1090 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1091 if (PV_ISKENTER_P(pv)) { 1092 pv = pv->pv_next; 1093 continue; 1094 } 1095 #endif 1096 const pmap_t pmap = pv->pv_pmap; 1097 va = trunc_page(pv->pv_va); 1098 const uintptr_t gen = 1099 VM_PAGEMD_PVLIST_UNLOCK(mdpg); 1100 pmap_protect(pmap, va, va + PAGE_SIZE, prot); 1101 KASSERT(pv->pv_pmap == pmap); 1102 pmap_update(pmap); 1103 if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) { 1104 pv = &mdpg->mdpg_first; 1105 } else { 1106 pv = pv->pv_next; 1107 } 1108 pmap_pvlist_check(mdpg); 1109 } 1110 } 1111 pmap_pvlist_check(mdpg); 1112 VM_PAGEMD_PVLIST_UNLOCK(mdpg); 1113 kpreempt_enable(); 1114 break; 1115 1116 /* remove_all */ 1117 default: 1118 pmap_page_remove(mdpg); 1119 } 1120 1121 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1122 } 1123 1124 static bool 1125 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, 1126 uintptr_t flags) 1127 { 1128 const vm_prot_t prot = (flags & VM_PROT_ALL); 1129 1130 UVMHIST_FUNC(__func__); 1131 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)", 1132 (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva); 1133 UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)", 1134 (uintptr_t)ptep, flags, 0, 0); 1135 1136 KASSERT(kpreempt_disabled()); 1137 /* 1138 * Change protection on every valid mapping within this segment. 1139 */ 1140 for (; sva < eva; sva += NBPG, ptep++) { 1141 pt_entry_t pte = *ptep; 1142 if (!pte_valid_p(pte)) 1143 continue; 1144 struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); 1145 if (pg != NULL && pte_modified_p(pte)) { 1146 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 1147 if (VM_PAGEMD_EXECPAGE_P(mdpg)) { 1148 KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg)); 1149 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1150 if (VM_PAGEMD_CACHED_P(mdpg)) { 1151 #endif 1152 UVMHIST_LOG(pmapexechist, 1153 "pg %#jx (pa %#jx): " 1154 "syncicached performed", 1155 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 1156 0, 0); 1157 pmap_page_syncicache(pg); 1158 PMAP_COUNT(exec_synced_protect); 1159 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1160 } 1161 #endif 1162 } 1163 } 1164 pte = pte_prot_downgrade(pte, prot); 1165 if (*ptep != pte) { 1166 pmap_tlb_miss_lock_enter(); 1167 pte_set(ptep, pte); 1168 /* 1169 * Update the TLB if needed. 1170 */ 1171 pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI); 1172 pmap_tlb_miss_lock_exit(); 1173 } 1174 } 1175 1176 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1177 1178 return false; 1179 } 1180 1181 /* 1182 * Set the physical protection on the 1183 * specified range of this map as requested. 1184 */ 1185 void 1186 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1187 { 1188 UVMHIST_FUNC(__func__); 1189 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)", 1190 (uintptr_t)pmap, sva, eva, prot); 1191 PMAP_COUNT(protect); 1192 1193 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1194 pmap_remove(pmap, sva, eva); 1195 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1196 return; 1197 } 1198 1199 /* 1200 * Change protection on every valid mapping within this segment. 
1201 */ 1202 kpreempt_disable(); 1203 pmap_addr_range_check(pmap, sva, eva, __func__); 1204 pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot); 1205 kpreempt_enable(); 1206 1207 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1208 } 1209 1210 #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED) 1211 /* 1212 * pmap_page_cache: 1213 * 1214 * Change all mappings of a managed page to cached/uncached. 1215 */ 1216 void 1217 pmap_page_cache(struct vm_page_md *mdpg, bool cached) 1218 { 1219 #ifdef UVMHIST 1220 const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg); 1221 struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL; 1222 #endif 1223 1224 UVMHIST_FUNC(__func__); 1225 UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)", 1226 (uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p); 1227 1228 KASSERT(kpreempt_disabled()); 1229 KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg)); 1230 1231 if (cached) { 1232 pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED); 1233 PMAP_COUNT(page_cache_restorations); 1234 } else { 1235 pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED); 1236 PMAP_COUNT(page_cache_evictions); 1237 } 1238 1239 for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) { 1240 pmap_t pmap = pv->pv_pmap; 1241 vaddr_t va = trunc_page(pv->pv_va); 1242 1243 KASSERT(pmap != NULL); 1244 KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); 1245 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); 1246 if (ptep == NULL) 1247 continue; 1248 pt_entry_t pte = *ptep; 1249 if (pte_valid_p(pte)) { 1250 pte = pte_cached_change(pte, cached); 1251 pmap_tlb_miss_lock_enter(); 1252 pte_set(ptep, pte); 1253 pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI); 1254 pmap_tlb_miss_lock_exit(); 1255 } 1256 } 1257 1258 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1259 } 1260 #endif /* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */ 1261 1262 /* 1263 * Insert the given physical page (p) at 1264 * the specified virtual address (v) in the 1265 * target physical map with the protection requested. 1266 * 1267 * If specified, the page will be wired down, meaning 1268 * that the related pte can not be reclaimed. 1269 * 1270 * NB: This is the only routine which MAY NOT lazy-evaluate 1271 * or lose information. That is, this routine must actually 1272 * insert this page into the given map NOW. 1273 */ 1274 int 1275 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1276 { 1277 const bool wired = (flags & PMAP_WIRED) != 0; 1278 const bool is_kernel_pmap_p = (pmap == pmap_kernel()); 1279 u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0; 1280 #ifdef UVMHIST 1281 struct kern_history * const histp = 1282 ((prot & VM_PROT_EXECUTE) ? 
&pmapexechist : &pmaphist); 1283 #endif 1284 1285 UVMHIST_FUNC(__func__); 1286 UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx", 1287 (uintptr_t)pmap, va, pa, 0); 1288 UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0); 1289 1290 const bool good_color = PMAP_PAGE_COLOROK_P(pa, va); 1291 if (is_kernel_pmap_p) { 1292 PMAP_COUNT(kernel_mappings); 1293 if (!good_color) 1294 PMAP_COUNT(kernel_mappings_bad); 1295 } else { 1296 PMAP_COUNT(user_mappings); 1297 if (!good_color) 1298 PMAP_COUNT(user_mappings_bad); 1299 } 1300 pmap_addr_range_check(pmap, va, va, __func__); 1301 1302 KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x", 1303 VM_PROT_READ, prot); 1304 1305 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 1306 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); 1307 1308 struct vm_page_md *mdpp = NULL; 1309 #ifdef __HAVE_PMAP_PV_TRACK 1310 struct pmap_page *pp = pmap_pv_tracked(pa); 1311 mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL; 1312 #endif 1313 1314 if (mdpg) { 1315 /* Set page referenced/modified status based on flags */ 1316 if (flags & VM_PROT_WRITE) { 1317 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED); 1318 } else if (flags & VM_PROT_ALL) { 1319 pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED); 1320 } 1321 1322 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1323 if (!VM_PAGEMD_CACHED_P(mdpg)) { 1324 flags |= PMAP_NOCACHE; 1325 PMAP_COUNT(uncached_mappings); 1326 } 1327 #endif 1328 1329 PMAP_COUNT(managed_mappings); 1330 } else if (mdpp) { 1331 #ifdef __HAVE_PMAP_PV_TRACK 1332 pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED); 1333 1334 PMAP_COUNT(pvtracked_mappings); 1335 #endif 1336 } else { 1337 /* 1338 * Assumption: if it is not part of our managed memory 1339 * then it must be device memory which may be volatile. 1340 */ 1341 if ((flags & PMAP_CACHE_MASK) == 0) 1342 flags |= PMAP_NOCACHE; 1343 PMAP_COUNT(unmanaged_mappings); 1344 } 1345 1346 KASSERTMSG(mdpg == NULL || mdpp == NULL, "mdpg %p mdpp %p", mdpg, mdpp); 1347 1348 struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp; 1349 pt_entry_t npte = pte_make_enter(pa, md, prot, flags, 1350 is_kernel_pmap_p); 1351 1352 kpreempt_disable(); 1353 1354 pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags); 1355 if (__predict_false(ptep == NULL)) { 1356 kpreempt_enable(); 1357 UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0); 1358 return ENOMEM; 1359 } 1360 const pt_entry_t opte = *ptep; 1361 const bool resident = pte_valid_p(opte); 1362 bool remap = false; 1363 if (resident) { 1364 if (pte_to_paddr(opte) != pa) { 1365 KASSERT(!is_kernel_pmap_p); 1366 const pt_entry_t rpte = pte_nv_entry(false); 1367 1368 pmap_addr_range_check(pmap, va, va + NBPG, __func__); 1369 pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove, 1370 rpte); 1371 PMAP_COUNT(user_mappings_changed); 1372 remap = true; 1373 } 1374 update_flags |= PMAP_TLB_NEED_IPI; 1375 } 1376 1377 if (!resident || remap) { 1378 pmap->pm_stats.resident_count++; 1379 } 1380 1381 /* Done after case that may sleep/return. */ 1382 if (md) 1383 pmap_enter_pv(pmap, va, pa, md, &npte, 0); 1384 1385 /* 1386 * Now validate mapping with desired protection/wiring. 
1387 */ 1388 if (wired) { 1389 pmap->pm_stats.wired_count++; 1390 npte = pte_wire_entry(npte); 1391 } 1392 1393 UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)", 1394 pte_value(npte), pa, 0, 0); 1395 1396 KASSERT(pte_valid_p(npte)); 1397 1398 pmap_tlb_miss_lock_enter(); 1399 pte_set(ptep, npte); 1400 pmap_tlb_update_addr(pmap, va, npte, update_flags); 1401 pmap_tlb_miss_lock_exit(); 1402 kpreempt_enable(); 1403 1404 if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { 1405 KASSERT(mdpg != NULL); 1406 PMAP_COUNT(exec_mappings); 1407 if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) { 1408 if (!pte_deferred_exec_p(npte)) { 1409 UVMHIST_LOG(*histp, "va=%#jx pg %#jx: " 1410 "immediate syncicache", 1411 va, (uintptr_t)pg, 0, 0); 1412 pmap_page_syncicache(pg); 1413 pmap_page_set_attributes(mdpg, 1414 VM_PAGEMD_EXECPAGE); 1415 PMAP_COUNT(exec_synced_mappings); 1416 } else { 1417 UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer " 1418 "syncicache: pte %#jx", 1419 va, (uintptr_t)pg, npte, 0); 1420 } 1421 } else { 1422 UVMHIST_LOG(*histp, 1423 "va=%#jx pg %#jx: no syncicache cached %jd", 1424 va, (uintptr_t)pg, pte_cached_p(npte), 0); 1425 } 1426 } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) { 1427 KASSERT(mdpg != NULL); 1428 KASSERT(prot & VM_PROT_WRITE); 1429 PMAP_COUNT(exec_mappings); 1430 pmap_page_syncicache(pg); 1431 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); 1432 UVMHIST_LOG(*histp, 1433 "va=%#jx pg %#jx: immediate syncicache (writeable)", 1434 va, (uintptr_t)pg, 0, 0); 1435 } 1436 1437 UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0); 1438 return 0; 1439 } 1440 1441 void 1442 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1443 { 1444 pmap_t pmap = pmap_kernel(); 1445 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 1446 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); 1447 1448 UVMHIST_FUNC(__func__); 1449 UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)", 1450 va, pa, prot, flags); 1451 PMAP_COUNT(kenter_pa); 1452 1453 if (mdpg == NULL) { 1454 PMAP_COUNT(kenter_pa_unmanaged); 1455 if ((flags & PMAP_CACHE_MASK) == 0) 1456 flags |= PMAP_NOCACHE; 1457 } else { 1458 if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va)) 1459 PMAP_COUNT(kenter_pa_bad); 1460 } 1461 1462 pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags); 1463 kpreempt_disable(); 1464 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); 1465 KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, 1466 pmap_limits.virtual_end); 1467 KASSERT(!pte_valid_p(*ptep)); 1468 1469 /* 1470 * No need to track non-managed pages or PMAP_KMPAGEs pages for aliases 1471 */ 1472 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1473 if (pg != NULL && (flags & PMAP_KMPAGE) == 0 1474 && pmap_md_virtual_cache_aliasing_p()) { 1475 pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER); 1476 } 1477 #endif 1478 1479 /* 1480 * We have the option to force this mapping into the TLB but we 1481 * don't. Instead let the next reference to the page do it. 
1482 */ 1483 pmap_tlb_miss_lock_enter(); 1484 pte_set(ptep, npte); 1485 pmap_tlb_update_addr(pmap_kernel(), va, npte, 0); 1486 pmap_tlb_miss_lock_exit(); 1487 kpreempt_enable(); 1488 #if DEBUG > 1 1489 for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) { 1490 if (((long *)va)[i] != ((long *)pa)[i]) 1491 panic("%s: contents (%lx) of va %#"PRIxVADDR 1492 " != contents (%lx) of pa %#"PRIxPADDR, __func__, 1493 ((long *)va)[i], va, ((long *)pa)[i], pa); 1494 } 1495 #endif 1496 1497 UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0, 1498 0); 1499 } 1500 1501 /* 1502 * Remove the given range of addresses from the kernel map. 1503 * 1504 * It is assumed that the start and end are properly 1505 * rounded to the page size. 1506 */ 1507 1508 static bool 1509 pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep, 1510 uintptr_t flags) 1511 { 1512 const pt_entry_t new_pte = pte_nv_entry(true); 1513 1514 UVMHIST_FUNC(__func__); 1515 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)", 1516 (uintptr_t)pmap, sva, eva, (uintptr_t)ptep); 1517 1518 KASSERT(kpreempt_disabled()); 1519 1520 for (; sva < eva; sva += NBPG, ptep++) { 1521 pt_entry_t pte = *ptep; 1522 if (!pte_valid_p(pte)) 1523 continue; 1524 1525 PMAP_COUNT(kremove_pages); 1526 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1527 struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte)); 1528 if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) { 1529 pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte)); 1530 } 1531 #endif 1532 1533 pmap_tlb_miss_lock_enter(); 1534 pte_set(ptep, new_pte); 1535 pmap_tlb_invalidate_addr(pmap, sva); 1536 pmap_tlb_miss_lock_exit(); 1537 } 1538 1539 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1540 1541 return false; 1542 } 1543 1544 void 1545 pmap_kremove(vaddr_t va, vsize_t len) 1546 { 1547 const vaddr_t sva = trunc_page(va); 1548 const vaddr_t eva = round_page(va + len); 1549 1550 UVMHIST_FUNC(__func__); 1551 UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0); 1552 1553 kpreempt_disable(); 1554 pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0); 1555 kpreempt_enable(); 1556 1557 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1558 } 1559 1560 bool 1561 pmap_remove_all(struct pmap *pmap) 1562 { 1563 UVMHIST_FUNC(__func__); 1564 UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0); 1565 1566 KASSERT(pmap != pmap_kernel()); 1567 1568 kpreempt_disable(); 1569 /* 1570 * Free all of our ASIDs which means we can skip doing all the 1571 * tlb_invalidate_addrs(). 1572 */ 1573 pmap_tlb_miss_lock_enter(); 1574 #ifdef MULTIPROCESSOR 1575 // This should be the last CPU with this pmap onproc 1576 KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu()))); 1577 if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu()))) 1578 #endif 1579 pmap_tlb_asid_deactivate(pmap); 1580 #ifdef MULTIPROCESSOR 1581 KASSERT(kcpuset_iszero(pmap->pm_onproc)); 1582 #endif 1583 pmap_tlb_asid_release_all(pmap); 1584 pmap_tlb_miss_lock_exit(); 1585 pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE; 1586 1587 #ifdef PMAP_FAULTINFO 1588 curpcb->pcb_faultinfo.pfi_faultaddr = 0; 1589 curpcb->pcb_faultinfo.pfi_repeats = 0; 1590 curpcb->pcb_faultinfo.pfi_faultptep = NULL; 1591 #endif 1592 kpreempt_enable(); 1593 1594 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1595 return false; 1596 } 1597 1598 /* 1599 * Routine: pmap_unwire 1600 * Function: Clear the wired attribute for a map/virtual-address 1601 * pair. 
1602 * In/out conditions: 1603 * The mapping must already exist in the pmap. 1604 */ 1605 void 1606 pmap_unwire(pmap_t pmap, vaddr_t va) 1607 { 1608 UVMHIST_FUNC(__func__); 1609 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va, 1610 0, 0); 1611 PMAP_COUNT(unwire); 1612 1613 /* 1614 * Don't need to flush the TLB since PG_WIRED is only in software. 1615 */ 1616 kpreempt_disable(); 1617 pmap_addr_range_check(pmap, va, va, __func__); 1618 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); 1619 KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE", 1620 pmap, va); 1621 pt_entry_t pte = *ptep; 1622 KASSERTMSG(pte_valid_p(pte), 1623 "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p", 1624 pmap, va, pte_value(pte), ptep); 1625 1626 if (pte_wired_p(pte)) { 1627 pmap_tlb_miss_lock_enter(); 1628 pte_set(ptep, pte_unwire_entry(pte)); 1629 pmap_tlb_miss_lock_exit(); 1630 pmap->pm_stats.wired_count--; 1631 } 1632 #ifdef DIAGNOSTIC 1633 else { 1634 printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n", 1635 __func__, pmap, va); 1636 } 1637 #endif 1638 kpreempt_enable(); 1639 1640 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0); 1641 } 1642 1643 /* 1644 * Routine: pmap_extract 1645 * Function: 1646 * Extract the physical page address associated 1647 * with the given map/virtual_address pair. 1648 */ 1649 bool 1650 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) 1651 { 1652 paddr_t pa; 1653 1654 if (pmap == pmap_kernel()) { 1655 if (pmap_md_direct_mapped_vaddr_p(va)) { 1656 pa = pmap_md_direct_mapped_vaddr_to_paddr(va); 1657 goto done; 1658 } 1659 if (pmap_md_io_vaddr_p(va)) 1660 panic("pmap_extract: io address %#"PRIxVADDR"", va); 1661 1662 if (va >= pmap_limits.virtual_end) 1663 panic("%s: illegal kernel mapped address %#"PRIxVADDR, 1664 __func__, va); 1665 } 1666 kpreempt_disable(); 1667 const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); 1668 if (ptep == NULL || !pte_valid_p(*ptep)) { 1669 kpreempt_enable(); 1670 return false; 1671 } 1672 pa = pte_to_paddr(*ptep) | (va & PGOFSET); 1673 kpreempt_enable(); 1674 done: 1675 if (pap != NULL) { 1676 *pap = pa; 1677 } 1678 return true; 1679 } 1680 1681 /* 1682 * Copy the range specified by src_addr/len 1683 * from the source map to the range dst_addr/len 1684 * in the destination map. 1685 * 1686 * This routine is only advisory and need not do anything. 1687 */ 1688 void 1689 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len, 1690 vaddr_t src_addr) 1691 { 1692 UVMHIST_FUNC(__func__); 1693 UVMHIST_CALLED(pmaphist); 1694 PMAP_COUNT(copy); 1695 } 1696 1697 /* 1698 * pmap_clear_reference: 1699 * 1700 * Clear the reference bit on the specified physical page. 1701 */ 1702 bool 1703 pmap_clear_reference(struct vm_page *pg) 1704 { 1705 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 1706 1707 UVMHIST_FUNC(__func__); 1708 UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))", 1709 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0); 1710 1711 bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED); 1712 1713 UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0); 1714 1715 return rv; 1716 } 1717 1718 /* 1719 * pmap_is_referenced: 1720 * 1721 * Return whether or not the specified physical page is referenced 1722 * by any physical maps. 1723 */ 1724 bool 1725 pmap_is_referenced(struct vm_page *pg) 1726 { 1727 return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg)); 1728 } 1729 1730 /* 1731 * Clear the modify bits on the specified physical page. 
1732 */ 1733 bool 1734 pmap_clear_modify(struct vm_page *pg) 1735 { 1736 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 1737 pv_entry_t pv = &mdpg->mdpg_first; 1738 pv_entry_t pv_next; 1739 1740 UVMHIST_FUNC(__func__); 1741 UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))", 1742 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0); 1743 PMAP_COUNT(clear_modify); 1744 1745 if (VM_PAGEMD_EXECPAGE_P(mdpg)) { 1746 if (pv->pv_pmap == NULL) { 1747 UVMHIST_LOG(pmapexechist, 1748 "pg %#jx (pa %#jx): execpage cleared", 1749 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0); 1750 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); 1751 PMAP_COUNT(exec_uncached_clear_modify); 1752 } else { 1753 UVMHIST_LOG(pmapexechist, 1754 "pg %#jx (pa %#jx): syncicache performed", 1755 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0); 1756 pmap_page_syncicache(pg); 1757 PMAP_COUNT(exec_synced_clear_modify); 1758 } 1759 } 1760 if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) { 1761 UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0); 1762 return false; 1763 } 1764 if (pv->pv_pmap == NULL) { 1765 UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0); 1766 return true; 1767 } 1768 1769 /* 1770 * remove write access from any pages that are dirty 1771 * so we can tell if they are written to again later. 1772 * flush the VAC first if there is one. 1773 */ 1774 kpreempt_disable(); 1775 VM_PAGEMD_PVLIST_READLOCK(mdpg); 1776 pmap_pvlist_check(mdpg); 1777 for (; pv != NULL; pv = pv_next) { 1778 pmap_t pmap = pv->pv_pmap; 1779 vaddr_t va = trunc_page(pv->pv_va); 1780 1781 pv_next = pv->pv_next; 1782 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1783 if (PV_ISKENTER_P(pv)) 1784 continue; 1785 #endif 1786 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va); 1787 KASSERT(ptep); 1788 pt_entry_t pte = pte_prot_nowrite(*ptep); 1789 if (*ptep == pte) { 1790 continue; 1791 } 1792 KASSERT(pte_valid_p(pte)); 1793 const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg); 1794 pmap_tlb_miss_lock_enter(); 1795 pte_set(ptep, pte); 1796 pmap_tlb_invalidate_addr(pmap, va); 1797 pmap_tlb_miss_lock_exit(); 1798 pmap_update(pmap); 1799 if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) { 1800 /* 1801 * The list changed! So restart from the beginning. 1802 */ 1803 pv_next = &mdpg->mdpg_first; 1804 pmap_pvlist_check(mdpg); 1805 } 1806 } 1807 pmap_pvlist_check(mdpg); 1808 VM_PAGEMD_PVLIST_UNLOCK(mdpg); 1809 kpreempt_enable(); 1810 1811 UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0); 1812 return true; 1813 } 1814 1815 /* 1816 * pmap_is_modified: 1817 * 1818 * Return whether or not the specified physical page is modified 1819 * by any physical maps. 1820 */ 1821 bool 1822 pmap_is_modified(struct vm_page *pg) 1823 { 1824 return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg)); 1825 } 1826 1827 /* 1828 * pmap_set_modified: 1829 * 1830 * Sets the page modified reference bit for the specified page. 
1831 */ 1832 void 1833 pmap_set_modified(paddr_t pa) 1834 { 1835 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 1836 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 1837 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED); 1838 } 1839 1840 /******************** pv_entry management ********************/ 1841 1842 static void 1843 pmap_pvlist_check(struct vm_page_md *mdpg) 1844 { 1845 #ifdef DEBUG 1846 pv_entry_t pv = &mdpg->mdpg_first; 1847 if (pv->pv_pmap != NULL) { 1848 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1849 const u_int colormask = uvmexp.colormask; 1850 u_int colors = 0; 1851 #endif 1852 for (; pv != NULL; pv = pv->pv_next) { 1853 KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va)); 1854 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1855 colors |= __BIT(atop(pv->pv_va) & colormask); 1856 #endif 1857 } 1858 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1859 // Assert that if there is more than 1 color mapped, that the 1860 // page is uncached. 1861 KASSERTMSG(!pmap_md_virtual_cache_aliasing_p() 1862 || colors == 0 || (colors & (colors-1)) == 0 1863 || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u", 1864 colors, VM_PAGEMD_UNCACHED_P(mdpg)); 1865 #endif 1866 } else { 1867 KASSERT(pv->pv_next == NULL); 1868 } 1869 #endif /* DEBUG */ 1870 } 1871 1872 /* 1873 * Enter the pmap and virtual address into the 1874 * physical to virtual map table. 1875 */ 1876 void 1877 pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg, 1878 pt_entry_t *nptep, u_int flags) 1879 { 1880 pv_entry_t pv, npv, apv; 1881 #ifdef UVMHIST 1882 bool first = false; 1883 struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : 1884 NULL; 1885 #endif 1886 1887 UVMHIST_FUNC(__func__); 1888 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)", 1889 (uintptr_t)pmap, va, (uintptr_t)pg, pa); 1890 UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))", 1891 (uintptr_t)nptep, pte_value(*nptep), 0, 0); 1892 1893 KASSERT(kpreempt_disabled()); 1894 KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); 1895 KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va), 1896 "va %#"PRIxVADDR, va); 1897 1898 apv = NULL; 1899 VM_PAGEMD_PVLIST_LOCK(mdpg); 1900 again: 1901 pv = &mdpg->mdpg_first; 1902 pmap_pvlist_check(mdpg); 1903 if (pv->pv_pmap == NULL) { 1904 KASSERT(pv->pv_next == NULL); 1905 /* 1906 * No entries yet, use header as the first entry 1907 */ 1908 PMAP_COUNT(primary_mappings); 1909 PMAP_COUNT(mappings); 1910 #ifdef UVMHIST 1911 first = true; 1912 #endif 1913 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1914 KASSERT(VM_PAGEMD_CACHED_P(mdpg)); 1915 // If the new mapping has an incompatible color the last 1916 // mapping of this page, clean the page before using it. 1917 if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) { 1918 pmap_md_vca_clean(mdpg, PMAP_WBINV); 1919 } 1920 #endif 1921 pv->pv_pmap = pmap; 1922 pv->pv_va = va | flags; 1923 } else { 1924 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 1925 if (pmap_md_vca_add(mdpg, va, nptep)) { 1926 goto again; 1927 } 1928 #endif 1929 1930 /* 1931 * There is at least one other VA mapping this page. 1932 * Place this entry after the header. 1933 * 1934 * Note: the entry may already be in the table if 1935 * we are only changing the protection bits. 1936 */ 1937 1938 for (npv = pv; npv; npv = npv->pv_next) { 1939 if (pmap == npv->pv_pmap 1940 && va == trunc_page(npv->pv_va)) { 1941 #ifdef PARANOIADIAG 1942 pt_entry_t *ptep = pmap_pte_lookup(pmap, va); 1943 pt_entry_t pte = (ptep != NULL) ? 

/*
 * Enter the pmap and virtual address into the
 * physical to virtual map table.
 */
void
pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
    pt_entry_t *nptep, u_int flags)
{
	pv_entry_t pv, npv, apv;
#ifdef UVMHIST
	bool first = false;
	struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
	    NULL;
#endif

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
	    (uintptr_t)pmap, va, (uintptr_t)pg, pa);
	UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
	    (uintptr_t)nptep, pte_value(*nptep), 0, 0);

	KASSERT(kpreempt_disabled());
	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
	    "va %#"PRIxVADDR, va);

	apv = NULL;
	VM_PAGEMD_PVLIST_LOCK(mdpg);
 again:
	pv = &mdpg->mdpg_first;
	pmap_pvlist_check(mdpg);
	if (pv->pv_pmap == NULL) {
		KASSERT(pv->pv_next == NULL);
		/*
		 * No entries yet, use header as the first entry
		 */
		PMAP_COUNT(primary_mappings);
		PMAP_COUNT(mappings);
#ifdef UVMHIST
		first = true;
#endif
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
		// If the new mapping has an incompatible color with the last
		// mapping of this page, clean the page before using it.
		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
			pmap_md_vca_clean(mdpg, PMAP_WBINV);
		}
#endif
		pv->pv_pmap = pmap;
		pv->pv_va = va | flags;
	} else {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (pmap_md_vca_add(mdpg, va, nptep)) {
			goto again;
		}
#endif

		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 *
		 * Note: the entry may already be in the table if
		 * we are only changing the protection bits.
		 */

		for (npv = pv; npv; npv = npv->pv_next) {
			if (pmap == npv->pv_pmap
			    && va == trunc_page(npv->pv_va)) {
#ifdef PARANOIADIAG
				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
					printf("%s: found va %#"PRIxVADDR
					    " pa %#"PRIxPADDR
					    " in pv_table but != %#"PRIxPTE"\n",
					    __func__, va, pa, pte_value(pte));
#endif
				PMAP_COUNT(remappings);
				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
				if (__predict_false(apv != NULL))
					pmap_pv_free(apv);

				UVMHIST_LOG(pmaphist,
				    " <-- done pv=%#jx (reused)",
				    (uintptr_t)pv, 0, 0, 0);
				return;
			}
		}
		if (__predict_true(apv == NULL)) {
			/*
			 * To allocate a PV, we have to release the PVLIST lock
			 * so get the page generation.  We allocate the PV, and
			 * then reacquire the lock.
			 */
			pmap_pvlist_check(mdpg);
			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);

			apv = (pv_entry_t)pmap_pv_alloc();
			if (apv == NULL)
				panic("pmap_enter_pv: pmap_pv_alloc() failed");

			/*
			 * If the generation has changed, then someone else
			 * tinkered with this page so we should start over.
			 */
			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
				goto again;
		}
		npv = apv;
		apv = NULL;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		/*
		 * If we need to deal with virtual cache aliases, keep
		 * mappings in the kernel pmap at the head of the list.
		 * This allows the VCA code to easily use them for cache
		 * operations if present.
		 */
		pmap_t kpmap = pmap_kernel();
		if (pmap != kpmap) {
			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
				pv = pv->pv_next;
			}
		}
#endif
		npv->pv_va = va | flags;
		npv->pv_pmap = pmap;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
		PMAP_COUNT(mappings);
	}
	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	if (__predict_false(apv != NULL))
		pmap_pv_free(apv);

	UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
	    first, 0, 0);
}
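
/*
 * Locking note: the pv list lock (VM_PAGEMD_PVLIST_LOCK()) is one of
 * the mutexes initialized at IPL_HIGH by pmap_pvlist_lock_init()
 * below, so nothing that may sleep can run while it is held.  That is
 * why pmap_enter_pv() drops the lock around pmap_pv_alloc():
 * VM_PAGEMD_PVLIST_UNLOCK() returns a generation number, and if the
 * value seen when the lock is retaken differs, another thread may
 * have changed the list, so the operation restarts at the "again"
 * label.
 */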

/*
 * Remove a physical to virtual address translation.
 * If cache was inhibited on this page, and there are no more cache
 * conflicts, restore caching.
 * Flush the cache if the last mapping is removed (should always be cached
 * at this point).
 */
void
pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv, npv;
	bool last;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
	UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);

	KASSERT(kpreempt_disabled());
	KASSERT((va & PAGE_MASK) == 0);
	pv = &mdpg->mdpg_first;

	VM_PAGEMD_PVLIST_LOCK(mdpg);
	pmap_pvlist_check(mdpg);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */

	last = false;
	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			KASSERT(pv->pv_pmap != NULL);
		} else {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
#endif
			pv->pv_pmap = NULL;
			last = true;	/* Last mapping removed */
		}
		PMAP_COUNT(remove_pvfirst);
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
			PMAP_COUNT(remove_pvsearch);
			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
				break;
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
		}
	}

	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);

#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	pmap_md_vca_remove(pg, va, dirty, last);
#endif

	/*
	 * Free the pv_entry if needed.
	 */
	if (npv)
		pmap_pv_free(npv);
	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
		if (last) {
			/*
			 * If this was the page's last mapping, we no longer
			 * care about its execness.
			 */
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx) last %ju: execpage cleared",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
			PMAP_COUNT(exec_uncached_remove);
		} else {
			/*
			 * Someone still has it mapped as an executable page
			 * so we must sync it.
			 */
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx) last %ju: performed syncicache",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
			pmap_page_syncicache(pg);
			PMAP_COUNT(exec_synced_remove);
		}
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

#if defined(MULTIPROCESSOR)
struct pmap_pvlist_info {
	kmutex_t *pli_locks[PAGE_SIZE / 32];
	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
	volatile u_int pli_lock_index;
	u_int pli_lock_mask;
} pmap_pvlist_info;

void
pmap_pvlist_lock_init(size_t cache_line_size)
{
	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
	vaddr_t lock_va = lock_page;
	if (sizeof(kmutex_t) > cache_line_size) {
		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
	}
	const size_t nlocks = PAGE_SIZE / cache_line_size;
	KASSERT((nlocks & (nlocks - 1)) == 0);
	/*
	 * Now divide the page into a number of mutexes, one per cacheline.
	 */
	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
		kmutex_t * const lock = (kmutex_t *)lock_va;
		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
		pli->pli_locks[i] = lock;
	}
	pli->pli_lock_mask = nlocks - 1;
}
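
/*
 * Sizing example (illustrative numbers only): with PAGE_SIZE = 4096,
 * cache_line_size = 64 and sizeof(kmutex_t) <= 64, the boot-allocated
 * page above is carved into nlocks = 64 mutexes and pli_lock_mask
 * becomes 0x3f.  Each mutex then sits in its own cache line, avoiding
 * false sharing between different pages' pv list locks.  The
 * pli_locks[] array is dimensioned PAGE_SIZE / 32, i.e. for the
 * smallest line size handled here.  pmap_pvlist_lock_addr() below
 * hands the locks out with an odd stride (37); nlocks is a power of
 * two, so the stride is coprime with it and first-touch assignments
 * cycle through every lock.
 */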
2156 */ 2157 lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock); 2158 if (lock == NULL) { 2159 lock = new_lock; 2160 atomic_inc_uint(&pli->pli_lock_refs[lockid]); 2161 } 2162 } 2163 2164 /* 2165 * Now finally provide the lock. 2166 */ 2167 return lock; 2168 } 2169 #else /* !MULTIPROCESSOR */ 2170 void 2171 pmap_pvlist_lock_init(size_t cache_line_size) 2172 { 2173 mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH); 2174 } 2175 2176 #ifdef MODULAR 2177 kmutex_t * 2178 pmap_pvlist_lock_addr(struct vm_page_md *mdpg) 2179 { 2180 /* 2181 * We just use a global lock. 2182 */ 2183 if (__predict_false(mdpg->mdpg_lock == NULL)) { 2184 mdpg->mdpg_lock = &pmap_pvlist_mutex; 2185 } 2186 2187 /* 2188 * Now finally provide the lock. 2189 */ 2190 return mdpg->mdpg_lock; 2191 } 2192 #endif /* MODULAR */ 2193 #endif /* !MULTIPROCESSOR */ 2194 2195 /* 2196 * pmap_pv_page_alloc: 2197 * 2198 * Allocate a page for the pv_entry pool. 2199 */ 2200 void * 2201 pmap_pv_page_alloc(struct pool *pp, int flags) 2202 { 2203 struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE); 2204 if (pg == NULL) 2205 return NULL; 2206 2207 return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg)); 2208 } 2209 2210 /* 2211 * pmap_pv_page_free: 2212 * 2213 * Free a pv_entry pool page. 2214 */ 2215 void 2216 pmap_pv_page_free(struct pool *pp, void *v) 2217 { 2218 vaddr_t va = (vaddr_t)v; 2219 2220 KASSERT(pmap_md_direct_mapped_vaddr_p(va)); 2221 const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va); 2222 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 2223 KASSERT(pg != NULL); 2224 #ifdef PMAP_VIRTUAL_CACHE_ALIASES 2225 kpreempt_disable(); 2226 pmap_md_vca_remove(pg, va, true, true); 2227 kpreempt_enable(); 2228 #endif 2229 pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE); 2230 KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg))); 2231 uvm_pagefree(pg); 2232 } 2233 2234 #ifdef PMAP_PREFER 2235 /* 2236 * Find first virtual address >= *vap that doesn't cause 2237 * a cache alias conflict. 2238 */ 2239 void 2240 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td) 2241 { 2242 vsize_t prefer_mask = ptoa(uvmexp.colormask); 2243 2244 PMAP_COUNT(prefer_requests); 2245 2246 prefer_mask |= pmap_md_cache_prefer_mask(); 2247 2248 if (prefer_mask) { 2249 vaddr_t va = *vap; 2250 vsize_t d = (foff - va) & prefer_mask; 2251 if (d) { 2252 if (td) 2253 *vap = trunc_page(va - ((-d) & prefer_mask)); 2254 else 2255 *vap = round_page(va + d); 2256 PMAP_COUNT(prefer_adjustments); 2257 } 2258 } 2259 } 2260 #endif /* PMAP_PREFER */ 2261 2262 #ifdef PMAP_MAP_POOLPAGE 2263 vaddr_t 2264 pmap_map_poolpage(paddr_t pa) 2265 { 2266 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 2267 KASSERT(pg); 2268 2269 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); 2270 KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg)); 2271 2272 pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE); 2273 2274 return pmap_md_map_poolpage(pa, NBPG); 2275 } 2276 2277 paddr_t 2278 pmap_unmap_poolpage(vaddr_t va) 2279 { 2280 KASSERT(pmap_md_direct_mapped_vaddr_p(va)); 2281 paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va); 2282 2283 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 2284 KASSERT(pg != NULL); 2285 KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg))); 2286 2287 pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE); 2288 pmap_md_unmap_poolpage(va, NBPG); 2289 2290 return pa; 2291 } 2292 #endif /* PMAP_MAP_POOLPAGE */ 2293