1 /* $NetBSD: pmap.c,v 1.114 2022/05/09 11:39:44 rin Exp $ */ 2 /*- 3 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 8 * 9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com> 10 * of Kyma Systems LLC. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 36 * Copyright (C) 1995, 1996 TooLs GmbH. 37 * All rights reserved. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by TooLs GmbH. 50 * 4. The name of TooLs GmbH may not be used to endorse or promote products 51 * derived from this software without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
63 */ 64 65 #include <sys/cdefs.h> 66 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.114 2022/05/09 11:39:44 rin Exp $"); 67 68 #define PMAP_NOOPNAMES 69 70 #ifdef _KERNEL_OPT 71 #include "opt_altivec.h" 72 #include "opt_multiprocessor.h" 73 #include "opt_pmap.h" 74 #include "opt_ppcarch.h" 75 #endif 76 77 #include <sys/param.h> 78 #include <sys/proc.h> 79 #include <sys/pool.h> 80 #include <sys/queue.h> 81 #include <sys/device.h> /* for evcnt */ 82 #include <sys/systm.h> 83 #include <sys/atomic.h> 84 85 #include <uvm/uvm.h> 86 #include <uvm/uvm_physseg.h> 87 88 #include <machine/powerpc.h> 89 #include <powerpc/bat.h> 90 #include <powerpc/pcb.h> 91 #include <powerpc/psl.h> 92 #include <powerpc/spr.h> 93 #include <powerpc/oea/spr.h> 94 #include <powerpc/oea/sr_601.h> 95 96 #ifdef ALTIVEC 97 extern int pmap_use_altivec; 98 #endif 99 100 #ifdef PMAP_MEMLIMIT 101 static paddr_t pmap_memlimit = PMAP_MEMLIMIT; 102 #else 103 static paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */ 104 #endif 105 106 extern struct pmap kernel_pmap_; 107 static unsigned int pmap_pages_stolen; 108 static u_long pmap_pte_valid; 109 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 110 static u_long pmap_pvo_enter_depth; 111 static u_long pmap_pvo_remove_depth; 112 #endif 113 114 #ifndef MSGBUFADDR 115 extern paddr_t msgbuf_paddr; 116 #endif 117 118 static struct mem_region *mem, *avail; 119 static u_int mem_cnt, avail_cnt; 120 121 #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE) 122 # define PMAP_OEA 1 123 #endif 124 125 #if defined(PMAP_OEA) 126 #define _PRIxpte "lx" 127 #else 128 #define _PRIxpte PRIx64 129 #endif 130 #define _PRIxpa "lx" 131 #define _PRIxva "lx" 132 #define _PRIsr "lx" 133 134 #ifdef PMAP_NEEDS_FIXUP 135 #if defined(PMAP_OEA) 136 #define PMAPNAME(name) pmap32_##name 137 #elif defined(PMAP_OEA64) 138 #define PMAPNAME(name) pmap64_##name 139 #elif defined(PMAP_OEA64_BRIDGE) 140 #define PMAPNAME(name) pmap64bridge_##name 141 #else 142 #error unknown variant for pmap 143 #endif 144 #endif /* PMAP_NEEDS_FIXUP */ 145 146 #ifdef PMAPNAME 147 #define STATIC static 148 #define pmap_pte_spill PMAPNAME(pte_spill) 149 #define pmap_real_memory PMAPNAME(real_memory) 150 #define pmap_init PMAPNAME(init) 151 #define pmap_virtual_space PMAPNAME(virtual_space) 152 #define pmap_create PMAPNAME(create) 153 #define pmap_reference PMAPNAME(reference) 154 #define pmap_destroy PMAPNAME(destroy) 155 #define pmap_copy PMAPNAME(copy) 156 #define pmap_update PMAPNAME(update) 157 #define pmap_enter PMAPNAME(enter) 158 #define pmap_remove PMAPNAME(remove) 159 #define pmap_kenter_pa PMAPNAME(kenter_pa) 160 #define pmap_kremove PMAPNAME(kremove) 161 #define pmap_extract PMAPNAME(extract) 162 #define pmap_protect PMAPNAME(protect) 163 #define pmap_unwire PMAPNAME(unwire) 164 #define pmap_page_protect PMAPNAME(page_protect) 165 #define pmap_pv_protect PMAPNAME(pv_protect) 166 #define pmap_query_bit PMAPNAME(query_bit) 167 #define pmap_clear_bit PMAPNAME(clear_bit) 168 169 #define pmap_activate PMAPNAME(activate) 170 #define pmap_deactivate PMAPNAME(deactivate) 171 172 #define pmap_pinit PMAPNAME(pinit) 173 #define pmap_procwr PMAPNAME(procwr) 174 175 #define pmap_pool PMAPNAME(pool) 176 #define pmap_pvo_pool PMAPNAME(pvo_pool) 177 #define pmap_pvo_table PMAPNAME(pvo_table) 178 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 179 #define pmap_pte_print PMAPNAME(pte_print) 180 #define pmap_pteg_check PMAPNAME(pteg_check) 181 #define pmap_print_mmuregs PMAPNAME(print_mmuregs) 182 #define pmap_print_pte
PMAPNAME(print_pte) 183 #define pmap_pteg_dist PMAPNAME(pteg_dist) 184 #endif 185 #if defined(DEBUG) || defined(PMAPCHECK) 186 #define pmap_pvo_verify PMAPNAME(pvo_verify) 187 #define pmapcheck PMAPNAME(check) 188 #endif 189 #if defined(DEBUG) || defined(PMAPDEBUG) 190 #define pmapdebug PMAPNAME(debug) 191 #endif 192 #define pmap_steal_memory PMAPNAME(steal_memory) 193 #define pmap_bootstrap PMAPNAME(bootstrap) 194 #define pmap_bootstrap1 PMAPNAME(bootstrap1) 195 #define pmap_bootstrap2 PMAPNAME(bootstrap2) 196 #else 197 #define STATIC /* nothing */ 198 #endif /* PMAPNAME */ 199 200 STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool); 201 STATIC void pmap_real_memory(paddr_t *, psize_t *); 202 STATIC void pmap_init(void); 203 STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *); 204 STATIC pmap_t pmap_create(void); 205 STATIC void pmap_reference(pmap_t); 206 STATIC void pmap_destroy(pmap_t); 207 STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t); 208 STATIC void pmap_update(pmap_t); 209 STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int); 210 STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t); 211 STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int); 212 STATIC void pmap_kremove(vaddr_t, vsize_t); 213 STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *); 214 215 STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 216 STATIC void pmap_unwire(pmap_t, vaddr_t); 217 STATIC void pmap_page_protect(struct vm_page *, vm_prot_t); 218 STATIC void pmap_pv_protect(paddr_t, vm_prot_t); 219 STATIC bool pmap_query_bit(struct vm_page *, int); 220 STATIC bool pmap_clear_bit(struct vm_page *, int); 221 222 STATIC void pmap_activate(struct lwp *); 223 STATIC void pmap_deactivate(struct lwp *); 224 225 STATIC void pmap_pinit(pmap_t pm); 226 STATIC void pmap_procwr(struct proc *, vaddr_t, size_t); 227 228 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 229 STATIC void pmap_pte_print(volatile struct pte *); 230 STATIC void pmap_pteg_check(void); 231 STATIC void pmap_print_mmuregs(void); 232 STATIC void pmap_print_pte(pmap_t, vaddr_t); 233 STATIC void pmap_pteg_dist(void); 234 #endif 235 #if defined(DEBUG) || defined(PMAPCHECK) 236 STATIC void pmap_pvo_verify(void); 237 #endif 238 STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *); 239 STATIC void pmap_bootstrap(paddr_t, paddr_t); 240 STATIC void pmap_bootstrap1(paddr_t, paddr_t); 241 STATIC void pmap_bootstrap2(void); 242 243 #ifdef PMAPNAME 244 const struct pmap_ops PMAPNAME(ops) = { 245 .pmapop_pte_spill = pmap_pte_spill, 246 .pmapop_real_memory = pmap_real_memory, 247 .pmapop_init = pmap_init, 248 .pmapop_virtual_space = pmap_virtual_space, 249 .pmapop_create = pmap_create, 250 .pmapop_reference = pmap_reference, 251 .pmapop_destroy = pmap_destroy, 252 .pmapop_copy = pmap_copy, 253 .pmapop_update = pmap_update, 254 .pmapop_enter = pmap_enter, 255 .pmapop_remove = pmap_remove, 256 .pmapop_kenter_pa = pmap_kenter_pa, 257 .pmapop_kremove = pmap_kremove, 258 .pmapop_extract = pmap_extract, 259 .pmapop_protect = pmap_protect, 260 .pmapop_unwire = pmap_unwire, 261 .pmapop_page_protect = pmap_page_protect, 262 .pmapop_pv_protect = pmap_pv_protect, 263 .pmapop_query_bit = pmap_query_bit, 264 .pmapop_clear_bit = pmap_clear_bit, 265 .pmapop_activate = pmap_activate, 266 .pmapop_deactivate = pmap_deactivate, 267 .pmapop_pinit = pmap_pinit, 268 .pmapop_procwr = pmap_procwr, 269 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 270 .pmapop_pte_print = pmap_pte_print, 271 .pmapop_pteg_check = 
pmap_pteg_check, 272 .pmapop_print_mmuregs = pmap_print_mmuregs, 273 .pmapop_print_pte = pmap_print_pte, 274 .pmapop_pteg_dist = pmap_pteg_dist, 275 #else 276 .pmapop_pte_print = NULL, 277 .pmapop_pteg_check = NULL, 278 .pmapop_print_mmuregs = NULL, 279 .pmapop_print_pte = NULL, 280 .pmapop_pteg_dist = NULL, 281 #endif 282 #if defined(DEBUG) || defined(PMAPCHECK) 283 .pmapop_pvo_verify = pmap_pvo_verify, 284 #else 285 .pmapop_pvo_verify = NULL, 286 #endif 287 .pmapop_steal_memory = pmap_steal_memory, 288 .pmapop_bootstrap = pmap_bootstrap, 289 .pmapop_bootstrap1 = pmap_bootstrap1, 290 .pmapop_bootstrap2 = pmap_bootstrap2, 291 }; 292 #endif /* !PMAPNAME */ 293 294 /* 295 * The following structure is aligned to 32 bytes 296 */ 297 struct pvo_entry { 298 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */ 299 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */ 300 struct pte pvo_pte; /* Prebuilt PTE */ 301 pmap_t pvo_pmap; /* ptr to owning pmap */ 302 vaddr_t pvo_vaddr; /* VA of entry */ 303 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 304 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 305 #define PVO_WIRED 0x0010 /* PVO entry is wired */ 306 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */ 307 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */ 308 #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED) 309 #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED) 310 #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 311 #define PVO_ENTER_INSERT 0 /* PVO has been removed */ 312 #define PVO_SPILL_UNSET 1 /* PVO has been evicted */ 313 #define PVO_SPILL_SET 2 /* PVO has been spilled */ 314 #define PVO_SPILL_INSERT 3 /* PVO has been inserted */ 315 #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */ 316 #define PVO_PMAP_PROTECT 5 /* PVO has changed */ 317 #define PVO_REMOVE 6 /* PVO has been removed */ 318 #define PVO_WHERE_MASK 15 319 #define PVO_WHERE_SHFT 8 320 } __attribute__ ((aligned (32))); 321 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 322 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 323 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 324 #define PVO_PTEGIDX_CLR(pvo) \ 325 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 326 #define PVO_PTEGIDX_SET(pvo,i) \ 327 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 328 #define PVO_WHERE(pvo,w) \ 329 ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \ 330 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT)) 331 332 TAILQ_HEAD(pvo_tqhead, pvo_entry); 333 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */ 334 335 struct pool pmap_pool; /* pool for pmap structures */ 336 struct pool pmap_pvo_pool; /* pool for pvo entries */ 337 338 /* 339 * We keep a cache of unmanaged pages to be used for pvo entries for 340 * unmanaged pages. 
341 */ 342 struct pvo_page { 343 SIMPLEQ_ENTRY(pvo_page) pvop_link; 344 }; 345 SIMPLEQ_HEAD(pvop_head, pvo_page); 346 static struct pvop_head pmap_pvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_pvop_head); 347 static u_long pmap_pvop_free; 348 static u_long pmap_pvop_maxfree; 349 350 static void *pmap_pool_alloc(struct pool *, int); 351 static void pmap_pool_free(struct pool *, void *); 352 353 static struct pool_allocator pmap_pool_allocator = { 354 .pa_alloc = pmap_pool_alloc, 355 .pa_free = pmap_pool_free, 356 .pa_pagesz = 0, 357 }; 358 359 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 360 void pmap_pte_print(volatile struct pte *); 361 void pmap_pteg_check(void); 362 void pmap_pteg_dist(void); 363 void pmap_print_pte(pmap_t, vaddr_t); 364 void pmap_print_mmuregs(void); 365 #endif 366 367 #if defined(DEBUG) || defined(PMAPCHECK) 368 #ifdef PMAPCHECK 369 int pmapcheck = 1; 370 #else 371 int pmapcheck = 0; 372 #endif 373 void pmap_pvo_verify(void); 374 static void pmap_pvo_check(const struct pvo_entry *); 375 #define PMAP_PVO_CHECK(pvo) \ 376 do { \ 377 if (pmapcheck) \ 378 pmap_pvo_check(pvo); \ 379 } while (0) 380 #else 381 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0) 382 #endif 383 static int pmap_pte_insert(int, struct pte *); 384 static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *, 385 vaddr_t, paddr_t, register_t, int); 386 static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *); 387 static void pmap_pvo_free(struct pvo_entry *); 388 static void pmap_pvo_free_list(struct pvo_head *); 389 static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *); 390 static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 391 static struct pvo_entry *pmap_pvo_reclaim(struct pmap *); 392 static void pvo_set_exec(struct pvo_entry *); 393 static void pvo_clear_exec(struct pvo_entry *); 394 395 static void tlbia(void); 396 397 static void pmap_release(pmap_t); 398 static paddr_t pmap_boot_find_memory(psize_t, psize_t, int); 399 400 static uint32_t pmap_pvo_reclaim_nextidx; 401 #ifdef DEBUG 402 static int pmap_pvo_reclaim_debugctr; 403 #endif 404 405 #define VSID_NBPW (sizeof(uint32_t) * 8) 406 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 407 408 static int pmap_initialized; 409 410 #if defined(DEBUG) || defined(PMAPDEBUG) 411 #define PMAPDEBUG_BOOT 0x0001 412 #define PMAPDEBUG_PTE 0x0002 413 #define PMAPDEBUG_EXEC 0x0008 414 #define PMAPDEBUG_PVOENTER 0x0010 415 #define PMAPDEBUG_PVOREMOVE 0x0020 416 #define PMAPDEBUG_ACTIVATE 0x0100 417 #define PMAPDEBUG_CREATE 0x0200 418 #define PMAPDEBUG_ENTER 0x1000 419 #define PMAPDEBUG_KENTER 0x2000 420 #define PMAPDEBUG_KREMOVE 0x4000 421 #define PMAPDEBUG_REMOVE 0x8000 422 423 unsigned int pmapdebug = 0; 424 425 # define DPRINTF(x, ...) printf(x, __VA_ARGS__) 426 # define DPRINTFN(n, x, ...) do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0) 427 #else 428 # define DPRINTF(x, ...) do { } while (0) 429 # define DPRINTFN(n, x, ...) 
do { } while (0) 430 #endif 431 432 433 #ifdef PMAPCOUNTERS 434 /* 435 * From pmap_subr.c 436 */ 437 extern struct evcnt pmap_evcnt_mappings; 438 extern struct evcnt pmap_evcnt_unmappings; 439 440 extern struct evcnt pmap_evcnt_kernel_mappings; 441 extern struct evcnt pmap_evcnt_kernel_unmappings; 442 443 extern struct evcnt pmap_evcnt_mappings_replaced; 444 445 extern struct evcnt pmap_evcnt_exec_mappings; 446 extern struct evcnt pmap_evcnt_exec_cached; 447 448 extern struct evcnt pmap_evcnt_exec_synced; 449 extern struct evcnt pmap_evcnt_exec_synced_clear_modify; 450 extern struct evcnt pmap_evcnt_exec_synced_pvo_remove; 451 452 extern struct evcnt pmap_evcnt_exec_uncached_page_protect; 453 extern struct evcnt pmap_evcnt_exec_uncached_clear_modify; 454 extern struct evcnt pmap_evcnt_exec_uncached_zero_page; 455 extern struct evcnt pmap_evcnt_exec_uncached_copy_page; 456 extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove; 457 458 extern struct evcnt pmap_evcnt_updates; 459 extern struct evcnt pmap_evcnt_collects; 460 extern struct evcnt pmap_evcnt_copies; 461 462 extern struct evcnt pmap_evcnt_ptes_spilled; 463 extern struct evcnt pmap_evcnt_ptes_unspilled; 464 extern struct evcnt pmap_evcnt_ptes_evicted; 465 466 extern struct evcnt pmap_evcnt_ptes_primary[8]; 467 extern struct evcnt pmap_evcnt_ptes_secondary[8]; 468 extern struct evcnt pmap_evcnt_ptes_removed; 469 extern struct evcnt pmap_evcnt_ptes_changed; 470 extern struct evcnt pmap_evcnt_pvos_reclaimed; 471 extern struct evcnt pmap_evcnt_pvos_failed; 472 473 extern struct evcnt pmap_evcnt_zeroed_pages; 474 extern struct evcnt pmap_evcnt_copied_pages; 475 extern struct evcnt pmap_evcnt_idlezeroed_pages; 476 477 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++) 478 #define PMAPCOUNT2(ev) ((ev).ev_count++) 479 #else 480 #define PMAPCOUNT(ev) ((void) 0) 481 #define PMAPCOUNT2(ev) ((void) 0) 482 #endif 483 484 #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va) : "memory") 485 486 /* XXXSL: this needs to be moved to assembler */ 487 #define TLBIEL(va) __asm volatile("tlbie %0" :: "r"(va) : "memory") 488 489 #ifdef MD_TLBSYNC 490 #define TLBSYNC() MD_TLBSYNC() 491 #else 492 #define TLBSYNC() __asm volatile("tlbsync" ::: "memory") 493 #endif 494 #define SYNC() __asm volatile("sync" ::: "memory") 495 #define EIEIO() __asm volatile("eieio" ::: "memory") 496 #define DCBST(va) __asm volatile("dcbst 0,%0" :: "r"(va) : "memory") 497 #define MFMSR() mfmsr() 498 #define MTMSR(psl) mtmsr(psl) 499 #define MFPVR() mfpvr() 500 #define MFSRIN(va) mfsrin(va) 501 #define MFTB() mfrtcltbl() 502 503 #if defined(DDB) && !defined(PMAP_OEA64) 504 static inline register_t 505 mfsrin(vaddr_t va) 506 { 507 register_t sr; 508 __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va)); 509 return sr; 510 } 511 #endif /* DDB && !PMAP_OEA64 */ 512 513 #if defined (PMAP_OEA64_BRIDGE) 514 extern void mfmsr64 (register64_t *result); 515 #endif /* PMAP_OEA64_BRIDGE */ 516 517 #define PMAP_LOCK() KERNEL_LOCK(1, NULL) 518 #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL) 519 520 static inline register_t 521 pmap_interrupts_off(void) 522 { 523 register_t msr = MFMSR(); 524 if (msr & PSL_EE) 525 MTMSR(msr & ~PSL_EE); 526 return msr; 527 } 528 529 static void 530 pmap_interrupts_restore(register_t msr) 531 { 532 if (msr & PSL_EE) 533 MTMSR(msr); 534 } 535 536 static inline u_int32_t 537 mfrtcltbl(void) 538 { 539 #ifdef PPC_OEA601 540 if ((MFPVR() >> 16) == MPC601) 541 return (mfrtcl() >> 7); 542 else 543 #endif 544 return (mftbl()); 545 } 546 547 /* 548 * These small 
routines may have to be replaced, 549 * if/when we support processors other than the 604. 550 */ 551 552 void 553 tlbia(void) 554 { 555 char *i; 556 557 SYNC(); 558 #if defined(PMAP_OEA) 559 /* 560 * Why not use "tlbia"? Because not all processors implement it. 561 * 562 * This needs to be a per-CPU callback to do the appropriate thing 563 * for the CPU. XXX 564 */ 565 for (i = 0; i < (char *)0x00040000; i += 0x00001000) { 566 TLBIE(i); 567 EIEIO(); 568 SYNC(); 569 } 570 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 571 /* This is specifically for the 970, 970UM v1.6 pp. 140. */ 572 for (i = 0; i <= (char *)0xFF000; i += 0x00001000) { 573 TLBIEL(i); 574 EIEIO(); 575 SYNC(); 576 } 577 #endif 578 TLBSYNC(); 579 SYNC(); 580 } 581 582 static inline register_t 583 va_to_vsid(const struct pmap *pm, vaddr_t addr) 584 { 585 /* 586 * Rather than searching the STE groups for the VSID or extracting 587 * it from the SR, we know how we generate that from the ESID and 588 * so do that. 589 * 590 * This makes the code the same for OEA and OEA64, and also allows 591 * us to generate a correct-for-that-address-space VSID even if the 592 * pmap contains a different SR value at any given moment (e.g. 593 * kernel pmap on a 601 that is using I/O segments). 594 */ 595 return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT; 596 } 597 598 static inline register_t 599 va_to_pteg(const struct pmap *pm, vaddr_t addr) 600 { 601 register_t hash; 602 603 hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); 604 return hash & pmap_pteg_mask; 605 } 606 607 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 608 /* 609 * Given a PTE in the page table, calculate the VADDR that hashes to it. 610 * The only bit of magic is that the top 4 bits of the address don't 611 * technically exist in the PTE. But we know we reserved 4 bits of the 612 * VSID for it so that's how we get it.
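 *
 * In outline (see va_to_pteg() above): the primary hash is
 * (VSID ^ page-index) & pmap_pteg_mask, so XORing the PTEG index --
 * recovered from the PTE's address within pmap_pteg_table, after
 * undoing the HID flip for secondary-hash entries -- with the VSID
 * held in pte_hi gives back the low page-index bits of the EA.  The
 * API field of pte_hi supplies the bits just below the segment, and
 * the segment number embedded in the VSID (VSID_TO_SR) supplies the
 * top 4 bits.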
613 */ 614 static vaddr_t 615 pmap_pte_to_va(volatile const struct pte *pt) 616 { 617 vaddr_t va; 618 uintptr_t ptaddr = (uintptr_t) pt; 619 620 if (pt->pte_hi & PTE_HID) 621 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg)); 622 623 /* PPC Bits 10-19 PPC64 Bits 42-51 */ 624 #if defined(PMAP_OEA) 625 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff; 626 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 627 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff; 628 #endif 629 va <<= ADDR_PIDX_SHFT; 630 631 /* PPC Bits 4-9 PPC64 Bits 36-41 */ 632 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT; 633 634 #if defined(PMAP_OEA64) 635 /* PPC63 Bits 0-35 */ 636 /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */ 637 #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 638 /* PPC Bits 0-3 */ 639 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; 640 #endif 641 642 return va; 643 } 644 #endif 645 646 static inline struct pvo_head * 647 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p) 648 { 649 struct vm_page *pg; 650 struct vm_page_md *md; 651 struct pmap_page *pp; 652 653 pg = PHYS_TO_VM_PAGE(pa); 654 if (pg_p != NULL) 655 *pg_p = pg; 656 if (pg == NULL) { 657 if ((pp = pmap_pv_tracked(pa)) != NULL) 658 return &pp->pp_pvoh; 659 return NULL; 660 } 661 md = VM_PAGE_TO_MD(pg); 662 return &md->mdpg_pvoh; 663 } 664 665 static inline struct pvo_head * 666 vm_page_to_pvoh(struct vm_page *pg) 667 { 668 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 669 670 return &md->mdpg_pvoh; 671 } 672 673 static inline void 674 pmap_pp_attr_clear(struct pmap_page *pp, int ptebit) 675 { 676 677 pp->pp_attrs &= ~ptebit; 678 } 679 680 static inline void 681 pmap_attr_clear(struct vm_page *pg, int ptebit) 682 { 683 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 684 685 pmap_pp_attr_clear(&md->mdpg_pp, ptebit); 686 } 687 688 static inline int 689 pmap_pp_attr_fetch(struct pmap_page *pp) 690 { 691 692 return pp->pp_attrs; 693 } 694 695 static inline int 696 pmap_attr_fetch(struct vm_page *pg) 697 { 698 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 699 700 return pmap_pp_attr_fetch(&md->mdpg_pp); 701 } 702 703 static inline void 704 pmap_attr_save(struct vm_page *pg, int ptebit) 705 { 706 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 707 708 md->mdpg_attrs |= ptebit; 709 } 710 711 static inline int 712 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt) 713 { 714 if (pt->pte_hi == pvo_pt->pte_hi 715 #if 0 716 && ((pt->pte_lo ^ pvo_pt->pte_lo) & 717 ~(PTE_REF|PTE_CHG)) == 0 718 #endif 719 ) 720 return 1; 721 return 0; 722 } 723 724 static inline void 725 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo) 726 { 727 /* 728 * Construct the PTE. Default to IMB initially. Valid bit 729 * only gets set when the real pte is set in memory. 730 * 731 * Note: Don't set the valid bit for correct operation of tlb update. 
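 *
 * Roughly, the result is:
 *	pte_hi = (vsid << PTE_VSID_SHFT) | API bits of the EA
 *	pte_lo = the caller-supplied RPN/WIMG/PP bits
 * with PTE_VALID deliberately left clear; pmap_pte_set() turns it on
 * once the entry has actually been written into the hash table.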
732 */ 733 #if defined(PMAP_OEA) 734 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT) 735 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 736 pt->pte_lo = pte_lo; 737 #elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64) 738 pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT) 739 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 740 pt->pte_lo = (u_int64_t) pte_lo; 741 #endif /* PMAP_OEA */ 742 } 743 744 static inline void 745 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt) 746 { 747 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG); 748 } 749 750 static inline void 751 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit) 752 { 753 /* 754 * As shown in Section 7.6.3.2.3 755 */ 756 pt->pte_lo &= ~ptebit; 757 TLBIE(va); 758 SYNC(); 759 EIEIO(); 760 TLBSYNC(); 761 SYNC(); 762 #ifdef MULTIPROCESSOR 763 DCBST(pt); 764 #endif 765 } 766 767 static inline void 768 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt) 769 { 770 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 771 if (pvo_pt->pte_hi & PTE_VALID) 772 panic("pte_set: setting an already valid pte %p", pvo_pt); 773 #endif 774 pvo_pt->pte_hi |= PTE_VALID; 775 776 /* 777 * Update the PTE as defined in section 7.6.3.1 778 * Note that the REF/CHG bits are from pvo_pt and thus should 779 * have been saved so this routine can restore them (if desired). 780 */ 781 pt->pte_lo = pvo_pt->pte_lo; 782 EIEIO(); 783 pt->pte_hi = pvo_pt->pte_hi; 784 TLBSYNC(); 785 SYNC(); 786 #ifdef MULTIPROCESSOR 787 DCBST(pt); 788 #endif 789 pmap_pte_valid++; 790 } 791 792 static inline void 793 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 794 { 795 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 796 if ((pvo_pt->pte_hi & PTE_VALID) == 0) 797 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt); 798 if ((pt->pte_hi & PTE_VALID) == 0) 799 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt); 800 #endif 801 802 pvo_pt->pte_hi &= ~PTE_VALID; 803 /* 804 * Force the ref & chg bits back into the PTEs. 805 */ 806 SYNC(); 807 /* 808 * Invalidate the pte ... (Section 7.6.3.3) 809 */ 810 pt->pte_hi &= ~PTE_VALID; 811 SYNC(); 812 TLBIE(va); 813 SYNC(); 814 EIEIO(); 815 TLBSYNC(); 816 SYNC(); 817 /* 818 * Save the ref & chg bits ... 819 */ 820 pmap_pte_synch(pt, pvo_pt); 821 pmap_pte_valid--; 822 } 823 824 static inline void 825 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 826 { 827 /* 828 * Invalidate the PTE 829 */ 830 pmap_pte_unset(pt, pvo_pt, va); 831 pmap_pte_set(pt, pvo_pt); 832 } 833 834 /* 835 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx 836 * (either primary or secondary location). 837 * 838 * Note: both the destination and source PTEs must not have PTE_VALID set. 839 */ 840 841 static int 842 pmap_pte_insert(int ptegidx, struct pte *pvo_pt) 843 { 844 volatile struct pte *pt; 845 int i; 846 847 #if defined(DEBUG) 848 DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n", 849 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo); 850 #endif 851 /* 852 * First try primary hash. 853 */ 854 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 855 if ((pt->pte_hi & PTE_VALID) == 0) { 856 pvo_pt->pte_hi &= ~PTE_HID; 857 pmap_pte_set(pt, pvo_pt); 858 return i; 859 } 860 } 861 862 /* 863 * Now try secondary hash. 
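 * The secondary PTEG is found by complementing the primary hash
 * (ptegidx ^ pmap_pteg_mask); entries placed there carry PTE_HID so
 * that both the MMU and pmap_pvo_to_pte()/pmap_pte_to_va() know which
 * hash function located them.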
864 */ 865 ptegidx ^= pmap_pteg_mask; 866 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 867 if ((pt->pte_hi & PTE_VALID) == 0) { 868 pvo_pt->pte_hi |= PTE_HID; 869 pmap_pte_set(pt, pvo_pt); 870 return i; 871 } 872 } 873 return -1; 874 } 875 876 /* 877 * Spill handler. 878 * 879 * Tries to spill a page table entry from the overflow area. 880 * This runs in either real mode (if dealing with an exception spill) 881 * or virtual mode when dealing with manually spilling one of the 882 * kernel's pte entries. In either case, interrupts are already 883 * disabled. 884 */ 885 886 int 887 pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec) 888 { 889 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo; 890 struct pvo_entry *pvo; 891 /* XXX: gcc -- vpvoh is always set at either *1* or *2* */ 892 struct pvo_tqhead *pvoh, *vpvoh = NULL; 893 int ptegidx, i, j; 894 volatile struct pteg *pteg; 895 volatile struct pte *pt; 896 897 PMAP_LOCK(); 898 899 ptegidx = va_to_pteg(pm, addr); 900 901 /* 902 * Have to substitute some entry. Use the primary hash for this. 903 * Use low bits of timebase as random generator. Make sure we are 904 * not picking a kernel pte for replacement. 905 */ 906 pteg = &pmap_pteg_table[ptegidx]; 907 i = MFTB() & 7; 908 for (j = 0; j < 8; j++) { 909 pt = &pteg->pt[i]; 910 if ((pt->pte_hi & PTE_VALID) == 0) 911 break; 912 if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT) 913 < PHYSMAP_VSIDBITS) 914 break; 915 i = (i + 1) & 7; 916 } 917 KASSERT(j < 8); 918 919 source_pvo = NULL; 920 victim_pvo = NULL; 921 pvoh = &pmap_pvo_table[ptegidx]; 922 TAILQ_FOREACH(pvo, pvoh, pvo_olink) { 923 924 /* 925 * We need to find the pvo entry for this address... 926 */ 927 PMAP_PVO_CHECK(pvo); /* sanity check */ 928 929 /* 930 * If we haven't found the source and we come to a PVO with 931 * a valid PTE, then we know we can't find it because all 932 * evicted PVOs are always first in the list. 933 */ 934 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID)) 935 break; 936 if (source_pvo == NULL && pm == pvo->pvo_pmap && 937 addr == PVO_VADDR(pvo)) { 938 939 /* 940 * Now we have found the entry to be spilled into the 941 * pteg. Attempt to insert it into the page table. 942 */ 943 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 944 if (j >= 0) { 945 PVO_PTEGIDX_SET(pvo, j); 946 PMAP_PVO_CHECK(pvo); /* sanity check */ 947 PVO_WHERE(pvo, SPILL_INSERT); 948 pvo->pvo_pmap->pm_evictions--; 949 PMAPCOUNT(ptes_spilled); 950 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 951 ? pmap_evcnt_ptes_secondary 952 : pmap_evcnt_ptes_primary)[j]); 953 954 /* 955 * Since we keep the evicted entries at the 956 * front of the PVO list, we need to move this 957 * (now resident) PVO after the evicted 958 * entries. 959 */ 960 next_pvo = TAILQ_NEXT(pvo, pvo_olink); 961 962 /* 963 * If we don't have to move (either we were the 964 * last entry or the next entry was valid), 965 * don't change our position. Otherwise 966 * move ourselves to the tail of the queue. 967 */ 968 if (next_pvo != NULL && 969 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) { 970 TAILQ_REMOVE(pvoh, pvo, pvo_olink); 971 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 972 } 973 PMAP_UNLOCK(); 974 return 1; 975 } 976 source_pvo = pvo; 977 if (exec && !PVO_EXECUTABLE_P(source_pvo)) { 978 PMAP_UNLOCK(); 979 return 0; 980 } 981 if (victim_pvo != NULL) 982 break; 983 } 984 985 /* 986 * We also need the pvo entry of the victim we are replacing 987 * so save the R & C bits of the PTE.
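 *
 * A victim installed via the primary hash (PTE_HID clear) has its
 * PVO in this same bucket and can be matched right here (*1*); one
 * that was installed via the secondary hash lives in the other
 * bucket and is found by the fallback search below (*2*).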
988 */ 989 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 990 pmap_pte_compare(pt, &pvo->pvo_pte)) { 991 vpvoh = pvoh; /* *1* */ 992 victim_pvo = pvo; 993 if (source_pvo != NULL) 994 break; 995 } 996 } 997 998 if (source_pvo == NULL) { 999 PMAPCOUNT(ptes_unspilled); 1000 PMAP_UNLOCK(); 1001 return 0; 1002 } 1003 1004 if (victim_pvo == NULL) { 1005 if ((pt->pte_hi & PTE_HID) == 0) 1006 panic("pmap_pte_spill: victim p-pte (%p) has " 1007 "no pvo entry!", pt); 1008 1009 /* 1010 * If this is a secondary PTE, we need to search 1011 * its primary pvo bucket for the matching PVO. 1012 */ 1013 vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */ 1014 TAILQ_FOREACH(pvo, vpvoh, pvo_olink) { 1015 PMAP_PVO_CHECK(pvo); /* sanity check */ 1016 1017 /* 1018 * We also need the pvo entry of the victim we are 1019 * replacing so save the R & C bits of the PTE. 1020 */ 1021 if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 1022 victim_pvo = pvo; 1023 break; 1024 } 1025 } 1026 if (victim_pvo == NULL) 1027 panic("pmap_pte_spill: victim s-pte (%p) has " 1028 "no pvo entry!", pt); 1029 } 1030 1031 /* 1032 * The victim should not be a kernel PVO/PTE entry. 1033 */ 1034 KASSERT(victim_pvo->pvo_pmap != pmap_kernel()); 1035 KASSERT(PVO_PTEGIDX_ISSET(victim_pvo)); 1036 KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i); 1037 1038 /* 1039 * We are invalidating the TLB entry for the EA of the mapping 1040 * we are replacing even though it's valid; if we don't, 1041 * we lose any ref/chg bit changes contained in the TLB 1042 * entry. 1043 */ 1044 source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 1045 1046 /* 1047 * To enforce the PVO list ordering constraint that all 1048 * evicted entries should come before all valid entries, 1049 * move the source PVO to the tail of its list and the 1050 * victim PVO to the head of its list (which might not be 1051 * the same list, if the victim was using the secondary hash). 1052 */ 1053 TAILQ_REMOVE(pvoh, source_pvo, pvo_olink); 1054 TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink); 1055 TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink); 1056 TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink); 1057 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 1058 pmap_pte_set(pt, &source_pvo->pvo_pte); 1059 victim_pvo->pvo_pmap->pm_evictions++; 1060 source_pvo->pvo_pmap->pm_evictions--; 1061 PVO_WHERE(victim_pvo, SPILL_UNSET); 1062 PVO_WHERE(source_pvo, SPILL_SET); 1063 1064 PVO_PTEGIDX_CLR(victim_pvo); 1065 PVO_PTEGIDX_SET(source_pvo, i); 1066 PMAPCOUNT2(pmap_evcnt_ptes_primary[i]); 1067 PMAPCOUNT(ptes_spilled); 1068 PMAPCOUNT(ptes_evicted); 1069 PMAPCOUNT(ptes_removed); 1070 1071 PMAP_PVO_CHECK(victim_pvo); 1072 PMAP_PVO_CHECK(source_pvo); 1073 1074 PMAP_UNLOCK(); 1075 return 1; 1076 } 1077 1078 /* 1079 * Restrict given range to physical memory 1080 */ 1081 void 1082 pmap_real_memory(paddr_t *start, psize_t *size) 1083 { 1084 struct mem_region *mp; 1085 1086 for (mp = mem; mp->size; mp++) { 1087 if (*start + *size > mp->start 1088 && *start < mp->start + mp->size) { 1089 if (*start < mp->start) { 1090 *size -= mp->start - *start; 1091 *start = mp->start; 1092 } 1093 if (*start + *size > mp->start + mp->size) 1094 *size = mp->start + mp->size - *start; 1095 return; 1096 } 1097 } 1098 *size = 0; 1099 } 1100 1101 /* 1102 * Initialize anything else for pmap handling. 1103 * Called during vm_init(). 1104 */ 1105 void 1106 pmap_init(void) 1107 { 1108 1109 pmap_initialized = 1; 1110 } 1111 1112 /* 1113 * How much virtual space does the kernel get?
1114 */ 1115 void 1116 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 1117 { 1118 /* 1119 * For now, reserve one segment (minus some overhead) for kernel 1120 * virtual memory 1121 */ 1122 *start = VM_MIN_KERNEL_ADDRESS; 1123 *end = VM_MAX_KERNEL_ADDRESS; 1124 } 1125 1126 /* 1127 * Allocate, initialize, and return a new physical map. 1128 */ 1129 pmap_t 1130 pmap_create(void) 1131 { 1132 pmap_t pm; 1133 1134 pm = pool_get(&pmap_pool, PR_WAITOK); 1135 KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS); 1136 memset((void *)pm, 0, sizeof *pm); 1137 pmap_pinit(pm); 1138 1139 DPRINTFN(CREATE, "pmap_create: pm %p:\n" 1140 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1141 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n" 1142 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1143 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n", 1144 pm, 1145 pm->pm_sr[0], pm->pm_sr[1], 1146 pm->pm_sr[2], pm->pm_sr[3], 1147 pm->pm_sr[4], pm->pm_sr[5], 1148 pm->pm_sr[6], pm->pm_sr[7], 1149 pm->pm_sr[8], pm->pm_sr[9], 1150 pm->pm_sr[10], pm->pm_sr[11], 1151 pm->pm_sr[12], pm->pm_sr[13], 1152 pm->pm_sr[14], pm->pm_sr[15]); 1153 return pm; 1154 } 1155 1156 /* 1157 * Initialize a preallocated and zeroed pmap structure. 1158 */ 1159 void 1160 pmap_pinit(pmap_t pm) 1161 { 1162 register_t entropy = MFTB(); 1163 register_t mask; 1164 int i; 1165 1166 /* 1167 * Allocate some segment registers for this pmap. 1168 */ 1169 pm->pm_refs = 1; 1170 PMAP_LOCK(); 1171 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1172 static register_t pmap_vsidcontext; 1173 register_t hash; 1174 unsigned int n; 1175 1176 /* Create a new value by multiplying by a prime adding in 1177 * entropy from the timebase register. This is to make the 1178 * VSID more random so that the PT Hash function collides 1179 * less often. (note that the prime causes gcc to do shifts 1180 * instead of a multiply) 1181 */ 1182 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 1183 hash = pmap_vsidcontext & (NPMAPS - 1); 1184 if (hash == 0) { /* 0 is special, avoid it */ 1185 entropy += 0xbadf00d; 1186 continue; 1187 } 1188 n = hash >> 5; 1189 mask = 1L << (hash & (VSID_NBPW-1)); 1190 hash = pmap_vsidcontext; 1191 if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 1192 /* anything free in this bucket? */ 1193 if (~pmap_vsid_bitmap[n] == 0) { 1194 entropy = hash ^ (hash >> 16); 1195 continue; 1196 } 1197 i = ffs(~pmap_vsid_bitmap[n]) - 1; 1198 mask = 1L << i; 1199 hash &= ~(VSID_NBPW-1); 1200 hash |= i; 1201 } 1202 hash &= PTE_VSID >> PTE_VSID_SHFT; 1203 pmap_vsid_bitmap[n] |= mask; 1204 pm->pm_vsid = hash; 1205 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1206 for (i = 0; i < 16; i++) 1207 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY | 1208 SR_NOEXEC; 1209 #endif 1210 PMAP_UNLOCK(); 1211 return; 1212 } 1213 PMAP_UNLOCK(); 1214 panic("pmap_pinit: out of segments"); 1215 } 1216 1217 /* 1218 * Add a reference to the given pmap. 1219 */ 1220 void 1221 pmap_reference(pmap_t pm) 1222 { 1223 atomic_inc_uint(&pm->pm_refs); 1224 } 1225 1226 /* 1227 * Retire the given pmap from service. 1228 * Should only be called if the map contains no valid mappings. 1229 */ 1230 void 1231 pmap_destroy(pmap_t pm) 1232 { 1233 membar_release(); 1234 if (atomic_dec_uint_nv(&pm->pm_refs) == 0) { 1235 membar_acquire(); 1236 pmap_release(pm); 1237 pool_put(&pmap_pool, pm); 1238 } 1239 } 1240 1241 /* 1242 * Release any resources held by the given physical map. 1243 * Called when a pmap initialized by pmap_pinit is being released. 
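 *
 * All that actually needs to be released here is the VSID that
 * pmap_pinit() allocated: its bit is cleared in pmap_vsid_bitmap so
 * the value can be reused.  The pmap structure itself is returned to
 * pmap_pool by pmap_destroy().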
1244 */ 1245 void 1246 pmap_release(pmap_t pm) 1247 { 1248 int idx, mask; 1249 1250 KASSERT(pm->pm_stats.resident_count == 0); 1251 KASSERT(pm->pm_stats.wired_count == 0); 1252 1253 PMAP_LOCK(); 1254 if (pm->pm_sr[0] == 0) 1255 panic("pmap_release"); 1256 idx = pm->pm_vsid & (NPMAPS-1); 1257 mask = 1 << (idx % VSID_NBPW); 1258 idx /= VSID_NBPW; 1259 1260 KASSERT(pmap_vsid_bitmap[idx] & mask); 1261 pmap_vsid_bitmap[idx] &= ~mask; 1262 PMAP_UNLOCK(); 1263 } 1264 1265 /* 1266 * Copy the range specified by src_addr/len 1267 * from the source map to the range dst_addr/len 1268 * in the destination map. 1269 * 1270 * This routine is only advisory and need not do anything. 1271 */ 1272 void 1273 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, 1274 vsize_t len, vaddr_t src_addr) 1275 { 1276 PMAPCOUNT(copies); 1277 } 1278 1279 /* 1280 * Require that all active physical maps contain no 1281 * incorrect entries NOW. 1282 */ 1283 void 1284 pmap_update(struct pmap *pmap) 1285 { 1286 PMAPCOUNT(updates); 1287 TLBSYNC(); 1288 } 1289 1290 static inline int 1291 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1292 { 1293 int pteidx; 1294 /* 1295 * We can find the actual pte entry without searching by 1296 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9] 1297 * and by noticing the HID bit. 1298 */ 1299 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1300 if (pvo->pvo_pte.pte_hi & PTE_HID) 1301 pteidx ^= pmap_pteg_mask * 8; 1302 return pteidx; 1303 } 1304 1305 volatile struct pte * 1306 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1307 { 1308 volatile struct pte *pt; 1309 1310 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1311 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) 1312 return NULL; 1313 #endif 1314 1315 /* 1316 * If we haven't been supplied the ptegidx, calculate it. 
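 * (pteidx encodes both pieces: PTEG index * 8 plus the slot within
 * the group, with the whole value XORed by pmap_pteg_mask * 8 for
 * secondary-hash entries -- see pmap_pvo_pte_index() -- so below we
 * recover the group with pteidx >> 3 and the slot with pteidx & 7.)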
1317 */ 1318 if (pteidx == -1) { 1319 int ptegidx; 1320 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1321 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1322 } 1323 1324 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1325 1326 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1327 return pt; 1328 #else 1329 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1330 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1331 "pvo but no valid pte index", pvo); 1332 } 1333 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1334 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in " 1335 "pvo but no valid pte", pvo); 1336 } 1337 1338 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1339 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1340 #if defined(DEBUG) || defined(PMAPCHECK) 1341 pmap_pte_print(pt); 1342 #endif 1343 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1344 "pmap_pteg_table %p but invalid in pvo", 1345 pvo, pt); 1346 } 1347 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { 1348 #if defined(DEBUG) || defined(PMAPCHECK) 1349 pmap_pte_print(pt); 1350 #endif 1351 panic("pmap_pvo_to_pte: pvo %p: pvo pte does " 1352 "not match pte %p in pmap_pteg_table", 1353 pvo, pt); 1354 } 1355 return pt; 1356 } 1357 1358 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1359 #if defined(DEBUG) || defined(PMAPCHECK) 1360 pmap_pte_print(pt); 1361 #endif 1362 panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in " 1363 "pmap_pteg_table but valid in pvo", pvo, pt); 1364 } 1365 return NULL; 1366 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */ 1367 } 1368 1369 struct pvo_entry * 1370 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p) 1371 { 1372 struct pvo_entry *pvo; 1373 int ptegidx; 1374 1375 va &= ~ADDR_POFF; 1376 ptegidx = va_to_pteg(pm, va); 1377 1378 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1379 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1380 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 1381 panic("pmap_pvo_find_va: invalid pvo %p on " 1382 "list %#x (%p)", pvo, ptegidx, 1383 &pmap_pvo_table[ptegidx]); 1384 #endif 1385 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1386 if (pteidx_p) 1387 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1388 return pvo; 1389 } 1390 } 1391 if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH)) 1392 panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n", 1393 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va); 1394 return NULL; 1395 } 1396 1397 #if defined(DEBUG) || defined(PMAPCHECK) 1398 void 1399 pmap_pvo_check(const struct pvo_entry *pvo) 1400 { 1401 struct pvo_head *pvo_head; 1402 struct pvo_entry *pvo0; 1403 volatile struct pte *pt; 1404 int failed = 0; 1405 1406 PMAP_LOCK(); 1407 1408 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1409 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1410 1411 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1412 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1413 pvo, pvo->pvo_pmap); 1414 failed = 1; 1415 } 1416 1417 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1418 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1419 printf("pmap_pvo_check: pvo %p: invalid olink address %p\n", 1420 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1421 failed = 1; 1422 } 1423 1424 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1425 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1426 printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n", 1427 pvo, LIST_NEXT(pvo, pvo_vlink)); 1428 failed = 1; 1429 } 1430 1431 if (PVO_MANAGED_P(pvo)) { 1432 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1433 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1434 if (pvo0 == pvo) 1435 break; 1436 } 1437 if (pvo0 == NULL) { 1438 printf("pmap_pvo_check: pvo %p: not present " 1439 "on its vlist head %p\n", pvo, pvo_head); 1440 failed = 1; 1441 } 1442 } else { 1443 KASSERT(pvo->pvo_vaddr >= VM_MIN_KERNEL_ADDRESS); 1444 if (__predict_false(pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS)) 1445 failed = 1; 1446 } 1447 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1448 printf("pmap_pvo_check: pvo %p: not present " 1449 "on its olist head\n", pvo); 1450 failed = 1; 1451 } 1452 pt = pmap_pvo_to_pte(pvo, -1); 1453 if (pt == NULL) { 1454 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1455 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1456 "no PTE\n", pvo); 1457 failed = 1; 1458 } 1459 } else { 1460 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1461 (uintptr_t) pt >= 1462 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1463 printf("pmap_pvo_check: pvo %p: pte %p not in " 1464 "pteg table\n", pvo, pt); 1465 failed = 1; 1466 } 1467 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1468 printf("pmap_pvo_check: pvo %p: pte slot does not " 1469 "match PVO_PTEGIDX\n", pvo); 1470 failed = 1; 1471 } 1472 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1473 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1474 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1475 pvo->pvo_pte.pte_hi, 1476 pt->pte_hi); 1477 failed = 1; 1478 } 1479 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1480 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1481 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1482 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1483 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1484 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1485 failed = 1; 1486 } 1487 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1488 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1489 " does not match PVO's VA %#" _PRIxva "\n", 1490 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1491 failed = 1; 1492 } 1493 if (failed) 1494 pmap_pte_print(pt); 1495 } 1496 if (failed) 1497 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1498 pvo->pvo_pmap); 1499 1500 PMAP_UNLOCK(); 1501 } 1502 #endif /* DEBUG || PMAPCHECK */ 1503 1504 /* 1505 * Search the PVO table looking for a non-wired entry.
1506 * If we find one, remove it and return it. 1507 */ 1508 1509 struct pvo_entry * 1510 pmap_pvo_reclaim(struct pmap *pm) 1511 { 1512 struct pvo_tqhead *pvoh; 1513 struct pvo_entry *pvo; 1514 uint32_t idx, endidx; 1515 1516 endidx = pmap_pvo_reclaim_nextidx; 1517 for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx; 1518 idx = (idx + 1) & pmap_pteg_mask) { 1519 pvoh = &pmap_pvo_table[idx]; 1520 TAILQ_FOREACH(pvo, pvoh, pvo_olink) { 1521 if (!PVO_WIRED_P(pvo)) { 1522 pmap_pvo_remove(pvo, -1, NULL); 1523 pmap_pvo_reclaim_nextidx = idx; 1524 PMAPCOUNT(pvos_reclaimed); 1525 return pvo; 1526 } 1527 } 1528 } 1529 return NULL; 1530 } 1531 1532 /* 1533 * Enter a mapping: allocate a pvo entry for it and try to insert a PTE. 1534 * Returns 0 on success, or ENOMEM if no pvo entry could be allocated. 1535 */ 1535 int 1536 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head, 1537 vaddr_t va, paddr_t pa, register_t pte_lo, int flags) 1538 { 1539 struct pvo_entry *pvo; 1540 struct pvo_tqhead *pvoh; 1541 register_t msr; 1542 int ptegidx; 1543 int i; 1544 int poolflags = PR_NOWAIT; 1545 1546 /* 1547 * Compute the PTE Group index. 1548 */ 1549 va &= ~ADDR_POFF; 1550 ptegidx = va_to_pteg(pm, va); 1551 1552 msr = pmap_interrupts_off(); 1553 1554 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1555 if (pmap_pvo_remove_depth > 0) 1556 panic("pmap_pvo_enter: called while pmap_pvo_remove active!"); 1557 if (++pmap_pvo_enter_depth > 1) 1558 panic("pmap_pvo_enter: called recursively!"); 1559 #endif 1560 1561 /* 1562 * Remove any existing mapping for this page. Reuse the 1563 * pvo entry if there is a mapping. 1564 */ 1565 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1566 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1567 #ifdef DEBUG 1568 if ((pmapdebug & PMAPDEBUG_PVOENTER) && 1569 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) & 1570 ~(PTE_REF|PTE_CHG)) == 0 && 1571 va < VM_MIN_KERNEL_ADDRESS) { 1572 printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n", 1573 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa); 1574 printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n", 1575 pvo->pvo_pte.pte_hi, 1576 pm->pm_sr[va >> ADDR_SR_SHFT]); 1577 pmap_pte_print(pmap_pvo_to_pte(pvo, -1)); 1578 #ifdef DDBX 1579 Debugger(); 1580 #endif 1581 } 1582 #endif 1583 PMAPCOUNT(mappings_replaced); 1584 pmap_pvo_remove(pvo, -1, NULL); 1585 break; 1586 } 1587 } 1588 1589 /* 1590 * If we aren't overwriting a mapping, try to allocate a new pvo entry. 1591 */ 1592 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1593 --pmap_pvo_enter_depth; 1594 #endif 1595 pmap_interrupts_restore(msr); 1596 if (pvo == NULL) { 1597 pvo = pool_get(pl, poolflags); 1598 } 1599 KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS); 1600 1601 #ifdef DEBUG 1602 /* 1603 * Exercise pmap_pvo_reclaim() a little.
1604 */ 1605 if (pvo && (flags & PMAP_CANFAIL) != 0 && 1606 pmap_pvo_reclaim_debugctr++ > 0x1000 && 1607 (pmap_pvo_reclaim_debugctr & 0xff) == 0) { 1608 pool_put(pl, pvo); 1609 pvo = NULL; 1610 } 1611 #endif 1612 1613 msr = pmap_interrupts_off(); 1614 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1615 ++pmap_pvo_enter_depth; 1616 #endif 1617 if (pvo == NULL) { 1618 pvo = pmap_pvo_reclaim(pm); 1619 if (pvo == NULL) { 1620 if ((flags & PMAP_CANFAIL) == 0) 1621 panic("pmap_pvo_enter: failed"); 1622 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1623 pmap_pvo_enter_depth--; 1624 #endif 1625 PMAPCOUNT(pvos_failed); 1626 pmap_interrupts_restore(msr); 1627 return ENOMEM; 1628 } 1629 } 1630 1631 pvo->pvo_vaddr = va; 1632 pvo->pvo_pmap = pm; 1633 pvo->pvo_vaddr &= ~ADDR_POFF; 1634 if (flags & VM_PROT_EXECUTE) { 1635 PMAPCOUNT(exec_mappings); 1636 pvo_set_exec(pvo); 1637 } 1638 if (flags & PMAP_WIRED) 1639 pvo->pvo_vaddr |= PVO_WIRED; 1640 if (pvo_head != NULL) { 1641 pvo->pvo_vaddr |= PVO_MANAGED; 1642 PMAPCOUNT(mappings); 1643 } else { 1644 PMAPCOUNT(kernel_mappings); 1645 } 1646 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo); 1647 1648 if (pvo_head != NULL) 1649 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1650 if (PVO_WIRED_P(pvo)) 1651 pvo->pvo_pmap->pm_stats.wired_count++; 1652 pvo->pvo_pmap->pm_stats.resident_count++; 1653 #if defined(DEBUG) 1654 /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */ 1655 DPRINTFN(PVOENTER, 1656 "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n", 1657 pvo, pm, va, pa); 1658 #endif 1659 1660 /* 1661 * We hope this succeeds but it isn't required. 1662 */ 1663 pvoh = &pmap_pvo_table[ptegidx]; 1664 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1665 if (i >= 0) { 1666 PVO_PTEGIDX_SET(pvo, i); 1667 PVO_WHERE(pvo, ENTER_INSERT); 1668 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 1669 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]); 1670 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 1671 1672 } else { 1673 /* 1674 * Since we didn't have room for this entry (which makes it 1675 * an evicted entry), place it at the head of the list. 1676 */ 1677 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink); 1678 PMAPCOUNT(ptes_evicted); 1679 pm->pm_evictions++; 1680 /* 1681 * If this is a kernel page, make sure it's active. 1682 */ 1683 if (pm == pmap_kernel()) { 1684 i = pmap_pte_spill(pm, va, false); 1685 KASSERT(i); 1686 } 1687 } 1688 PMAP_PVO_CHECK(pvo); /* sanity check */ 1689 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1690 pmap_pvo_enter_depth--; 1691 #endif 1692 pmap_interrupts_restore(msr); 1693 return 0; 1694 } 1695 1696 static void 1697 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol) 1698 { 1699 volatile struct pte *pt; 1700 int ptegidx; 1701 1702 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1703 if (++pmap_pvo_remove_depth > 1) 1704 panic("pmap_pvo_remove: called recursively!"); 1705 #endif 1706 1707 /* 1708 * If we haven't been supplied the ptegidx, calculate it. 1709 */ 1710 if (pteidx == -1) { 1711 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1712 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1713 } else { 1714 ptegidx = pteidx >> 3; 1715 if (pvo->pvo_pte.pte_hi & PTE_HID) 1716 ptegidx ^= pmap_pteg_mask; 1717 } 1718 PMAP_PVO_CHECK(pvo); /* sanity check */ 1719 1720 /* 1721 * If there is an active pte entry, we need to deactivate it 1722 * (and save the ref & chg bits).
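 * pmap_pte_unset() copies the hardware REF/CHG bits back into the
 * cached pvo_pte before invalidating the entry; for managed pages
 * they are folded into the page's attribute cache further below via
 * pmap_attr_save().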
1723 */ 1724 pt = pmap_pvo_to_pte(pvo, pteidx); 1725 if (pt != NULL) { 1726 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1727 PVO_WHERE(pvo, REMOVE); 1728 PVO_PTEGIDX_CLR(pvo); 1729 PMAPCOUNT(ptes_removed); 1730 } else { 1731 KASSERT(pvo->pvo_pmap->pm_evictions > 0); 1732 pvo->pvo_pmap->pm_evictions--; 1733 } 1734 1735 /* 1736 * Account for executable mappings. 1737 */ 1738 if (PVO_EXECUTABLE_P(pvo)) 1739 pvo_clear_exec(pvo); 1740 1741 /* 1742 * Update our statistics. 1743 */ 1744 pvo->pvo_pmap->pm_stats.resident_count--; 1745 if (PVO_WIRED_P(pvo)) 1746 pvo->pvo_pmap->pm_stats.wired_count--; 1747 1748 /* 1749 * If the page is managed: 1750 * Save the REF/CHG bits into their cache. 1751 * Remove the PVO from the P/V list. 1752 */ 1753 if (PVO_MANAGED_P(pvo)) { 1754 register_t ptelo = pvo->pvo_pte.pte_lo; 1755 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN); 1756 1757 if (pg != NULL) { 1758 /* 1759 * If this page was changed and it is mapped exec, 1760 * invalidate it. 1761 */ 1762 if ((ptelo & PTE_CHG) && 1763 (pmap_attr_fetch(pg) & PTE_EXEC)) { 1764 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 1765 if (LIST_EMPTY(pvoh)) { 1766 DPRINTFN(EXEC, "[pmap_pvo_remove: " 1767 "%#" _PRIxpa ": clear-exec]\n", 1768 VM_PAGE_TO_PHYS(pg)); 1769 pmap_attr_clear(pg, PTE_EXEC); 1770 PMAPCOUNT(exec_uncached_pvo_remove); 1771 } else { 1772 DPRINTFN(EXEC, "[pmap_pvo_remove: " 1773 "%#" _PRIxpa ": syncicache]\n", 1774 VM_PAGE_TO_PHYS(pg)); 1775 pmap_syncicache(VM_PAGE_TO_PHYS(pg), 1776 PAGE_SIZE); 1777 PMAPCOUNT(exec_synced_pvo_remove); 1778 } 1779 } 1780 1781 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG)); 1782 } 1783 LIST_REMOVE(pvo, pvo_vlink); 1784 PMAPCOUNT(unmappings); 1785 } else { 1786 PMAPCOUNT(kernel_unmappings); 1787 } 1788 1789 /* 1790 * Remove the PVO from its list and return it to the pool. 1791 */ 1792 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1793 if (pvol) { 1794 LIST_INSERT_HEAD(pvol, pvo, pvo_vlink); 1795 } 1796 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1797 pmap_pvo_remove_depth--; 1798 #endif 1799 } 1800 1801 void 1802 pmap_pvo_free(struct pvo_entry *pvo) 1803 { 1804 1805 pool_put(&pmap_pvo_pool, pvo); 1806 } 1807 1808 void 1809 pmap_pvo_free_list(struct pvo_head *pvol) 1810 { 1811 struct pvo_entry *pvo, *npvo; 1812 1813 for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) { 1814 npvo = LIST_NEXT(pvo, pvo_vlink); 1815 LIST_REMOVE(pvo, pvo_vlink); 1816 pmap_pvo_free(pvo); 1817 } 1818 } 1819 1820 /* 1821 * Mark a mapping as executable. 1822 * If this is the first executable mapping in the segment, 1823 * clear the noexec flag. 1824 */ 1825 static void 1826 pvo_set_exec(struct pvo_entry *pvo) 1827 { 1828 struct pmap *pm = pvo->pvo_pmap; 1829 1830 if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) { 1831 return; 1832 } 1833 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1834 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1835 { 1836 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1837 if (pm->pm_exec[sr]++ == 0) { 1838 pm->pm_sr[sr] &= ~SR_NOEXEC; 1839 } 1840 } 1841 #endif 1842 } 1843 1844 /* 1845 * Mark a mapping as non-executable. 1846 * If this was the last executable mapping in the segment, 1847 * set the noexec flag. 
1848 */ 1849 static void 1850 pvo_clear_exec(struct pvo_entry *pvo) 1851 { 1852 struct pmap *pm = pvo->pvo_pmap; 1853 1854 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) { 1855 return; 1856 } 1857 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1858 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1859 { 1860 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1861 if (--pm->pm_exec[sr] == 0) { 1862 pm->pm_sr[sr] |= SR_NOEXEC; 1863 } 1864 } 1865 #endif 1866 } 1867 1868 /* 1869 * Insert physical page at pa into the given pmap at virtual address va. 1870 */ 1871 int 1872 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1873 { 1874 struct mem_region *mp; 1875 struct pvo_head *pvo_head; 1876 struct vm_page *pg; 1877 register_t pte_lo; 1878 int error; 1879 u_int was_exec = 0; 1880 1881 PMAP_LOCK(); 1882 1883 if (__predict_false(!pmap_initialized)) { 1884 pvo_head = NULL; 1885 pg = NULL; 1886 was_exec = PTE_EXEC; 1887 1888 } else { 1889 pvo_head = pa_to_pvoh(pa, &pg); 1890 } 1891 1892 DPRINTFN(ENTER, 1893 "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):", 1894 pm, va, pa, prot, flags); 1895 1896 /* 1897 * If this is a managed page, and it's the first reference to the 1898 * page, clear the execness of the page. Otherwise fetch the execness. 1899 */ 1900 if (pg != NULL) 1901 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1902 1903 DPRINTFN(ENTER, " was_exec=%d", was_exec); 1904 1905 /* 1906 * Assume the page is cache inhibited and access is guarded unless 1907 * it's in our available memory array. If it is in the memory array, 1908 * assume it's in memory coherent memory. 1909 */ 1910 if (flags & PMAP_MD_PREFETCHABLE) { 1911 pte_lo = 0; 1912 } else 1913 pte_lo = PTE_G; 1914 1915 if ((flags & PMAP_NOCACHE) == 0) { 1916 for (mp = mem; mp->size; mp++) { 1917 if (pa >= mp->start && pa < mp->start + mp->size) { 1918 pte_lo = PTE_M; 1919 break; 1920 } 1921 } 1922 #ifdef MULTIPROCESSOR 1923 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 1924 pte_lo = PTE_M; 1925 #endif 1926 } else { 1927 pte_lo |= PTE_I; 1928 } 1929 1930 if (prot & VM_PROT_WRITE) 1931 pte_lo |= PTE_BW; 1932 else 1933 pte_lo |= PTE_BR; 1934 1935 /* 1936 * If this was in response to a fault, "pre-fault" the PTE's 1937 * changed/referenced bit appropriately. 1938 */ 1939 if (flags & VM_PROT_WRITE) 1940 pte_lo |= PTE_CHG; 1941 if (flags & VM_PROT_ALL) 1942 pte_lo |= PTE_REF; 1943 1944 /* 1945 * We need to know if this page can be executable 1946 */ 1947 flags |= (prot & VM_PROT_EXECUTE); 1948 1949 /* 1950 * Record mapping for later back-translation and pte spilling. 1951 * This will overwrite any existing mapping. 1952 */ 1953 error = pmap_pvo_enter(pm, &pmap_pvo_pool, pvo_head, va, pa, pte_lo, flags); 1954 1955 /* 1956 * Flush the real page from the instruction cache if this page is 1957 * mapped executable and cacheable and has not been flushed since 1958 * the last time it was modified.
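 *
 * That is, syncicache is done only when the new mapping is
 * executable, the page is cacheable (PTE_I clear) and the page was
 * not already marked PTE_EXEC; on success PTE_EXEC is recorded in
 * the page attributes so later executable mappings of the same page
 * can skip the flush until that attribute is cleared again.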
1959 */ 1960 if (error == 0 && 1961 (flags & VM_PROT_EXECUTE) && 1962 (pte_lo & PTE_I) == 0 && 1963 was_exec == 0) { 1964 DPRINTFN(ENTER, " %s", "syncicache"); 1965 PMAPCOUNT(exec_synced); 1966 pmap_syncicache(pa, PAGE_SIZE); 1967 if (pg != NULL) { 1968 pmap_attr_save(pg, PTE_EXEC); 1969 PMAPCOUNT(exec_cached); 1970 #if defined(DEBUG) || defined(PMAPDEBUG) 1971 if (pmapdebug & PMAPDEBUG_ENTER) 1972 printf(" marked-as-exec"); 1973 else if (pmapdebug & PMAPDEBUG_EXEC) 1974 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 1975 VM_PAGE_TO_PHYS(pg)); 1976 #endif 1977 } 1978 } 1979 1980 DPRINTFN(ENTER, ": error=%d\n", error); 1981 1982 PMAP_UNLOCK(); 1983 1984 return error; 1985 } 1986 1987 void 1988 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1989 { 1990 struct mem_region *mp; 1991 register_t pte_lo; 1992 int error; 1993 1994 #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA) 1995 if (va < VM_MIN_KERNEL_ADDRESS) 1996 panic("pmap_kenter_pa: attempt to enter " 1997 "non-kernel address %#" _PRIxva "!", va); 1998 #endif 1999 2000 DPRINTFN(KENTER, 2001 "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot); 2002 2003 PMAP_LOCK(); 2004 2005 /* 2006 * Assume the page is cache inhibited and access is guarded unless 2007 * it's in our available memory array. If it is in the memory array, 2008 * asssume it's in memory coherent memory. 2009 */ 2010 pte_lo = PTE_IG; 2011 if ((flags & PMAP_NOCACHE) == 0) { 2012 for (mp = mem; mp->size; mp++) { 2013 if (pa >= mp->start && pa < mp->start + mp->size) { 2014 pte_lo = PTE_M; 2015 break; 2016 } 2017 } 2018 #ifdef MULTIPROCESSOR 2019 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 2020 pte_lo = PTE_M; 2021 #endif 2022 } 2023 2024 if (prot & VM_PROT_WRITE) 2025 pte_lo |= PTE_BW; 2026 else 2027 pte_lo |= PTE_BR; 2028 2029 /* 2030 * We don't care about REF/CHG on PVOs on the unmanaged list. 2031 */ 2032 error = pmap_pvo_enter(pmap_kernel(), &pmap_pvo_pool, 2033 NULL, va, pa, pte_lo, prot|PMAP_WIRED); 2034 2035 if (error != 0) 2036 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2037 va, pa, error); 2038 2039 PMAP_UNLOCK(); 2040 } 2041 2042 void 2043 pmap_kremove(vaddr_t va, vsize_t len) 2044 { 2045 if (va < VM_MIN_KERNEL_ADDRESS) 2046 panic("pmap_kremove: attempt to remove " 2047 "non-kernel address %#" _PRIxva "!", va); 2048 2049 DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len); 2050 pmap_remove(pmap_kernel(), va, va + len); 2051 } 2052 2053 /* 2054 * Remove the given range of mapping entries. 
2055 */ 2056 void 2057 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2058 { 2059 struct pvo_head pvol; 2060 struct pvo_entry *pvo; 2061 register_t msr; 2062 int pteidx; 2063 2064 PMAP_LOCK(); 2065 LIST_INIT(&pvol); 2066 msr = pmap_interrupts_off(); 2067 for (; va < endva; va += PAGE_SIZE) { 2068 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2069 if (pvo != NULL) { 2070 pmap_pvo_remove(pvo, pteidx, &pvol); 2071 } 2072 } 2073 pmap_interrupts_restore(msr); 2074 pmap_pvo_free_list(&pvol); 2075 PMAP_UNLOCK(); 2076 } 2077 2078 #if defined(PMAP_OEA) 2079 #ifdef PPC_OEA601 2080 bool 2081 pmap_extract_ioseg601(vaddr_t va, paddr_t *pap) 2082 { 2083 if ((MFPVR() >> 16) != MPC601) 2084 return false; 2085 2086 const register_t sr = iosrtable[va >> ADDR_SR_SHFT]; 2087 2088 if (SR601_VALID_P(sr) && SR601_PA_MATCH_P(sr, va)) { 2089 if (pap) 2090 *pap = va; 2091 return true; 2092 } 2093 return false; 2094 } 2095 2096 static bool 2097 pmap_extract_battable601(vaddr_t va, paddr_t *pap) 2098 { 2099 const register_t batu = battable[va >> 23].batu; 2100 const register_t batl = battable[va >> 23].batl; 2101 2102 if (BAT601_VALID_P(batl) && BAT601_VA_MATCH_P(batu, batl, va)) { 2103 const register_t mask = 2104 (~(batl & BAT601_BSM) << 17) & ~0x1ffffL; 2105 if (pap) 2106 *pap = (batl & mask) | (va & ~mask); 2107 return true; 2108 } 2109 return false; 2110 } 2111 #endif /* PPC_OEA601 */ 2112 2113 bool 2114 pmap_extract_battable(vaddr_t va, paddr_t *pap) 2115 { 2116 #ifdef PPC_OEA601 2117 if ((MFPVR() >> 16) == MPC601) 2118 return pmap_extract_battable601(va, pap); 2119 #endif /* PPC_OEA601 */ 2120 2121 if (oeacpufeat & OEACPU_NOBAT) 2122 return false; 2123 2124 const register_t batu = battable[BAT_VA2IDX(va)].batu; 2125 2126 if (BAT_VALID_P(batu, 0) && BAT_VA_MATCH_P(batu, va)) { 2127 const register_t batl = battable[BAT_VA2IDX(va)].batl; 2128 const register_t mask = 2129 (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL; 2130 if (pap) 2131 *pap = (batl & mask) | (va & ~mask); 2132 return true; 2133 } 2134 return false; 2135 } 2136 #endif /* PMAP_OEA */ 2137 2138 /* 2139 * Get the physical page address for the given pmap/virtual address. 2140 */ 2141 bool 2142 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2143 { 2144 struct pvo_entry *pvo; 2145 register_t msr; 2146 2147 PMAP_LOCK(); 2148 2149 /* 2150 * If this is the kernel pmap, check the battable and I/O 2151 * segments for a hit. This is done only for regions outside 2152 * VM_MIN_KERNEL_ADDRESS-VM_MAX_KERNEL_ADDRESS. 2153 * 2154 * Be careful when checking VM_MAX_KERNEL_ADDRESS; you don't 2155 * want to wrap around to 0. 2156 */ 2157 if (pm == pmap_kernel() && 2158 (va < VM_MIN_KERNEL_ADDRESS || 2159 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 2160 KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 2161 #if defined(PMAP_OEA) 2162 #ifdef PPC_OEA601 2163 if (pmap_extract_ioseg601(va, pap)) { 2164 PMAP_UNLOCK(); 2165 return true; 2166 } 2167 #endif /* PPC_OEA601 */ 2168 if (pmap_extract_battable(va, pap)) { 2169 PMAP_UNLOCK(); 2170 return true; 2171 } 2172 /* 2173 * We still check the HTAB... 2174 */ 2175 #elif defined(PMAP_OEA64_BRIDGE) 2176 if (va < SEGMENT_LENGTH) { 2177 if (pap) 2178 *pap = va; 2179 PMAP_UNLOCK(); 2180 return true; 2181 } 2182 /* 2183 * We still check the HTAB... 
2184 */
2185 #elif defined(PMAP_OEA64)
2186 #error PPC_OEA64 not supported
2187 #endif /* PPC_OEA */
2188 }
2189
2190 msr = pmap_interrupts_off();
2191 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2192 if (pvo != NULL) {
2193 PMAP_PVO_CHECK(pvo); /* sanity check */
2194 if (pap)
2195 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2196 | (va & ADDR_POFF);
2197 }
2198 pmap_interrupts_restore(msr);
2199 PMAP_UNLOCK();
2200 return pvo != NULL;
2201 }
2202
2203 /*
2204 * Lower the protection on the specified range of this pmap.
2205 */
2206 void
2207 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2208 {
2209 struct pvo_entry *pvo;
2210 volatile struct pte *pt;
2211 register_t msr;
2212 int pteidx;
2213
2214 /*
2215 * Since this routine only downgrades protection, we should
2216 * always be called with at least one bit not set.
2217 */
2218 KASSERT(prot != VM_PROT_ALL);
2219
2220 /*
2221 * If there is no protection left, this is equivalent to
2222 * removing the mappings from the pmap.
2223 */
2224 if ((prot & VM_PROT_READ) == 0) {
2225 pmap_remove(pm, va, endva);
2226 return;
2227 }
2228
2229 PMAP_LOCK();
2230
2231 msr = pmap_interrupts_off();
2232 for (; va < endva; va += PAGE_SIZE) {
2233 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2234 if (pvo == NULL)
2235 continue;
2236 PMAP_PVO_CHECK(pvo); /* sanity check */
2237
2238 /*
2239 * Revoke executable if asked to do so.
2240 */
2241 if ((prot & VM_PROT_EXECUTE) == 0)
2242 pvo_clear_exec(pvo);
2243
2244 #if 0
2245 /*
2246 * If the page is already read-only, no change
2247 * needs to be made.
2248 */
2249 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2250 continue;
2251 #endif
2252 /*
2253 * Grab the PTE pointer before we diddle with
2254 * the cached PTE copy.
2255 */
2256 pt = pmap_pvo_to_pte(pvo, pteidx);
2257 /*
2258 * Change the protection of the page.
2259 */
2260 pvo->pvo_pte.pte_lo &= ~PTE_PP;
2261 pvo->pvo_pte.pte_lo |= PTE_BR;
2262
2263 /*
2264 * If the PVO is in the page table, update
2265 * that PTE as well.
2266 */
2267 if (pt != NULL) {
2268 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2269 PVO_WHERE(pvo, PMAP_PROTECT);
2270 PMAPCOUNT(ptes_changed);
2271 }
2272
2273 PMAP_PVO_CHECK(pvo); /* sanity check */
2274 }
2275 pmap_interrupts_restore(msr);
2276 PMAP_UNLOCK();
2277 }
2278
2279 void
2280 pmap_unwire(pmap_t pm, vaddr_t va)
2281 {
2282 struct pvo_entry *pvo;
2283 register_t msr;
2284
2285 PMAP_LOCK();
2286 msr = pmap_interrupts_off();
2287 pvo = pmap_pvo_find_va(pm, va, NULL);
2288 if (pvo != NULL) {
2289 if (PVO_WIRED_P(pvo)) {
2290 pvo->pvo_vaddr &= ~PVO_WIRED;
2291 pm->pm_stats.wired_count--;
2292 }
2293 PMAP_PVO_CHECK(pvo); /* sanity check */
2294 }
2295 pmap_interrupts_restore(msr);
2296 PMAP_UNLOCK();
2297 }
2298
2299 static void
2300 pmap_pp_protect(struct pmap_page *pp, paddr_t pa, vm_prot_t prot)
2301 {
2302 struct pvo_head *pvo_head, pvol;
2303 struct pvo_entry *pvo, *next_pvo;
2304 volatile struct pte *pt;
2305 register_t msr;
2306
2307 PMAP_LOCK();
2308
2309 KASSERT(prot != VM_PROT_ALL);
2310 LIST_INIT(&pvol);
2311 msr = pmap_interrupts_off();
2312
2313 /*
2314 * When UVM reuses a page, it does a pmap_page_protect with
2315 * VM_PROT_NONE. At that point, we can clear the exec flag
2316 * since we know the page will have different contents.
2317 */ 2318 if ((prot & VM_PROT_READ) == 0) { 2319 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2320 pa); 2321 if (pmap_pp_attr_fetch(pp) & PTE_EXEC) { 2322 PMAPCOUNT(exec_uncached_page_protect); 2323 pmap_pp_attr_clear(pp, PTE_EXEC); 2324 } 2325 } 2326 2327 pvo_head = &pp->pp_pvoh; 2328 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2329 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2330 PMAP_PVO_CHECK(pvo); /* sanity check */ 2331 2332 /* 2333 * Downgrading to no mapping at all, we just remove the entry. 2334 */ 2335 if ((prot & VM_PROT_READ) == 0) { 2336 pmap_pvo_remove(pvo, -1, &pvol); 2337 continue; 2338 } 2339 2340 /* 2341 * If EXEC permission is being revoked, just clear the 2342 * flag in the PVO. 2343 */ 2344 if ((prot & VM_PROT_EXECUTE) == 0) 2345 pvo_clear_exec(pvo); 2346 2347 /* 2348 * If this entry is already RO, don't diddle with the 2349 * page table. 2350 */ 2351 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2352 PMAP_PVO_CHECK(pvo); 2353 continue; 2354 } 2355 2356 /* 2357 * Grab the PTE before the we diddle the bits so 2358 * pvo_to_pte can verify the pte contents are as 2359 * expected. 2360 */ 2361 pt = pmap_pvo_to_pte(pvo, -1); 2362 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2363 pvo->pvo_pte.pte_lo |= PTE_BR; 2364 if (pt != NULL) { 2365 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2366 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2367 PMAPCOUNT(ptes_changed); 2368 } 2369 PMAP_PVO_CHECK(pvo); /* sanity check */ 2370 } 2371 pmap_interrupts_restore(msr); 2372 pmap_pvo_free_list(&pvol); 2373 2374 PMAP_UNLOCK(); 2375 } 2376 2377 /* 2378 * Lower the protection on the specified physical page. 2379 */ 2380 void 2381 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2382 { 2383 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 2384 2385 pmap_pp_protect(&md->mdpg_pp, VM_PAGE_TO_PHYS(pg), prot); 2386 } 2387 2388 /* 2389 * Lower the protection on the physical page at the specified physical 2390 * address, which may not be managed and so may not have a struct 2391 * vm_page. 2392 */ 2393 void 2394 pmap_pv_protect(paddr_t pa, vm_prot_t prot) 2395 { 2396 struct pmap_page *pp; 2397 2398 if ((pp = pmap_pv_tracked(pa)) == NULL) 2399 return; 2400 pmap_pp_protect(pp, pa, prot); 2401 } 2402 2403 /* 2404 * Activate the address space for the specified process. If the process 2405 * is the current process, load the new MMU context. 2406 */ 2407 void 2408 pmap_activate(struct lwp *l) 2409 { 2410 struct pcb *pcb = lwp_getpcb(l); 2411 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2412 2413 DPRINTFN(ACTIVATE, 2414 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp); 2415 2416 /* 2417 * XXX Normally performed in cpu_lwp_fork(). 2418 */ 2419 pcb->pcb_pm = pmap; 2420 2421 /* 2422 * In theory, the SR registers need only be valid on return 2423 * to user space wait to do them there. 2424 */ 2425 if (l == curlwp) { 2426 /* Store pointer to new current pmap. */ 2427 curpm = pmap; 2428 } 2429 } 2430 2431 /* 2432 * Deactivate the specified process's address space. 2433 */ 2434 void 2435 pmap_deactivate(struct lwp *l) 2436 { 2437 } 2438 2439 bool 2440 pmap_query_bit(struct vm_page *pg, int ptebit) 2441 { 2442 struct pvo_entry *pvo; 2443 volatile struct pte *pt; 2444 register_t msr; 2445 2446 PMAP_LOCK(); 2447 2448 if (pmap_attr_fetch(pg) & ptebit) { 2449 PMAP_UNLOCK(); 2450 return true; 2451 } 2452 2453 msr = pmap_interrupts_off(); 2454 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2455 PMAP_PVO_CHECK(pvo); /* sanity check */ 2456 /* 2457 * See if we saved the bit off. 
If so, cache it and return
2458 * success.
2459 */
2460 if (pvo->pvo_pte.pte_lo & ptebit) {
2461 pmap_attr_save(pg, ptebit);
2462 PMAP_PVO_CHECK(pvo); /* sanity check */
2463 pmap_interrupts_restore(msr);
2464 PMAP_UNLOCK();
2465 return true;
2466 }
2467 }
2468 /*
2469 * No luck, now go through the hard part of looking at the ptes
2470 * themselves. Sync so any pending REF/CHG bits are flushed
2471 * to the PTEs.
2472 */
2473 SYNC();
2474 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2475 PMAP_PVO_CHECK(pvo); /* sanity check */
2476 /*
2477 * See if this pvo has a valid PTE. If so, fetch the
2478 * REF/CHG bits from the valid PTE. If the appropriate
2479 * ptebit is set, cache it and return success.
2480 */
2481 pt = pmap_pvo_to_pte(pvo, -1);
2482 if (pt != NULL) {
2483 pmap_pte_synch(pt, &pvo->pvo_pte);
2484 if (pvo->pvo_pte.pte_lo & ptebit) {
2485 pmap_attr_save(pg, ptebit);
2486 PMAP_PVO_CHECK(pvo); /* sanity check */
2487 pmap_interrupts_restore(msr);
2488 PMAP_UNLOCK();
2489 return true;
2490 }
2491 }
2492 }
2493 pmap_interrupts_restore(msr);
2494 PMAP_UNLOCK();
2495 return false;
2496 }
2497
2498 bool
2499 pmap_clear_bit(struct vm_page *pg, int ptebit)
2500 {
2501 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2502 struct pvo_entry *pvo;
2503 volatile struct pte *pt;
2504 register_t msr;
2505 int rv = 0;
2506
2507 PMAP_LOCK();
2508 msr = pmap_interrupts_off();
2509
2510 /*
2511 * Fetch the cached value.
2512 */
2513 rv |= pmap_attr_fetch(pg);
2514
2515 /*
2516 * Clear the cached value.
2517 */
2518 pmap_attr_clear(pg, ptebit);
2519
2520 /*
2521 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2522 * can reset the right ones). Note that since the pvo entries and
2523 * list heads are accessed via BAT0 and are never placed in the
2524 * page table, we don't have to worry about further accesses setting
2525 * the REF/CHG bits.
2526 */
2527 SYNC();
2528
2529 /*
2530 * For each pvo entry, clear ptebit in the cached copy, and if the
2531 * pvo has a valid PTE, clear ptebit from that PTE as well.
2532 */
2533 LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2534 PMAP_PVO_CHECK(pvo); /* sanity check */
2535 pt = pmap_pvo_to_pte(pvo, -1);
2536 if (pt != NULL) {
2537 /*
2538 * Only sync the PTE if the bit we are looking
2539 * for is not already set.
2540 */
2541 if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2542 pmap_pte_synch(pt, &pvo->pvo_pte);
2543 /*
2544 * If the bit we are looking for was already set,
2545 * clear that bit in the pte.
2546 */
2547 if (pvo->pvo_pte.pte_lo & ptebit)
2548 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2549 }
2550 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2551 pvo->pvo_pte.pte_lo &= ~ptebit;
2552 PMAP_PVO_CHECK(pvo); /* sanity check */
2553 }
2554 pmap_interrupts_restore(msr);
2555
2556 /*
2557 * If we are clearing the modify bit and this page was marked EXEC
2558 * and the user of the page thinks the page was modified, then we
2559 * need to clean it from the icache if it's mapped or clear the EXEC
2560 * bit if it's not mapped. The page itself might not have the CHG
2561 * bit set if the modification was done via DMA to the page.
2562 */ 2563 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2564 if (LIST_EMPTY(pvoh)) { 2565 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2566 VM_PAGE_TO_PHYS(pg)); 2567 pmap_attr_clear(pg, PTE_EXEC); 2568 PMAPCOUNT(exec_uncached_clear_modify); 2569 } else { 2570 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2571 VM_PAGE_TO_PHYS(pg)); 2572 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2573 PMAPCOUNT(exec_synced_clear_modify); 2574 } 2575 } 2576 PMAP_UNLOCK(); 2577 return (rv & ptebit) != 0; 2578 } 2579 2580 void 2581 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2582 { 2583 struct pvo_entry *pvo; 2584 size_t offset = va & ADDR_POFF; 2585 int s; 2586 2587 PMAP_LOCK(); 2588 s = splvm(); 2589 while (len > 0) { 2590 size_t seglen = PAGE_SIZE - offset; 2591 if (seglen > len) 2592 seglen = len; 2593 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2594 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2595 pmap_syncicache( 2596 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2597 PMAP_PVO_CHECK(pvo); 2598 } 2599 va += seglen; 2600 len -= seglen; 2601 offset = 0; 2602 } 2603 splx(s); 2604 PMAP_UNLOCK(); 2605 } 2606 2607 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2608 void 2609 pmap_pte_print(volatile struct pte *pt) 2610 { 2611 printf("PTE %p: ", pt); 2612 2613 #if defined(PMAP_OEA) 2614 /* High word: */ 2615 printf("%#" _PRIxpte ": [", pt->pte_hi); 2616 #else 2617 printf("%#" _PRIxpte ": [", pt->pte_hi); 2618 #endif /* PMAP_OEA */ 2619 2620 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2621 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2622 2623 printf("%#" _PRIxpte " %#" _PRIxpte "", 2624 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2625 pt->pte_hi & PTE_API); 2626 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2627 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2628 #else 2629 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2630 #endif /* PMAP_OEA */ 2631 2632 /* Low word: */ 2633 #if defined (PMAP_OEA) 2634 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2635 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2636 #else 2637 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2638 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2639 #endif 2640 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2641 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2642 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2643 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2644 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2645 printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2646 switch (pt->pte_lo & PTE_PP) { 2647 case PTE_BR: printf("br]\n"); break; 2648 case PTE_BW: printf("bw]\n"); break; 2649 case PTE_SO: printf("so]\n"); break; 2650 case PTE_SW: printf("sw]\n"); break; 2651 } 2652 } 2653 #endif 2654 2655 #if defined(DDB) 2656 void 2657 pmap_pteg_check(void) 2658 { 2659 volatile struct pte *pt; 2660 int i; 2661 int ptegidx; 2662 u_int p_valid = 0; 2663 u_int s_valid = 0; 2664 u_int invalid = 0; 2665 2666 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2667 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2668 if (pt->pte_hi & PTE_VALID) { 2669 if (pt->pte_hi & PTE_HID) 2670 s_valid++; 2671 else 2672 { 2673 p_valid++; 2674 } 2675 } else 2676 invalid++; 2677 } 2678 } 2679 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2680 p_valid, p_valid, s_valid, s_valid, 2681 invalid, invalid); 2682 } 2683 2684 void 2685 pmap_print_mmuregs(void) 2686 { 2687 int i; 2688 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2689 u_int cpuvers; 2690 #endif 2691 #ifndef PMAP_OEA64 2692 vaddr_t addr; 2693 register_t soft_sr[16]; 2694 #endif 2695 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2696 struct bat soft_ibat[4]; 2697 struct bat soft_dbat[4]; 2698 #endif 2699 paddr_t sdr1; 2700 2701 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2702 cpuvers = MFPVR() >> 16; 2703 #endif 2704 __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2705 #ifndef PMAP_OEA64 2706 addr = 0; 2707 for (i = 0; i < 16; i++) { 2708 soft_sr[i] = MFSRIN(addr); 2709 addr += (1 << ADDR_SR_SHFT); 2710 } 2711 #endif 2712 2713 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2714 /* read iBAT (601: uBAT) registers */ 2715 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2716 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2717 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2718 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2719 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2720 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2721 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2722 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2723 2724 2725 if (cpuvers != MPC601) { 2726 /* read dBAT registers */ 2727 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2728 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2729 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2730 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2731 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2732 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2733 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu)); 2734 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2735 } 2736 #endif 2737 2738 printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2739 #ifndef PMAP_OEA64 2740 printf("SR[]:\t"); 2741 for (i = 0; i < 4; i++) 2742 printf("0x%08lx, ", soft_sr[i]); 2743 printf("\n\t"); 2744 for ( ; i < 8; i++) 2745 printf("0x%08lx, ", soft_sr[i]); 2746 printf("\n\t"); 2747 for ( ; i < 12; i++) 2748 printf("0x%08lx, ", soft_sr[i]); 2749 printf("\n\t"); 2750 for ( ; i < 16; i++) 2751 printf("0x%08lx, ", soft_sr[i]); 2752 printf("\n"); 2753 #endif 2754 2755 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2756 printf("%cBAT[]:\t", cpuvers == MPC601 ? 
'u' : 'i'); 2757 for (i = 0; i < 4; i++) { 2758 printf("0x%08lx 0x%08lx, ", 2759 soft_ibat[i].batu, soft_ibat[i].batl); 2760 if (i == 1) 2761 printf("\n\t"); 2762 } 2763 if (cpuvers != MPC601) { 2764 printf("\ndBAT[]:\t"); 2765 for (i = 0; i < 4; i++) { 2766 printf("0x%08lx 0x%08lx, ", 2767 soft_dbat[i].batu, soft_dbat[i].batl); 2768 if (i == 1) 2769 printf("\n\t"); 2770 } 2771 } 2772 printf("\n"); 2773 #endif /* PMAP_OEA... */ 2774 } 2775 2776 void 2777 pmap_print_pte(pmap_t pm, vaddr_t va) 2778 { 2779 struct pvo_entry *pvo; 2780 volatile struct pte *pt; 2781 int pteidx; 2782 2783 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2784 if (pvo != NULL) { 2785 pt = pmap_pvo_to_pte(pvo, pteidx); 2786 if (pt != NULL) { 2787 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2788 va, pt, 2789 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2790 pt->pte_hi, pt->pte_lo); 2791 } else { 2792 printf("No valid PTE found\n"); 2793 } 2794 } else { 2795 printf("Address not in pmap\n"); 2796 } 2797 } 2798 2799 void 2800 pmap_pteg_dist(void) 2801 { 2802 struct pvo_entry *pvo; 2803 int ptegidx; 2804 int depth; 2805 int max_depth = 0; 2806 unsigned int depths[64]; 2807 2808 memset(depths, 0, sizeof(depths)); 2809 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2810 depth = 0; 2811 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2812 depth++; 2813 } 2814 if (depth > max_depth) 2815 max_depth = depth; 2816 if (depth > 63) 2817 depth = 63; 2818 depths[depth]++; 2819 } 2820 2821 for (depth = 0; depth < 64; depth++) { 2822 printf(" [%2d]: %8u", depth, depths[depth]); 2823 if ((depth & 3) == 3) 2824 printf("\n"); 2825 if (depth == max_depth) 2826 break; 2827 } 2828 if ((depth & 3) != 3) 2829 printf("\n"); 2830 printf("Max depth found was %d\n", max_depth); 2831 } 2832 #endif /* DEBUG */ 2833 2834 #if defined(PMAPCHECK) || defined(DEBUG) 2835 void 2836 pmap_pvo_verify(void) 2837 { 2838 int ptegidx; 2839 int s; 2840 2841 s = splvm(); 2842 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2843 struct pvo_entry *pvo; 2844 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2845 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2846 panic("pmap_pvo_verify: invalid pvo %p " 2847 "on list %#x", pvo, ptegidx); 2848 pmap_pvo_check(pvo); 2849 } 2850 } 2851 splx(s); 2852 } 2853 #endif /* PMAPCHECK */ 2854 2855 void * 2856 pmap_pool_alloc(struct pool *pp, int flags) 2857 { 2858 struct pvo_page *pvop; 2859 struct vm_page *pg; 2860 2861 if (uvm.page_init_done != true) { 2862 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2863 } 2864 2865 PMAP_LOCK(); 2866 pvop = SIMPLEQ_FIRST(&pmap_pvop_head); 2867 if (pvop != NULL) { 2868 pmap_pvop_free--; 2869 SIMPLEQ_REMOVE_HEAD(&pmap_pvop_head, pvop_link); 2870 PMAP_UNLOCK(); 2871 return pvop; 2872 } 2873 PMAP_UNLOCK(); 2874 again: 2875 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2876 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2877 if (__predict_false(pg == NULL)) { 2878 if (flags & PR_WAITOK) { 2879 uvm_wait("plpg"); 2880 goto again; 2881 } else { 2882 return (0); 2883 } 2884 } 2885 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2886 return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg); 2887 } 2888 2889 void 2890 pmap_pool_free(struct pool *pp, void *va) 2891 { 2892 struct pvo_page *pvop; 2893 2894 PMAP_LOCK(); 2895 pvop = va; 2896 SIMPLEQ_INSERT_HEAD(&pmap_pvop_head, pvop, pvop_link); 2897 pmap_pvop_free++; 2898 if (pmap_pvop_free > pmap_pvop_maxfree) 2899 pmap_pvop_maxfree = pmap_pvop_free; 2900 PMAP_UNLOCK(); 2901 #if 0 2902 
uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2903 #endif
2904 }
2905
2906 /*
2907 * This routine is used during bootstrapping to steal to-be-managed memory
2908 * (which will then be unmanaged). We use it to grab from the first 256MB
2909 * for our pmap needs and above 256MB for other stuff.
2910 */
2911 vaddr_t
2912 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2913 {
2914 vsize_t size;
2915 vaddr_t va;
2916 paddr_t start, end, pa = 0;
2917 int npgs, freelist;
2918 uvm_physseg_t bank;
2919
2920 if (uvm.page_init_done == true)
2921 panic("pmap_steal_memory: called _after_ bootstrap");
2922
2923 *vstartp = VM_MIN_KERNEL_ADDRESS;
2924 *vendp = VM_MAX_KERNEL_ADDRESS;
2925
2926 size = round_page(vsize);
2927 npgs = atop(size);
2928
2929 /*
2930 * PA 0 will never be among those given to UVM so we can use it
2931 * to indicate we couldn't steal any memory.
2932 */
2933
2934 for (bank = uvm_physseg_get_first();
2935 uvm_physseg_valid_p(bank);
2936 bank = uvm_physseg_get_next(bank)) {
2937
2938 freelist = uvm_physseg_get_free_list(bank);
2939 start = uvm_physseg_get_start(bank);
2940 end = uvm_physseg_get_end(bank);
2941
2942 if (freelist == VM_FREELIST_FIRST256 &&
2943 (end - start) >= npgs) {
2944 pa = ptoa(start);
2945 break;
2946 }
2947 }
2948
2949 if (pa == 0)
2950 panic("pmap_steal_memory: no appropriate memory to steal!");
2951
2952 uvm_physseg_unplug(start, npgs);
2953
2954 va = (vaddr_t) pa;
2955 memset((void *) va, 0, size);
2956 pmap_pages_stolen += npgs;
2957 #ifdef DEBUG
2958 if (pmapdebug && npgs > 1) {
2959 u_int cnt = 0;
2960 for (bank = uvm_physseg_get_first();
2961 uvm_physseg_valid_p(bank);
2962 bank = uvm_physseg_get_next(bank)) {
2963 cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
2964 }
2965 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2966 npgs, pmap_pages_stolen, cnt);
2967 }
2968 #endif
2969
2970 return va;
2971 }
2972
2973 /*
2974 * Find a chunk of memory with the right size and alignment.
2975 */
2976 paddr_t
2977 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2978 {
2979 struct mem_region *mp;
2980 paddr_t s, e;
2981 int i, j;
2982
2983 size = round_page(size);
2984
2985 DPRINTFN(BOOT,
2986 "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
2987 size, alignment, at_end);
2988
2989 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2990 panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
2991 alignment);
2992
2993 if (at_end) {
2994 if (alignment != PAGE_SIZE)
2995 panic("pmap_boot_find_memory: invalid ending "
2996 "alignment %#" _PRIxpa, alignment);
2997
2998 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
2999 s = mp->start + mp->size - size;
3000 if (s >= mp->start && mp->size >= size) {
3001 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3002 DPRINTFN(BOOT,
3003 "pmap_boot_find_memory: b-avail[%d] start "
3004 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3005 mp->start, mp->size);
3006 mp->size -= size;
3007 DPRINTFN(BOOT,
3008 "pmap_boot_find_memory: a-avail[%d] start "
3009 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3010 mp->start, mp->size);
3011 return s;
3012 }
3013 }
3014 panic("pmap_boot_find_memory: no available memory");
3015 }
3016
3017 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3018 s = (mp->start + alignment - 1) & ~(alignment-1);
3019 e = s + size;
3020
3021 /*
3022 * Is the calculated region entirely within this available region?
3023 */
3024 if (s < mp->start || e > mp->start + mp->size)
3025 continue;
3026
3027 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3028 if (s == mp->start) {
3029 /*
3030 * If the block starts at the beginning of region,
3031 * adjust the size & start. (the region may now be
3032 * zero in length)
3033 */
3034 DPRINTFN(BOOT,
3035 "pmap_boot_find_memory: b-avail[%d] start "
3036 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3037 mp->start += size;
3038 mp->size -= size;
3039 DPRINTFN(BOOT,
3040 "pmap_boot_find_memory: a-avail[%d] start "
3041 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3042 } else if (e == mp->start + mp->size) {
3043 /*
3044 * If the block ends at the end of the region,
3045 * adjust only the size.
3046 */
3047 DPRINTFN(BOOT,
3048 "pmap_boot_find_memory: b-avail[%d] start "
3049 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3050 mp->size -= size;
3051 DPRINTFN(BOOT,
3052 "pmap_boot_find_memory: a-avail[%d] start "
3053 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3054 } else {
3055 /*
3056 * Block is in the middle of the region, so we
3057 * have to split it in two.
3058 */
3059 for (j = avail_cnt; j > i + 1; j--) {
3060 avail[j] = avail[j-1];
3061 }
3062 DPRINTFN(BOOT,
3063 "pmap_boot_find_memory: b-avail[%d] start "
3064 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3065 mp[1].start = e;
3066 mp[1].size = mp[0].start + mp[0].size - e;
3067 mp[0].size = s - mp[0].start;
3068 avail_cnt++;
3069 for (; i < avail_cnt; i++) {
3070 DPRINTFN(BOOT,
3071 "pmap_boot_find_memory: a-avail[%d] "
3072 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
3073 avail[i].start, avail[i].size);
3074 }
3075 }
3076 KASSERT(s == (uintptr_t) s);
3077 return s;
3078 }
3079 panic("pmap_boot_find_memory: not enough memory for "
3080 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
3081 }
3082
3083 /* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
3084 #if defined (PMAP_OEA64_BRIDGE)
3085 int
3086 pmap_setup_segment0_map(int use_large_pages, ...)
3087 {
3088 vaddr_t va, va_end;
3089
3090 register_t pte_lo = 0x0;
3091 int ptegidx = 0;
3092 struct pte pte;
3093 va_list ap;
3094
3095 /* Coherent + Supervisor RW, no user access */
3096 pte_lo = PTE_M;
3097
3098 /* XXXSL
3099 * Map in 1st segment 1:1; we'll be careful not to spill kernel entries later,
3100 * as these have to take priority.
3101 */
3102 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3103 ptegidx = va_to_pteg(pmap_kernel(), va);
3104 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3105 (void)pmap_pte_insert(ptegidx, &pte);
3106 }
3107
3108 va_start(ap, use_large_pages);
3109 while (1) {
3110 paddr_t pa;
3111 size_t size;
3112
3113 va = va_arg(ap, vaddr_t);
3114
3115 if (va == 0)
3116 break;
3117
3118 pa = va_arg(ap, paddr_t);
3119 size = va_arg(ap, size_t);
3120
3121 for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
3122 #if 0
3123 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa);
3124 #endif
3125 ptegidx = va_to_pteg(pmap_kernel(), va);
3126 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3127 (void)pmap_pte_insert(ptegidx, &pte);
3128 }
3129 }
3130 va_end(ap);
3131
3132 TLBSYNC();
3133 SYNC();
3134 return (0);
3135 }
3136 #endif /* PMAP_OEA64_BRIDGE */
3137
3138 /*
3139 * Set up the bottom level of the data structures necessary for the kernel
3140 * to manage memory. MMU hardware is programmed in pmap_bootstrap2().
3141 */
3142 void
3143 pmap_bootstrap1(paddr_t kernelstart, paddr_t kernelend)
3144 {
3145 struct mem_region *mp, tmp;
3146 paddr_t s, e;
3147 psize_t size;
3148 int i, j;
3149
3150 /*
3151 * Get memory.
3152 */
3153 mem_regions(&mem, &avail);
3154 #if defined(DEBUG)
3155 if (pmapdebug & PMAPDEBUG_BOOT) {
3156 printf("pmap_bootstrap: memory configuration:\n");
3157 for (mp = mem; mp->size; mp++) {
3158 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
3159 mp->start, mp->size);
3160 }
3161 for (mp = avail; mp->size; mp++) {
3162 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
3163 mp->start, mp->size);
3164 }
3165 }
3166 #endif
3167
3168 /*
3169 * Find out how much physical memory we have and in how many chunks.
3170 */
3171 for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3172 if (mp->start >= pmap_memlimit)
3173 continue;
3174 if (mp->start + mp->size > pmap_memlimit) {
3175 size = pmap_memlimit - mp->start;
3176 physmem += btoc(size);
3177 } else {
3178 physmem += btoc(mp->size);
3179 }
3180 mem_cnt++;
3181 }
3182
3183 /*
3184 * Count the number of available entries.
3185 */
3186 for (avail_cnt = 0, mp = avail; mp->size; mp++)
3187 avail_cnt++;
3188
3189 /*
3190 * Page align all regions.
3191 */
3192 kernelstart = trunc_page(kernelstart);
3193 kernelend = round_page(kernelend);
3194 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3195 s = round_page(mp->start);
3196 mp->size -= (s - mp->start);
3197 mp->size = trunc_page(mp->size);
3198 mp->start = s;
3199 e = mp->start + mp->size;
3200
3201 DPRINTFN(BOOT,
3202 "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3203 i, mp->start, mp->size);
3204
3205 /*
3206 * Don't allow the end to run beyond our artificial limit.
3207 */
3208 if (e > pmap_memlimit)
3209 e = pmap_memlimit;
3210
3211 /*
3212 * Is this region empty or strange? Skip it.
3213 */
3214 if (e <= s) {
3215 mp->start = 0;
3216 mp->size = 0;
3217 continue;
3218 }
3219
3220 /*
3221 * Does this region overlap the beginning of the kernel?
3222 * Does it extend past the end of the kernel?
3223 */
3224 else if (s < kernelstart && e > kernelstart) {
3225 if (e > kernelend) {
3226 avail[avail_cnt].start = kernelend;
3227 avail[avail_cnt].size = e - kernelend;
3228 avail_cnt++;
3229 }
3230 mp->size = kernelstart - s;
3231 }
3232 /*
3233 * Check whether this region overlaps the end of the kernel.
3234 */
3235 else if (s < kernelend && e > kernelend) {
3236 mp->start = kernelend;
3237 mp->size = e - kernelend;
3238 }
3239 /*
3240 * Look whether this region is completely inside the kernel.
3241 * Nuke it if so.
3242 */
3243 else if (s >= kernelstart && e <= kernelend) {
3244 mp->start = 0;
3245 mp->size = 0;
3246 }
3247 /*
3248 * If the user imposed a memory limit, enforce it.
3249 */
3250 else if (s >= pmap_memlimit) {
3251 mp->start = -PAGE_SIZE; /* let us know why */
3252 mp->size = 0;
3253 }
3254 else {
3255 mp->start = s;
3256 mp->size = e - s;
3257 }
3258 DPRINTFN(BOOT,
3259 "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3260 i, mp->start, mp->size);
3261 }
3262
3263 /*
3264 * Move (and uncount) all the null regions to the end.
3265 */
3266 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3267 if (mp->size == 0) {
3268 tmp = avail[i];
3269 avail[i] = avail[--avail_cnt];
3270 avail[avail_cnt] = avail[i];
3271 }
3272 }
3273
3274 /*
3275 * (Bubble)sort them into ascending order.
3276 */ 3277 for (i = 0; i < avail_cnt; i++) { 3278 for (j = i + 1; j < avail_cnt; j++) { 3279 if (avail[i].start > avail[j].start) { 3280 tmp = avail[i]; 3281 avail[i] = avail[j]; 3282 avail[j] = tmp; 3283 } 3284 } 3285 } 3286 3287 /* 3288 * Make sure they don't overlap. 3289 */ 3290 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 3291 if (mp[0].start + mp[0].size > mp[1].start) { 3292 mp[0].size = mp[1].start - mp[0].start; 3293 } 3294 DPRINTFN(BOOT, 3295 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3296 i, mp->start, mp->size); 3297 } 3298 DPRINTFN(BOOT, 3299 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3300 i, mp->start, mp->size); 3301 3302 #ifdef PTEGCOUNT 3303 pmap_pteg_cnt = PTEGCOUNT; 3304 #else /* PTEGCOUNT */ 3305 3306 pmap_pteg_cnt = 0x1000; 3307 3308 while (pmap_pteg_cnt < physmem) 3309 pmap_pteg_cnt <<= 1; 3310 3311 pmap_pteg_cnt >>= 1; 3312 #endif /* PTEGCOUNT */ 3313 3314 #ifdef DEBUG 3315 DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt); 3316 #endif 3317 3318 /* 3319 * Find suitably aligned memory for PTEG hash table. 3320 */ 3321 size = pmap_pteg_cnt * sizeof(struct pteg); 3322 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3323 3324 #ifdef DEBUG 3325 DPRINTFN(BOOT, 3326 "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table); 3327 #endif 3328 3329 3330 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3331 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 3332 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB", 3333 pmap_pteg_table, size); 3334 #endif 3335 3336 memset(__UNVOLATILE(pmap_pteg_table), 0, 3337 pmap_pteg_cnt * sizeof(struct pteg)); 3338 pmap_pteg_mask = pmap_pteg_cnt - 1; 3339 3340 /* 3341 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3342 * with pages. So we just steal them before giving them to UVM. 3343 */ 3344 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3345 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3346 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3347 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 3348 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB", 3349 pmap_pvo_table, size); 3350 #endif 3351 3352 for (i = 0; i < pmap_pteg_cnt; i++) 3353 TAILQ_INIT(&pmap_pvo_table[i]); 3354 3355 #ifndef MSGBUFADDR 3356 /* 3357 * Allocate msgbuf in high memory. 3358 */ 3359 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3360 #endif 3361 3362 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3363 paddr_t pfstart = atop(mp->start); 3364 paddr_t pfend = atop(mp->start + mp->size); 3365 if (mp->size == 0) 3366 continue; 3367 if (mp->start + mp->size <= SEGMENT_LENGTH) { 3368 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3369 VM_FREELIST_FIRST256); 3370 } else if (mp->start >= SEGMENT_LENGTH) { 3371 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3372 VM_FREELIST_DEFAULT); 3373 } else { 3374 pfend = atop(SEGMENT_LENGTH); 3375 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3376 VM_FREELIST_FIRST256); 3377 pfstart = atop(SEGMENT_LENGTH); 3378 pfend = atop(mp->start + mp->size); 3379 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3380 VM_FREELIST_DEFAULT); 3381 } 3382 } 3383 3384 /* 3385 * Make sure kernel vsid is allocated as well as VSID 0. 
3386 */ 3387 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3388 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3389 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3390 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3391 pmap_vsid_bitmap[0] |= 1; 3392 3393 /* 3394 * Initialize kernel pmap. 3395 */ 3396 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 3397 for (i = 0; i < 16; i++) { 3398 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3399 } 3400 pmap_kernel()->pm_vsid = KERNEL_VSIDBITS; 3401 3402 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3403 #ifdef KERNEL2_SR 3404 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3405 #endif 3406 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3407 3408 #if defined(PMAP_OEA) && defined(PPC_OEA601) 3409 if ((MFPVR() >> 16) == MPC601) { 3410 for (i = 0; i < 16; i++) { 3411 if (iosrtable[i] & SR601_T) { 3412 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3413 } 3414 } 3415 } 3416 #endif /* PMAP_OEA && PPC_OEA601 */ 3417 3418 #ifdef ALTIVEC 3419 pmap_use_altivec = cpu_altivec; 3420 #endif 3421 3422 #ifdef DEBUG 3423 if (pmapdebug & PMAPDEBUG_BOOT) { 3424 u_int cnt; 3425 uvm_physseg_t bank; 3426 char pbuf[9]; 3427 for (cnt = 0, bank = uvm_physseg_get_first(); 3428 uvm_physseg_valid_p(bank); 3429 bank = uvm_physseg_get_next(bank)) { 3430 cnt += uvm_physseg_get_avail_end(bank) - 3431 uvm_physseg_get_avail_start(bank); 3432 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3433 bank, 3434 ptoa(uvm_physseg_get_avail_start(bank)), 3435 ptoa(uvm_physseg_get_avail_end(bank)), 3436 ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank))); 3437 } 3438 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3439 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3440 pbuf, cnt); 3441 } 3442 #endif 3443 3444 pool_init(&pmap_pvo_pool, sizeof(struct pvo_entry), 3445 sizeof(struct pvo_entry), 0, 0, "pmap_pvopl", 3446 &pmap_pool_allocator, IPL_VM); 3447 3448 pool_setlowat(&pmap_pvo_pool, 1008); 3449 3450 pool_init(&pmap_pool, sizeof(struct pmap), 3451 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_allocator, 3452 IPL_NONE); 3453 3454 #if defined(PMAP_NEED_MAPKERNEL) 3455 { 3456 struct pmap *pm = pmap_kernel(); 3457 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3458 extern int etext[], kernel_text[]; 3459 vaddr_t va, va_etext = (paddr_t) etext; 3460 #endif 3461 paddr_t pa, pa_end; 3462 register_t sr; 3463 struct pte pt; 3464 unsigned int ptegidx; 3465 int bank; 3466 3467 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3468 pm->pm_sr[0] = sr; 3469 3470 for (bank = 0; bank < vm_nphysseg; bank++) { 3471 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3472 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3473 for (; pa < pa_end; pa += PAGE_SIZE) { 3474 ptegidx = va_to_pteg(pm, pa); 3475 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3476 pmap_pte_insert(ptegidx, &pt); 3477 } 3478 } 3479 3480 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3481 va = (vaddr_t) kernel_text; 3482 3483 for (pa = kernelstart; va < va_etext; 3484 pa += PAGE_SIZE, va += PAGE_SIZE) { 3485 ptegidx = va_to_pteg(pm, va); 3486 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3487 pmap_pte_insert(ptegidx, &pt); 3488 } 3489 3490 for (; pa < kernelend; 3491 pa += PAGE_SIZE, va += PAGE_SIZE) { 3492 ptegidx = va_to_pteg(pm, va); 3493 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3494 pmap_pte_insert(ptegidx, &pt); 3495 } 3496 3497 for (va = 0, pa = 0; va < kernelstart; 3498 pa += PAGE_SIZE, va += PAGE_SIZE) { 3499 
ptegidx = va_to_pteg(pm, va); 3500 if (va < 0x3000) 3501 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3502 else 3503 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3504 pmap_pte_insert(ptegidx, &pt); 3505 } 3506 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3507 pa += PAGE_SIZE, va += PAGE_SIZE) { 3508 ptegidx = va_to_pteg(pm, va); 3509 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3510 pmap_pte_insert(ptegidx, &pt); 3511 } 3512 #endif /* PMAP_NEED_FULL_MAPKERNEL */ 3513 } 3514 #endif /* PMAP_NEED_MAPKERNEL */ 3515 } 3516 3517 /* 3518 * Using the data structures prepared in pmap_bootstrap1(), program 3519 * the MMU hardware. 3520 */ 3521 void 3522 pmap_bootstrap2(void) 3523 { 3524 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 3525 for (int i = 0; i < 16; i++) { 3526 __asm volatile("mtsrin %0,%1" 3527 :: "r"(pmap_kernel()->pm_sr[i]), 3528 "r"(i << ADDR_SR_SHFT)); 3529 } 3530 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3531 3532 #if defined(PMAP_OEA) 3533 __asm volatile("sync; mtsdr1 %0; isync" 3534 : 3535 : "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)) 3536 : "memory"); 3537 #elif defined(PMAP_OEA64) || defined(PMAP_OEA64_BRIDGE) 3538 __asm volatile("sync; mtsdr1 %0; isync" 3539 : 3540 : "r"((uintptr_t)pmap_pteg_table | 3541 (32 - __builtin_clz(pmap_pteg_mask >> 11))) 3542 : "memory"); 3543 #endif 3544 tlbia(); 3545 3546 #if defined(PMAPDEBUG) 3547 if (pmapdebug) 3548 pmap_print_mmuregs(); 3549 #endif 3550 } 3551 3552 /* 3553 * This is not part of the defined PMAP interface and is specific to the 3554 * PowerPC architecture. This is called during initppc, before the system 3555 * is really initialized. 3556 */ 3557 void 3558 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3559 { 3560 pmap_bootstrap1(kernelstart, kernelend); 3561 pmap_bootstrap2(); 3562 } 3563
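
/*
 * Editorial illustration (not part of the original file, not compiled):
 * a minimal sketch of how a hypothetical machine-dependent caller might
 * use pmap_extract() above to translate a kernel virtual address to a
 * physical address.  The helper name is made up for the example.
 */
#if 0
static paddr_t
example_kva_to_pa(vaddr_t va)
{
	paddr_t pa;

	/*
	 * pmap_extract() returns false if no translation exists; for the
	 * kernel pmap it also consults the BAT/I-O segment mappings for
	 * addresses outside the managed kernel VA range before falling
	 * back to the PVO/HTAB lookup.
	 */
	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("example_kva_to_pa: %#" _PRIxva " is not mapped", va);
	return pa;
}
#endif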
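
/*
 * Editorial illustration (not part of the original file, not compiled):
 * a sketch of how generic "modified"/"referenced" page queries could be
 * layered on pmap_query_bit() and pmap_clear_bit() above using the
 * PTE_CHG and PTE_REF attribute bits.  The real wrappers live in the
 * machine-dependent pmap header; the names below are assumed examples.
 */
#if 0
static bool
example_is_modified(struct vm_page *pg)
{
	/* True if CHG is cached for the page or set in any mapping. */
	return pmap_query_bit(pg, PTE_CHG);
}

static bool
example_clear_reference(struct vm_page *pg)
{
	/* Returns true if REF was set in the cache or any mapping. */
	return pmap_clear_bit(pg, PTE_REF);
}
#endif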