/*	$NetBSD: pmap.c,v 1.80 2011/06/19 07:59:47 matt Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.80 2011/06/19 07:59:47 matt Exp $");

#define	PMAP_NOOPNAMES

#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

#include <machine/powerpc.h>
#include <powerpc/bat.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/stdarg.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

#ifdef ALTIVEC
int pmap_use_altivec;
#endif

volatile struct pteg *pmap_pteg_table;
unsigned int pmap_pteg_cnt;
unsigned int pmap_pteg_mask;
#ifdef PMAP_MEMLIMIT
static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
static paddr_t pmap_memlimit = -PAGE_SIZE;	/* there is no limit */
#endif

struct pmap kernel_pmap_;
unsigned int pmap_pages_stolen;
u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
u_long pmap_pvo_enter_depth;
u_long pmap_pvo_remove_depth;
#endif

#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
# define	PMAP_OEA 1
#endif

#if defined(PMAP_OEA)
#define	_PRIxpte	"lx"
#else
#define	_PRIxpte	PRIx64
#endif
#define	_PRIxpa		"lx"
#define	_PRIxva		"lx"
#define	_PRIsr		"lx"

#ifdef PMAP_NEEDS_FIXUP
#if defined(PMAP_OEA)
#define	PMAPNAME(name)	pmap32_##name
#elif defined(PMAP_OEA64)
#define	PMAPNAME(name)	pmap64_##name
#elif defined(PMAP_OEA64_BRIDGE)
#define	PMAPNAME(name)	pmap64bridge_##name
#else
#error unknown variant for pmap
#endif
#endif /* PMAP_NEEDS_FIXUP */

#ifdef PMAPNAME
#define	STATIC			static
#define pmap_pte_spill		PMAPNAME(pte_spill)
#define pmap_real_memory	PMAPNAME(real_memory)
#define pmap_init		PMAPNAME(init)
#define pmap_virtual_space	PMAPNAME(virtual_space)
#define pmap_create		PMAPNAME(create)
#define pmap_reference		PMAPNAME(reference)
#define pmap_destroy		PMAPNAME(destroy)
#define pmap_copy		PMAPNAME(copy)
#define pmap_update		PMAPNAME(update)
#define pmap_enter		PMAPNAME(enter)
#define pmap_remove		PMAPNAME(remove)
#define pmap_kenter_pa		PMAPNAME(kenter_pa)
#define pmap_kremove		PMAPNAME(kremove)
#define pmap_extract		PMAPNAME(extract)
#define pmap_protect		PMAPNAME(protect)
#define pmap_unwire		PMAPNAME(unwire)
#define pmap_page_protect	PMAPNAME(page_protect)
#define pmap_query_bit		PMAPNAME(query_bit)
#define pmap_clear_bit		PMAPNAME(clear_bit)

#define pmap_activate		PMAPNAME(activate)
#define pmap_deactivate		PMAPNAME(deactivate)

#define pmap_pinit		PMAPNAME(pinit)
#define pmap_procwr		PMAPNAME(procwr)

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
#define pmap_pte_print		PMAPNAME(pte_print)
#define pmap_pteg_check		PMAPNAME(pteg_check)
#define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
#define pmap_print_pte		PMAPNAME(print_pte)
#define pmap_pteg_dist		PMAPNAME(pteg_dist)
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define pmap_pvo_verify		PMAPNAME(pvo_verify)
#define pmapcheck		PMAPNAME(check)
#endif
#if defined(DEBUG) || defined(PMAPDEBUG)
#define pmapdebug		PMAPNAME(debug)
#endif
#define pmap_steal_memory	PMAPNAME(steal_memory)
#define pmap_bootstrap		PMAPNAME(bootstrap)
#else
#define	STATIC			/* nothing */
#endif /* PMAPNAME */

STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
STATIC void pmap_real_memory(paddr_t *, psize_t *);
STATIC void pmap_init(void);
STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
STATIC pmap_t pmap_create(void);
STATIC void pmap_reference(pmap_t);
STATIC void pmap_destroy(pmap_t);
STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
STATIC void pmap_update(pmap_t);
STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_kremove(vaddr_t, vsize_t);
STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);

STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
STATIC void pmap_unwire(pmap_t, vaddr_t);
STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
STATIC bool pmap_query_bit(struct vm_page *, int);
STATIC bool pmap_clear_bit(struct vm_page *, int);

STATIC void pmap_activate(struct lwp *);
STATIC void pmap_deactivate(struct lwp *);

STATIC void pmap_pinit(pmap_t pm);
STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
STATIC void pmap_pte_print(volatile struct pte *);
STATIC void pmap_pteg_check(void);
STATIC void pmap_print_mmuregs(void);
STATIC void pmap_print_pte(pmap_t, vaddr_t);
STATIC void pmap_pteg_dist(void);
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
STATIC void pmap_pvo_verify(void);
#endif
STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
STATIC void pmap_bootstrap(paddr_t, paddr_t);

#ifdef PMAPNAME
const struct pmap_ops PMAPNAME(ops) = {
	.pmapop_pte_spill = pmap_pte_spill,
	.pmapop_real_memory = pmap_real_memory,
	.pmapop_init = pmap_init,
	.pmapop_virtual_space = pmap_virtual_space,
	.pmapop_create = pmap_create,
	.pmapop_reference = pmap_reference,
	.pmapop_destroy = pmap_destroy,
	.pmapop_copy = pmap_copy,
	.pmapop_update = pmap_update,
	.pmapop_enter = pmap_enter,
	.pmapop_remove = pmap_remove,
	.pmapop_kenter_pa = pmap_kenter_pa,
	.pmapop_kremove = pmap_kremove,
	.pmapop_extract = pmap_extract,
	.pmapop_protect = pmap_protect,
	.pmapop_unwire = pmap_unwire,
	.pmapop_page_protect = pmap_page_protect,
	.pmapop_query_bit = pmap_query_bit,
	.pmapop_clear_bit = pmap_clear_bit,
	.pmapop_activate = pmap_activate,
	.pmapop_deactivate = pmap_deactivate,
	.pmapop_pinit = pmap_pinit,
	.pmapop_procwr = pmap_procwr,
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
	.pmapop_pte_print = pmap_pte_print,
	.pmapop_pteg_check = pmap_pteg_check,
	.pmapop_print_mmuregs = pmap_print_mmuregs,
	.pmapop_print_pte = pmap_print_pte,
	.pmapop_pteg_dist = pmap_pteg_dist,
#else
	.pmapop_pte_print = NULL,
	.pmapop_pteg_check = NULL,
	.pmapop_print_mmuregs = NULL,
	.pmapop_print_pte = NULL,
	.pmapop_pteg_dist = NULL,
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
	.pmapop_pvo_verify = pmap_pvo_verify,
#else
	.pmapop_pvo_verify = NULL,
#endif
	.pmapop_steal_memory = pmap_steal_memory,
	.pmapop_bootstrap = pmap_bootstrap,
};
#endif /* !PMAPNAME */

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0		/* PVO has been removed */
#define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
#define	PVO_SPILL_SET		2		/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
#define	PVO_PMAP_PROTECT	5		/* PVO has changed */
#define	PVO_REMOVE		6		/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
u_long pmap_upvop_free;
u_long pmap_upvop_maxfree;
u_long pmap_mpvop_free;
u_long pmap_mpvop_maxfree;

static void *pmap_pool_ualloc(struct pool *, int);
static void *pmap_pool_malloc(struct pool *, int);

static void pmap_pool_ufree(struct pool *, void *);
static void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
static void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)			\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
static int pmap_pte_insert(int, struct pte *);
static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
static void pmap_pvo_free(struct pvo_entry *);
static void pmap_pvo_free_list(struct pvo_head *);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
static void pvo_set_exec(struct pvo_entry *);
static void pvo_clear_exec(struct pvo_entry *);

static void tlbia(void);

static void pmap_release(pmap_t);
static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x)		printf x
# define DPRINTFN(n, x)		if (pmapdebug & PMAPDEBUG_ ## n) printf x
#else
# define DPRINTF(x)
# define DPRINTFN(n, x)
#endif


#ifdef PMAPCOUNTERS
/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_mappings;
extern struct evcnt pmap_evcnt_unmappings;

extern struct evcnt pmap_evcnt_kernel_mappings;
extern struct evcnt pmap_evcnt_kernel_unmappings;

extern struct evcnt pmap_evcnt_mappings_replaced;

extern struct evcnt pmap_evcnt_exec_mappings;
extern struct evcnt pmap_evcnt_exec_cached;

extern struct evcnt pmap_evcnt_exec_synced;
extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;

extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;

extern struct evcnt pmap_evcnt_updates;
extern struct evcnt pmap_evcnt_collects;
extern struct evcnt pmap_evcnt_copies;

extern struct evcnt pmap_evcnt_ptes_spilled;
extern struct evcnt pmap_evcnt_ptes_unspilled;
extern struct evcnt pmap_evcnt_ptes_evicted;

extern struct evcnt pmap_evcnt_ptes_primary[8];
extern struct evcnt pmap_evcnt_ptes_secondary[8];
extern struct evcnt pmap_evcnt_ptes_removed;
extern struct evcnt pmap_evcnt_ptes_changed;
extern struct evcnt pmap_evcnt_pvos_reclaimed;
extern struct evcnt pmap_evcnt_pvos_failed;

extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#define	TLBSYNC()	__asm volatile("tlbsync")
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif	/* PMAP_OEA */

#if defined (PMAP_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PMAP_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}

static inline u_int32_t
mfrtcltbl(void)
{
#ifdef PPC_OEA601
	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
#endif
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU.  XXX
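	 *
	 * tlbie invalidates the TLB congruence class selected by bits of
	 * the EA, so walking 64 consecutive page addresses below is enough
	 * to cover every class on these CPUs and flush the whole TLB.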
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address doesn't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
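 * (VSID_TO_SR() below recovers those reserved bits, which name the segment
 * and therefore the top of the VA.)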
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PMAP_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PMAP_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
	struct vm_page *pg;
	struct vm_page_md *md;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	md = VM_PAGE_TO_MD(pg);
	return &md->mdpg_pvoh;
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return &md->mdpg_pvoh;
}


static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs &= ~ptebit;
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return md->mdpg_attrs;
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs |= ptebit;
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
		~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
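	 *
	 * The high word packs the VSID and the abbreviated page index (API)
	 * of the VA; the low word is the caller-supplied RPN | WIMG | PP bits.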
	 */
#if defined(PMAP_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#endif /* PMAP_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

static int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, ("pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
	    ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
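	 * The secondary PTEG index is the complement of the primary hash
	 * (ptegidx ^ pmap_pteg_mask), and PTEs placed there carry PTE_HID.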
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with a exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
		    < PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find the pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs always are first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
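		 *
		 * This test only matches a primary-hash victim; if the chosen
		 * slot holds a secondary-hash PTE, the victim is looked up in
		 * the other bucket after the loop.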
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		PMAP_UNLOCK();
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];	/* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA we are replacing
	 * even though it's valid; if we don't, we lose any ref/chg bit
	 * changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	PMAP_UNLOCK();
	return 1;
}

/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator, IPL_NONE);

	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;

}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	memset((void *)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE,("pmap_create: pm %p:\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
	    pm,
	    pm->pm_sr[0], pm->pm_sr[1],
	    pm->pm_sr[2], pm->pm_sr[3],
	    pm->pm_sr[4], pm->pm_sr[5],
	    pm->pm_sr[6], pm->pm_sr[7],
	    pm->pm_sr[8], pm->pm_sr[9],
	    pm->pm_sr[10], pm->pm_sr[11],
	    pm->pm_sr[12], pm->pm_sr[13],
	    pm->pm_sr[14], pm->pm_sr[15]));
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	register_t entropy = MFTB();
	register_t mask;
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	PMAP_LOCK();
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		static register_t pmap_vsidcontext;
		register_t hash;
		unsigned int n;

		/* Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT Hash function collides
		 * less often.  (note that the prime causes gcc to do shifts
		 * instead of a multiply)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0) {		/* 0 is special, avoid it */
			entropy += 0xbadf00d;
			continue;
		}
		n = hash >> 5;
		mask = 1L << (hash & (VSID_NBPW-1));
		hash = pmap_vsidcontext;
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (~pmap_vsid_bitmap[n] == 0) {
				entropy = hash ^ (hash >> 16);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1L << i;
			hash &= ~(VSID_NBPW-1);
			hash |= i;
		}
		hash &= PTE_VSID >> PTE_VSID_SHFT;
		pmap_vsid_bitmap[n] |= mask;
		pm->pm_vsid = hash;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
			    SR_NOEXEC;
#endif
		PMAP_UNLOCK();
		return;
	}
	PMAP_UNLOCK();
	panic("pmap_pinit: out of segments");
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	atomic_inc_uint(&pm->pm_refs);
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
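 * Dropping the last reference calls pmap_release(), which returns the
 * pmap's VSID to pmap_vsid_bitmap.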
 */
void
pmap_destroy(pmap_t pm)
{
	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);

	PMAP_LOCK();
	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = pm->pm_vsid & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;

	KASSERT(pmap_vsid_bitmap[idx] & mask);
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_UNLOCK();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

static inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by
	 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
	 * and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}

volatile struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile struct pte *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}
	return NULL;
#endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
}

struct pvo_entry *
pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;

	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
			panic("pmap_pvo_find_va: invalid pvo %p on "
			    "list %#x (%p)", pvo, ptegidx,
			    &pmap_pvo_table[ptegidx]);
#endif
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
			return pvo;
		}
	}
	if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
		panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
		    __func__, (pm == pmap_kernel() ?
"kernel" : "user"), va); 1408 return NULL; 1409 } 1410 1411 #if defined(DEBUG) || defined(PMAPCHECK) 1412 void 1413 pmap_pvo_check(const struct pvo_entry *pvo) 1414 { 1415 struct pvo_head *pvo_head; 1416 struct pvo_entry *pvo0; 1417 volatile struct pte *pt; 1418 int failed = 0; 1419 1420 PMAP_LOCK(); 1421 1422 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1423 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1424 1425 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1426 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1427 pvo, pvo->pvo_pmap); 1428 failed = 1; 1429 } 1430 1431 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1432 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1433 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1434 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1435 failed = 1; 1436 } 1437 1438 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1439 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1440 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1441 pvo, LIST_NEXT(pvo, pvo_vlink)); 1442 failed = 1; 1443 } 1444 1445 if (PVO_MANAGED_P(pvo)) { 1446 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1447 } else { 1448 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) { 1449 printf("pmap_pvo_check: pvo %p: non kernel address " 1450 "on kernel unmanaged list\n", pvo); 1451 failed = 1; 1452 } 1453 pvo_head = &pmap_pvo_kunmanaged; 1454 } 1455 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1456 if (pvo0 == pvo) 1457 break; 1458 } 1459 if (pvo0 == NULL) { 1460 printf("pmap_pvo_check: pvo %p: not present " 1461 "on its vlist head %p\n", pvo, pvo_head); 1462 failed = 1; 1463 } 1464 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1465 printf("pmap_pvo_check: pvo %p: not present " 1466 "on its olist head\n", pvo); 1467 failed = 1; 1468 } 1469 pt = pmap_pvo_to_pte(pvo, -1); 1470 if (pt == NULL) { 1471 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1472 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1473 "no PTE\n", pvo); 1474 failed = 1; 1475 } 1476 } else { 1477 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1478 (uintptr_t) pt >= 1479 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1480 printf("pmap_pvo_check: pvo %p: pte %p not in " 1481 "pteg table\n", pvo, pt); 1482 failed = 1; 1483 } 1484 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1485 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1486 "no PTE\n", pvo); 1487 failed = 1; 1488 } 1489 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1490 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1491 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1492 pvo->pvo_pte.pte_hi, 1493 pt->pte_hi); 1494 failed = 1; 1495 } 1496 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1497 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1498 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1499 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1500 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1501 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1502 failed = 1; 1503 } 1504 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1505 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1506 " doesn't not match PVO's VA %#" _PRIxva "\n", 1507 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1508 failed = 1; 1509 } 1510 if (failed) 1511 pmap_pte_print(pt); 1512 } 1513 if (failed) 1514 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1515 pvo->pvo_pmap); 1516 1517 PMAP_UNLOCK(); 1518 } 1519 #endif /* DEBUG || PMAPCHECK */ 1520 1521 /* 1522 * Search the PVO table 
 */

struct pvo_entry *
pmap_pvo_reclaim(struct pmap *pm)
{
	struct pvo_tqhead *pvoh;
	struct pvo_entry *pvo;
	uint32_t idx, endidx;

	endidx = pmap_pvo_reclaim_nextidx;
	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
	    idx = (idx + 1) & pmap_pteg_mask) {
		pvoh = &pmap_pvo_table[idx];
		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
			if (!PVO_WIRED_P(pvo)) {
				pmap_pvo_remove(pvo, -1, NULL);
				pmap_pvo_reclaim_nextidx = idx;
				PMAPCOUNT(pvos_reclaimed);
				return pvo;
			}
		}
	}
	return NULL;
}

/*
 * This returns whether this is the first mapping of a page.
 */
int
pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh;
	register_t msr;
	int ptegidx;
	int i;
	int poolflags = PR_NOWAIT;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	msr = pmap_interrupts_off();

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pmap_pvo_remove_depth > 0)
		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
	if (++pmap_pvo_enter_depth > 1)
		panic("pmap_pvo_enter: called recursively!");
#endif

	/*
	 * Remove any existing mapping for this page.  Reuse the
	 * pvo entry if there is a mapping.
	 */
	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
#ifdef DEBUG
			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
			    ~(PTE_REF|PTE_CHG)) == 0 &&
			    va < VM_MIN_KERNEL_ADDRESS) {
				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
				    pvo->pvo_pte.pte_hi,
				    pm->pm_sr[va >> ADDR_SR_SHFT]);
				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
#ifdef DDBX
				Debugger();
#endif
			}
#endif
			PMAPCOUNT(mappings_replaced);
			pmap_pvo_remove(pvo, -1, NULL);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	--pmap_pvo_enter_depth;
#endif
	pmap_interrupts_restore(msr);
	if (pvo) {
		pmap_pvo_free(pvo);
	}
	pvo = pool_get(pl, poolflags);

#ifdef DEBUG
	/*
	 * Exercise pmap_pvo_reclaim() a little.
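	 * Deliberately drop an occasional successful allocation so the
	 * reclaim path below gets exercised even when the pool has memory.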
	 */
	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
		pool_put(pl, pvo);
		pvo = NULL;
	}
#endif

	msr = pmap_interrupts_off();
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	++pmap_pvo_enter_depth;
#endif
	if (pvo == NULL) {
		pvo = pmap_pvo_reclaim(pm);
		if (pvo == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_pvo_enter: failed");
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
			pmap_pvo_enter_depth--;
#endif
			PMAPCOUNT(pvos_failed);
			pmap_interrupts_restore(msr);
			return ENOMEM;
		}
	}

	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE) {
		PMAPCOUNT(exec_mappings);
		pvo_set_exec(pvo);
	}
	if (flags & PMAP_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged) {
		pvo->pvo_vaddr |= PVO_MANAGED;
		PMAPCOUNT(mappings);
	} else {
		PMAPCOUNT(kernel_mappings);
	}
	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;
#if defined(DEBUG)
/*	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
	DPRINTFN(PVOENTER,
	    ("pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
	    pvo, pm, va, pa));
#endif

	/*
	 * We hope this succeeds but it isn't required.
	 */
	pvoh = &pmap_pvo_table[ptegidx];
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
		PVO_WHERE(pvo, ENTER_INSERT);
		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);

	} else {
		/*
		 * Since we didn't have room for this entry (which makes it
		 * an evicted entry), place it at the head of the list.
		 */
		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
		PMAPCOUNT(ptes_evicted);
		pm->pm_evictions++;
		/*
		 * If this is a kernel page, make sure it's active.
		 */
		if (pm == pmap_kernel()) {
			i = pmap_pte_spill(pm, va, false);
			KASSERT(i);
		}
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_enter_depth--;
#endif
	pmap_interrupts_restore(msr);
	return 0;
}

static void
pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
{
	volatile struct pte *pt;
	int ptegidx;

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (++pmap_pvo_remove_depth > 1)
		panic("pmap_pvo_remove: called recursively!");
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	} else {
		ptegidx = pteidx >> 3;
		if (pvo->pvo_pte.pte_hi & PTE_HID)
			ptegidx ^= pmap_pteg_mask;
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */

	/*
	 * If there is an active pte entry, we need to deactivate it
	 * (and save the ref & chg bits).
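	 * Otherwise the mapping had already been evicted from the page
	 * table, so just drop the pmap's eviction count.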
	 */
	pt = pmap_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_WHERE(pvo, REMOVE);
		PVO_PTEGIDX_CLR(pvo);
		PMAPCOUNT(ptes_removed);
	} else {
		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
		pvo->pvo_pmap->pm_evictions--;
	}

	/*
	 * Account for executable mappings.
	 */
	if (PVO_EXECUTABLE_P(pvo))
		pvo_clear_exec(pvo);

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if (PVO_MANAGED_P(pvo)) {
		register_t ptelo = pvo->pvo_pte.pte_lo;
		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);

		if (pg != NULL) {
			/*
			 * If this page was changed and it is mapped exec,
			 * invalidate it.
			 */
			if ((ptelo & PTE_CHG) &&
			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
				if (LIST_EMPTY(pvoh)) {
					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
					    "%#" _PRIxpa ": clear-exec]\n",
					    VM_PAGE_TO_PHYS(pg)));
					pmap_attr_clear(pg, PTE_EXEC);
					PMAPCOUNT(exec_uncached_pvo_remove);
				} else {
					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
					    "%#" _PRIxpa ": syncicache]\n",
					    VM_PAGE_TO_PHYS(pg)));
					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
					    PAGE_SIZE);
					PMAPCOUNT(exec_synced_pvo_remove);
				}
			}

			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
		}
		PMAPCOUNT(unmappings);
	} else {
		PMAPCOUNT(kernel_unmappings);
	}

	/*
	 * Remove the PVO from its lists and return it to the pool.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	if (pvol) {
		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
	}
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_remove_depth--;
#endif
}

void
pmap_pvo_free(struct pvo_entry *pvo)
{

	pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
}

void
pmap_pvo_free_list(struct pvo_head *pvol)
{
	struct pvo_entry *pvo, *npvo;

	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
		npvo = LIST_NEXT(pvo, pvo_vlink);
		LIST_REMOVE(pvo, pvo_vlink);
		pmap_pvo_free(pvo);
	}
}

/*
 * Mark a mapping as executable.
 * If this is the first executable mapping in the segment,
 * clear the noexec flag.
 */
static void
pvo_set_exec(struct pvo_entry *pvo)
{
	struct pmap *pm = pvo->pvo_pmap;

	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
		return;
	}
	pvo->pvo_vaddr |= PVO_EXECUTABLE;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	{
		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
		if (pm->pm_exec[sr]++ == 0) {
			pm->pm_sr[sr] &= ~SR_NOEXEC;
		}
	}
#endif
}

/*
 * Mark a mapping as non-executable.
 * If this was the last executable mapping in the segment,
 * set the noexec flag.
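 * pm_exec[] counts executable mappings per 256MB segment, so SR_NOEXEC
 * is only toggled on the first and last executable mapping of a segment.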
 */
static void
pvo_clear_exec(struct pvo_entry *pvo)
{
	struct pmap *pm = pvo->pvo_pmap;

	if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
		return;
	}
	pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	{
		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
		if (--pm->pm_exec[sr] == 0) {
			pm->pm_sr[sr] |= SR_NOEXEC;
		}
	}
#endif
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	struct mem_region *mp;
	struct pvo_head *pvo_head;
	struct vm_page *pg;
	struct pool *pl;
	register_t pte_lo;
	int error;
	u_int pvo_flags;
	u_int was_exec = 0;

	PMAP_LOCK();

	if (__predict_false(!pmap_initialized)) {
		pvo_head = &pmap_pvo_kunmanaged;
		pl = &pmap_upvo_pool;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = pa_to_pvoh(pa, &pg);
		pl = &pmap_mpvo_pool;
		pvo_flags = PVO_MANAGED;
	}

	DPRINTFN(ENTER,
	    ("pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
	    pm, va, pa, prot, flags));

	/*
	 * If this is a managed page, and it's the first reference to the
	 * page, clear the execness of the page.  Otherwise fetch the execness.
	 */
	if (pg != NULL)
		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;

	DPRINTFN(ENTER, (" was_exec=%d", was_exec));

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.  If it is in the memory array,
	 * assume it's in memory coherent memory.
	 */
	if (flags & PMAP_MD_PREFETCHABLE) {
		pte_lo = 0;
	} else
		pte_lo = PTE_G;

	if ((flags & PMAP_MD_NOCACHE) == 0) {
		for (mp = mem; mp->size; mp++) {
			if (pa >= mp->start && pa < mp->start + mp->size) {
				pte_lo = PTE_M;
				break;
			}
		}
	} else {
		pte_lo |= PTE_I;
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	/*
	 * If this was in response to a fault, "pre-fault" the PTE's
	 * changed/referenced bit appropriately.
	 */
	if (flags & VM_PROT_WRITE)
		pte_lo |= PTE_CHG;
	if (flags & VM_PROT_ALL)
		pte_lo |= PTE_REF;

	/*
	 * We need to know if this page can be executable
	 */
	flags |= (prot & VM_PROT_EXECUTE);

	/*
	 * Record mapping for later back-translation and pte spilling.
	 * This will overwrite any existing mapping.
	 */
	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and has not been flushed since
	 * the last time it was modified.
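	 * (was_exec is non-zero either before pmap_init() has run or when
	 * the page already carries the PTE_EXEC attribute, and in that case
	 * the flush is skipped.)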
1974 */ 1975 if (error == 0 && 1976 (flags & VM_PROT_EXECUTE) && 1977 (pte_lo & PTE_I) == 0 && 1978 was_exec == 0) { 1979 DPRINTFN(ENTER, (" syncicache")); 1980 PMAPCOUNT(exec_synced); 1981 pmap_syncicache(pa, PAGE_SIZE); 1982 if (pg != NULL) { 1983 pmap_attr_save(pg, PTE_EXEC); 1984 PMAPCOUNT(exec_cached); 1985 #if defined(DEBUG) || defined(PMAPDEBUG) 1986 if (pmapdebug & PMAPDEBUG_ENTER) 1987 printf(" marked-as-exec"); 1988 else if (pmapdebug & PMAPDEBUG_EXEC) 1989 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 1990 VM_PAGE_TO_PHYS(pg)); 1991 1992 #endif 1993 } 1994 } 1995 1996 DPRINTFN(ENTER, (": error=%d\n", error)); 1997 1998 PMAP_UNLOCK(); 1999 2000 return error; 2001 } 2002 2003 void 2004 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2005 { 2006 struct mem_region *mp; 2007 register_t pte_lo; 2008 int error; 2009 2010 #if defined (PMAP_OEA64_BRIDGE) 2011 if (va < VM_MIN_KERNEL_ADDRESS) 2012 panic("pmap_kenter_pa: attempt to enter " 2013 "non-kernel address %#" _PRIxva "!", va); 2014 #endif 2015 2016 DPRINTFN(KENTER, 2017 ("pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot)); 2018 2019 PMAP_LOCK(); 2020 2021 /* 2022 * Assume the page is cache inhibited and access is guarded unless 2023 * it's in our available memory array. If it is in the memory array, 2024 * asssume it's in memory coherent memory. 2025 */ 2026 pte_lo = PTE_IG; 2027 if ((flags & PMAP_MD_NOCACHE) == 0) { 2028 for (mp = mem; mp->size; mp++) { 2029 if (pa >= mp->start && pa < mp->start + mp->size) { 2030 pte_lo = PTE_M; 2031 break; 2032 } 2033 } 2034 } 2035 2036 if (prot & VM_PROT_WRITE) 2037 pte_lo |= PTE_BW; 2038 else 2039 pte_lo |= PTE_BR; 2040 2041 /* 2042 * We don't care about REF/CHG on PVOs on the unmanaged list. 2043 */ 2044 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool, 2045 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED); 2046 2047 if (error != 0) 2048 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2049 va, pa, error); 2050 2051 PMAP_UNLOCK(); 2052 } 2053 2054 void 2055 pmap_kremove(vaddr_t va, vsize_t len) 2056 { 2057 if (va < VM_MIN_KERNEL_ADDRESS) 2058 panic("pmap_kremove: attempt to remove " 2059 "non-kernel address %#" _PRIxva "!", va); 2060 2061 DPRINTFN(KREMOVE,("pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len)); 2062 pmap_remove(pmap_kernel(), va, va + len); 2063 } 2064 2065 /* 2066 * Remove the given range of mapping entries. 2067 */ 2068 void 2069 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2070 { 2071 struct pvo_head pvol; 2072 struct pvo_entry *pvo; 2073 register_t msr; 2074 int pteidx; 2075 2076 PMAP_LOCK(); 2077 LIST_INIT(&pvol); 2078 msr = pmap_interrupts_off(); 2079 for (; va < endva; va += PAGE_SIZE) { 2080 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2081 if (pvo != NULL) { 2082 pmap_pvo_remove(pvo, pteidx, &pvol); 2083 } 2084 } 2085 pmap_interrupts_restore(msr); 2086 pmap_pvo_free_list(&pvol); 2087 PMAP_UNLOCK(); 2088 } 2089 2090 /* 2091 * Get the physical page address for the given pmap/virtual address. 2092 */ 2093 bool 2094 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2095 { 2096 struct pvo_entry *pvo; 2097 register_t msr; 2098 2099 PMAP_LOCK(); 2100 2101 /* 2102 * If this is a kernel pmap lookup, also check the battable 2103 * and if we get a hit, translate the VA to a PA using the 2104 * BAT entries. Don't check for VM_MAX_KERNEL_ADDRESS is 2105 * that will wrap back to 0. 
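 *
 * (The BAT case below recovers the physical address using a mask derived
 * from the block-length field.  As a worked example for a non-601 256MB
 * BAT, assuming the usual BAT_BL field spanning bits 2..12, batu & BAT_BL
 * is 0x1ffc and, in 32-bit register arithmetic,
 *
 *	mask = (~0x1ffc << 15) & ~0x1ffffL = 0xf0000000
 *
 * so *pap = (batl & 0xf0000000) | (va & 0x0fffffff): the BRPN supplies
 * the top nibble and the offset within the 256MB block comes from va.)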
2106 */ 2107 if (pm == pmap_kernel() && 2108 (va < VM_MIN_KERNEL_ADDRESS || 2109 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 2110 KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 2111 #if defined (PMAP_OEA) 2112 #ifdef PPC_OEA601 2113 if ((MFPVR() >> 16) == MPC601) { 2114 register_t batu = battable[va >> 23].batu; 2115 register_t batl = battable[va >> 23].batl; 2116 register_t sr = iosrtable[va >> ADDR_SR_SHFT]; 2117 if (BAT601_VALID_P(batl) && 2118 BAT601_VA_MATCH_P(batu, batl, va)) { 2119 register_t mask = 2120 (~(batl & BAT601_BSM) << 17) & ~0x1ffffL; 2121 if (pap) 2122 *pap = (batl & mask) | (va & ~mask); 2123 PMAP_UNLOCK(); 2124 return true; 2125 } else if (SR601_VALID_P(sr) && 2126 SR601_PA_MATCH_P(sr, va)) { 2127 if (pap) 2128 *pap = va; 2129 PMAP_UNLOCK(); 2130 return true; 2131 } 2132 } else 2133 #endif /* PPC_OEA601 */ 2134 { 2135 register_t batu = battable[va >> ADDR_SR_SHFT].batu; 2136 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) { 2137 register_t batl = 2138 battable[va >> ADDR_SR_SHFT].batl; 2139 register_t mask = 2140 (~(batu & BAT_BL) << 15) & ~0x1ffffL; 2141 if (pap) 2142 *pap = (batl & mask) | (va & ~mask); 2143 PMAP_UNLOCK(); 2144 return true; 2145 } 2146 } 2147 return false; 2148 #elif defined (PMAP_OEA64_BRIDGE) 2149 if (va >= SEGMENT_LENGTH) 2150 panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n", 2151 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va); 2152 else { 2153 if (pap) 2154 *pap = va; 2155 PMAP_UNLOCK(); 2156 return true; 2157 } 2158 #elif defined (PMAP_OEA64) 2159 #error PPC_OEA64 not supported 2160 #endif /* PPC_OEA */ 2161 } 2162 2163 msr = pmap_interrupts_off(); 2164 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2165 if (pvo != NULL) { 2166 PMAP_PVO_CHECK(pvo); /* sanity check */ 2167 if (pap) 2168 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) 2169 | (va & ADDR_POFF); 2170 } 2171 pmap_interrupts_restore(msr); 2172 PMAP_UNLOCK(); 2173 return pvo != NULL; 2174 } 2175 2176 /* 2177 * Lower the protection on the specified range of this pmap. 2178 */ 2179 void 2180 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot) 2181 { 2182 struct pvo_entry *pvo; 2183 volatile struct pte *pt; 2184 register_t msr; 2185 int pteidx; 2186 2187 /* 2188 * Since this routine only downgrades protection, we should 2189 * always be called with at least one bit not set. 2190 */ 2191 KASSERT(prot != VM_PROT_ALL); 2192 2193 /* 2194 * If there is no protection, this is equivalent to 2195 * remove the pmap from the pmap. 2196 */ 2197 if ((prot & VM_PROT_READ) == 0) { 2198 pmap_remove(pm, va, endva); 2199 return; 2200 } 2201 2202 PMAP_LOCK(); 2203 2204 msr = pmap_interrupts_off(); 2205 for (; va < endva; va += PAGE_SIZE) { 2206 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2207 if (pvo == NULL) 2208 continue; 2209 PMAP_PVO_CHECK(pvo); /* sanity check */ 2210 2211 /* 2212 * Revoke executable if asked to do so. 2213 */ 2214 if ((prot & VM_PROT_EXECUTE) == 0) 2215 pvo_clear_exec(pvo); 2216 2217 #if 0 2218 /* 2219 * If the page is already read-only, no change 2220 * needs to be made. 2221 */ 2222 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) 2223 continue; 2224 #endif 2225 /* 2226 * Grab the PTE pointer before we diddle with 2227 * the cached PTE copy. 2228 */ 2229 pt = pmap_pvo_to_pte(pvo, pteidx); 2230 /* 2231 * Change the protection of the page. 2232 */ 2233 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2234 pvo->pvo_pte.pte_lo |= PTE_BR; 2235 2236 /* 2237 * If the PVO is in the page table, update 2238 * that pte at well. 
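 *
 * (Only the PTE_BR/PTE_BW protection encodings are used for these
 * mappings -- see pmap_enter() -- so the downgrade above amounts to
 *
 *	pte_lo = (pte_lo & ~PTE_PP) | PTE_BR;	read-only
 *
 * and any hashed copy of the PTE has to be rewritten as well, or the
 * MMU would keep honouring the old, writable copy until it was evicted.)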
2239 */ 2240 if (pt != NULL) { 2241 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2242 PVO_WHERE(pvo, PMAP_PROTECT); 2243 PMAPCOUNT(ptes_changed); 2244 } 2245 2246 PMAP_PVO_CHECK(pvo); /* sanity check */ 2247 } 2248 pmap_interrupts_restore(msr); 2249 PMAP_UNLOCK(); 2250 } 2251 2252 void 2253 pmap_unwire(pmap_t pm, vaddr_t va) 2254 { 2255 struct pvo_entry *pvo; 2256 register_t msr; 2257 2258 PMAP_LOCK(); 2259 msr = pmap_interrupts_off(); 2260 pvo = pmap_pvo_find_va(pm, va, NULL); 2261 if (pvo != NULL) { 2262 if (PVO_WIRED_P(pvo)) { 2263 pvo->pvo_vaddr &= ~PVO_WIRED; 2264 pm->pm_stats.wired_count--; 2265 } 2266 PMAP_PVO_CHECK(pvo); /* sanity check */ 2267 } 2268 pmap_interrupts_restore(msr); 2269 PMAP_UNLOCK(); 2270 } 2271 2272 /* 2273 * Lower the protection on the specified physical page. 2274 */ 2275 void 2276 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2277 { 2278 struct pvo_head *pvo_head, pvol; 2279 struct pvo_entry *pvo, *next_pvo; 2280 volatile struct pte *pt; 2281 register_t msr; 2282 2283 PMAP_LOCK(); 2284 2285 KASSERT(prot != VM_PROT_ALL); 2286 LIST_INIT(&pvol); 2287 msr = pmap_interrupts_off(); 2288 2289 /* 2290 * When UVM reuses a page, it does a pmap_page_protect with 2291 * VM_PROT_NONE. At that point, we can clear the exec flag 2292 * since we know the page will have different contents. 2293 */ 2294 if ((prot & VM_PROT_READ) == 0) { 2295 DPRINTFN(EXEC, ("[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2296 VM_PAGE_TO_PHYS(pg))); 2297 if (pmap_attr_fetch(pg) & PTE_EXEC) { 2298 PMAPCOUNT(exec_uncached_page_protect); 2299 pmap_attr_clear(pg, PTE_EXEC); 2300 } 2301 } 2302 2303 pvo_head = vm_page_to_pvoh(pg); 2304 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2305 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2306 PMAP_PVO_CHECK(pvo); /* sanity check */ 2307 2308 /* 2309 * Downgrading to no mapping at all, we just remove the entry. 2310 */ 2311 if ((prot & VM_PROT_READ) == 0) { 2312 pmap_pvo_remove(pvo, -1, &pvol); 2313 continue; 2314 } 2315 2316 /* 2317 * If EXEC permission is being revoked, just clear the 2318 * flag in the PVO. 2319 */ 2320 if ((prot & VM_PROT_EXECUTE) == 0) 2321 pvo_clear_exec(pvo); 2322 2323 /* 2324 * If this entry is already RO, don't diddle with the 2325 * page table. 2326 */ 2327 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2328 PMAP_PVO_CHECK(pvo); 2329 continue; 2330 } 2331 2332 /* 2333 * Grab the PTE before the we diddle the bits so 2334 * pvo_to_pte can verify the pte contents are as 2335 * expected. 2336 */ 2337 pt = pmap_pvo_to_pte(pvo, -1); 2338 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2339 pvo->pvo_pte.pte_lo |= PTE_BR; 2340 if (pt != NULL) { 2341 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2342 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2343 PMAPCOUNT(ptes_changed); 2344 } 2345 PMAP_PVO_CHECK(pvo); /* sanity check */ 2346 } 2347 pmap_interrupts_restore(msr); 2348 pmap_pvo_free_list(&pvol); 2349 2350 PMAP_UNLOCK(); 2351 } 2352 2353 /* 2354 * Activate the address space for the specified process. If the process 2355 * is the current process, load the new MMU context. 2356 */ 2357 void 2358 pmap_activate(struct lwp *l) 2359 { 2360 struct pcb *pcb = lwp_getpcb(l); 2361 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2362 2363 DPRINTFN(ACTIVATE, 2364 ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp)); 2365 2366 /* 2367 * XXX Normally performed in cpu_lwp_fork(). 2368 */ 2369 pcb->pcb_pm = pmap; 2370 2371 /* 2372 * In theory, the SR registers need only be valid on return 2373 * to user space wait to do them there. 
2374 */ 2375 if (l == curlwp) { 2376 /* Store pointer to new current pmap. */ 2377 curpm = pmap; 2378 } 2379 } 2380 2381 /* 2382 * Deactivate the specified process's address space. 2383 */ 2384 void 2385 pmap_deactivate(struct lwp *l) 2386 { 2387 } 2388 2389 bool 2390 pmap_query_bit(struct vm_page *pg, int ptebit) 2391 { 2392 struct pvo_entry *pvo; 2393 volatile struct pte *pt; 2394 register_t msr; 2395 2396 PMAP_LOCK(); 2397 2398 if (pmap_attr_fetch(pg) & ptebit) { 2399 PMAP_UNLOCK(); 2400 return true; 2401 } 2402 2403 msr = pmap_interrupts_off(); 2404 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2405 PMAP_PVO_CHECK(pvo); /* sanity check */ 2406 /* 2407 * See if we saved the bit off. If so cache, it and return 2408 * success. 2409 */ 2410 if (pvo->pvo_pte.pte_lo & ptebit) { 2411 pmap_attr_save(pg, ptebit); 2412 PMAP_PVO_CHECK(pvo); /* sanity check */ 2413 pmap_interrupts_restore(msr); 2414 PMAP_UNLOCK(); 2415 return true; 2416 } 2417 } 2418 /* 2419 * No luck, now go thru the hard part of looking at the ptes 2420 * themselves. Sync so any pending REF/CHG bits are flushed 2421 * to the PTEs. 2422 */ 2423 SYNC(); 2424 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2425 PMAP_PVO_CHECK(pvo); /* sanity check */ 2426 /* 2427 * See if this pvo have a valid PTE. If so, fetch the 2428 * REF/CHG bits from the valid PTE. If the appropriate 2429 * ptebit is set, cache, it and return success. 2430 */ 2431 pt = pmap_pvo_to_pte(pvo, -1); 2432 if (pt != NULL) { 2433 pmap_pte_synch(pt, &pvo->pvo_pte); 2434 if (pvo->pvo_pte.pte_lo & ptebit) { 2435 pmap_attr_save(pg, ptebit); 2436 PMAP_PVO_CHECK(pvo); /* sanity check */ 2437 pmap_interrupts_restore(msr); 2438 PMAP_UNLOCK(); 2439 return true; 2440 } 2441 } 2442 } 2443 pmap_interrupts_restore(msr); 2444 PMAP_UNLOCK(); 2445 return false; 2446 } 2447 2448 bool 2449 pmap_clear_bit(struct vm_page *pg, int ptebit) 2450 { 2451 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2452 struct pvo_entry *pvo; 2453 volatile struct pte *pt; 2454 register_t msr; 2455 int rv = 0; 2456 2457 PMAP_LOCK(); 2458 msr = pmap_interrupts_off(); 2459 2460 /* 2461 * Fetch the cache value 2462 */ 2463 rv |= pmap_attr_fetch(pg); 2464 2465 /* 2466 * Clear the cached value. 2467 */ 2468 pmap_attr_clear(pg, ptebit); 2469 2470 /* 2471 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2472 * can reset the right ones). Note that since the pvo entries and 2473 * list heads are accessed via BAT0 and are never placed in the 2474 * page table, we don't have to worry about further accesses setting 2475 * the REF/CHG bits. 2476 */ 2477 SYNC(); 2478 2479 /* 2480 * For each pvo entry, clear pvo's ptebit. If this pvo have a 2481 * valid PTE. If so, clear the ptebit from the valid PTE. 2482 */ 2483 LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2484 PMAP_PVO_CHECK(pvo); /* sanity check */ 2485 pt = pmap_pvo_to_pte(pvo, -1); 2486 if (pt != NULL) { 2487 /* 2488 * Only sync the PTE if the bit we are looking 2489 * for is not already set. 2490 */ 2491 if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2492 pmap_pte_synch(pt, &pvo->pvo_pte); 2493 /* 2494 * If the bit we are looking for was already set, 2495 * clear that bit in the pte. 
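 *
 * (rv is what makes the return value work: it starts from the cached
 * attributes fetched above, accumulates REF/CHG from every live PTE,
 * and the caller learns whether ptebit was set anywhere before the
 * clear.  Typical uses, presumably via the MI wrappers, look like
 *
 *	was_modified   = pmap_clear_bit(pg, PTE_CHG);
 *	was_referenced = pmap_clear_bit(pg, PTE_REF);
 *
 * which is the behaviour pmap_clear_modify()/pmap_clear_reference()
 * expect.)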
2496 */ 2497 if (pvo->pvo_pte.pte_lo & ptebit) 2498 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2499 } 2500 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2501 pvo->pvo_pte.pte_lo &= ~ptebit; 2502 PMAP_PVO_CHECK(pvo); /* sanity check */ 2503 } 2504 pmap_interrupts_restore(msr); 2505 2506 /* 2507 * If we are clearing the modify bit and this page was marked EXEC 2508 * and the user of the page thinks the page was modified, then we 2509 * need to clean it from the icache if it's mapped or clear the EXEC 2510 * bit if it's not mapped. The page itself might not have the CHG 2511 * bit set if the modification was done via DMA to the page. 2512 */ 2513 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2514 if (LIST_EMPTY(pvoh)) { 2515 DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2516 VM_PAGE_TO_PHYS(pg))); 2517 pmap_attr_clear(pg, PTE_EXEC); 2518 PMAPCOUNT(exec_uncached_clear_modify); 2519 } else { 2520 DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2521 VM_PAGE_TO_PHYS(pg))); 2522 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2523 PMAPCOUNT(exec_synced_clear_modify); 2524 } 2525 } 2526 PMAP_UNLOCK(); 2527 return (rv & ptebit) != 0; 2528 } 2529 2530 void 2531 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2532 { 2533 struct pvo_entry *pvo; 2534 size_t offset = va & ADDR_POFF; 2535 int s; 2536 2537 PMAP_LOCK(); 2538 s = splvm(); 2539 while (len > 0) { 2540 size_t seglen = PAGE_SIZE - offset; 2541 if (seglen > len) 2542 seglen = len; 2543 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2544 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2545 pmap_syncicache( 2546 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2547 PMAP_PVO_CHECK(pvo); 2548 } 2549 va += seglen; 2550 len -= seglen; 2551 offset = 0; 2552 } 2553 splx(s); 2554 PMAP_UNLOCK(); 2555 } 2556 2557 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2558 void 2559 pmap_pte_print(volatile struct pte *pt) 2560 { 2561 printf("PTE %p: ", pt); 2562 2563 #if defined(PMAP_OEA) 2564 /* High word: */ 2565 printf("%#" _PRIxpte ": [", pt->pte_hi); 2566 #else 2567 printf("%#" _PRIxpte ": [", pt->pte_hi); 2568 #endif /* PMAP_OEA */ 2569 2570 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2571 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2572 2573 printf("%#" _PRIxpte " %#" _PRIxpte "", 2574 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2575 pt->pte_hi & PTE_API); 2576 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2577 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2578 #else 2579 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2580 #endif /* PMAP_OEA */ 2581 2582 /* Low word: */ 2583 #if defined (PMAP_OEA) 2584 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2585 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2586 #else 2587 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2588 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2589 #endif 2590 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2591 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2592 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2593 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2594 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2595 printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2596 switch (pt->pte_lo & PTE_PP) { 2597 case PTE_BR: printf("br]\n"); break; 2598 case PTE_BW: printf("bw]\n"); break; 2599 case PTE_SO: printf("so]\n"); break; 2600 case PTE_SW: printf("sw]\n"); break; 2601 } 2602 } 2603 #endif 2604 2605 #if defined(DDB) 2606 void 2607 pmap_pteg_check(void) 2608 { 2609 volatile struct pte *pt; 2610 int i; 2611 int ptegidx; 2612 u_int p_valid = 0; 2613 u_int s_valid = 0; 2614 u_int invalid = 0; 2615 2616 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2617 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2618 if (pt->pte_hi & PTE_VALID) { 2619 if (pt->pte_hi & PTE_HID) 2620 s_valid++; 2621 else 2622 { 2623 p_valid++; 2624 } 2625 } else 2626 invalid++; 2627 } 2628 } 2629 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2630 p_valid, p_valid, s_valid, s_valid, 2631 invalid, invalid); 2632 } 2633 2634 void 2635 pmap_print_mmuregs(void) 2636 { 2637 int i; 2638 u_int cpuvers; 2639 #ifndef PMAP_OEA64 2640 vaddr_t addr; 2641 register_t soft_sr[16]; 2642 #endif 2643 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2644 struct bat soft_ibat[4]; 2645 struct bat soft_dbat[4]; 2646 #endif 2647 paddr_t sdr1; 2648 2649 cpuvers = MFPVR() >> 16; 2650 __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2651 #ifndef PMAP_OEA64 2652 addr = 0; 2653 for (i = 0; i < 16; i++) { 2654 soft_sr[i] = MFSRIN(addr); 2655 addr += (1 << ADDR_SR_SHFT); 2656 } 2657 #endif 2658 2659 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2660 /* read iBAT (601: uBAT) registers */ 2661 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2662 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2663 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2664 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2665 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2666 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2667 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2668 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2669 2670 2671 if (cpuvers != MPC601) { 2672 /* read dBAT registers */ 2673 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2674 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2675 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2676 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2677 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2678 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2679 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu)); 2680 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2681 } 2682 #endif 2683 2684 printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2685 #ifndef PMAP_OEA64 2686 printf("SR[]:\t"); 2687 for (i = 0; i < 4; i++) 2688 printf("0x%08lx, ", soft_sr[i]); 2689 printf("\n\t"); 2690 for ( ; i < 8; i++) 2691 printf("0x%08lx, ", soft_sr[i]); 2692 printf("\n\t"); 2693 for ( ; i < 12; i++) 2694 printf("0x%08lx, ", soft_sr[i]); 2695 printf("\n\t"); 2696 for ( ; i < 16; i++) 2697 printf("0x%08lx, ", soft_sr[i]); 2698 printf("\n"); 2699 #endif 2700 2701 #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE) 2702 printf("%cBAT[]:\t", cpuvers == MPC601 ? 
'u' : 'i'); 2703 for (i = 0; i < 4; i++) { 2704 printf("0x%08lx 0x%08lx, ", 2705 soft_ibat[i].batu, soft_ibat[i].batl); 2706 if (i == 1) 2707 printf("\n\t"); 2708 } 2709 if (cpuvers != MPC601) { 2710 printf("\ndBAT[]:\t"); 2711 for (i = 0; i < 4; i++) { 2712 printf("0x%08lx 0x%08lx, ", 2713 soft_dbat[i].batu, soft_dbat[i].batl); 2714 if (i == 1) 2715 printf("\n\t"); 2716 } 2717 } 2718 printf("\n"); 2719 #endif /* PMAP_OEA... */ 2720 } 2721 2722 void 2723 pmap_print_pte(pmap_t pm, vaddr_t va) 2724 { 2725 struct pvo_entry *pvo; 2726 volatile struct pte *pt; 2727 int pteidx; 2728 2729 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2730 if (pvo != NULL) { 2731 pt = pmap_pvo_to_pte(pvo, pteidx); 2732 if (pt != NULL) { 2733 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2734 va, pt, 2735 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2736 pt->pte_hi, pt->pte_lo); 2737 } else { 2738 printf("No valid PTE found\n"); 2739 } 2740 } else { 2741 printf("Address not in pmap\n"); 2742 } 2743 } 2744 2745 void 2746 pmap_pteg_dist(void) 2747 { 2748 struct pvo_entry *pvo; 2749 int ptegidx; 2750 int depth; 2751 int max_depth = 0; 2752 unsigned int depths[64]; 2753 2754 memset(depths, 0, sizeof(depths)); 2755 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2756 depth = 0; 2757 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2758 depth++; 2759 } 2760 if (depth > max_depth) 2761 max_depth = depth; 2762 if (depth > 63) 2763 depth = 63; 2764 depths[depth]++; 2765 } 2766 2767 for (depth = 0; depth < 64; depth++) { 2768 printf(" [%2d]: %8u", depth, depths[depth]); 2769 if ((depth & 3) == 3) 2770 printf("\n"); 2771 if (depth == max_depth) 2772 break; 2773 } 2774 if ((depth & 3) != 3) 2775 printf("\n"); 2776 printf("Max depth found was %d\n", max_depth); 2777 } 2778 #endif /* DEBUG */ 2779 2780 #if defined(PMAPCHECK) || defined(DEBUG) 2781 void 2782 pmap_pvo_verify(void) 2783 { 2784 int ptegidx; 2785 int s; 2786 2787 s = splvm(); 2788 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2789 struct pvo_entry *pvo; 2790 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2791 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2792 panic("pmap_pvo_verify: invalid pvo %p " 2793 "on list %#x", pvo, ptegidx); 2794 pmap_pvo_check(pvo); 2795 } 2796 } 2797 splx(s); 2798 } 2799 #endif /* PMAPCHECK */ 2800 2801 2802 void * 2803 pmap_pool_ualloc(struct pool *pp, int flags) 2804 { 2805 struct pvo_page *pvop; 2806 2807 if (uvm.page_init_done != true) { 2808 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2809 } 2810 2811 PMAP_LOCK(); 2812 pvop = SIMPLEQ_FIRST(&pmap_upvop_head); 2813 if (pvop != NULL) { 2814 pmap_upvop_free--; 2815 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link); 2816 PMAP_UNLOCK(); 2817 return pvop; 2818 } 2819 PMAP_UNLOCK(); 2820 return pmap_pool_malloc(pp, flags); 2821 } 2822 2823 void * 2824 pmap_pool_malloc(struct pool *pp, int flags) 2825 { 2826 struct pvo_page *pvop; 2827 struct vm_page *pg; 2828 2829 PMAP_LOCK(); 2830 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head); 2831 if (pvop != NULL) { 2832 pmap_mpvop_free--; 2833 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link); 2834 PMAP_UNLOCK(); 2835 return pvop; 2836 } 2837 PMAP_UNLOCK(); 2838 again: 2839 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2840 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2841 if (__predict_false(pg == NULL)) { 2842 if (flags & PR_WAITOK) { 2843 uvm_wait("plpg"); 2844 goto again; 2845 } else { 2846 return (0); 2847 } 2848 } 2849 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2850 return 
(void *)(uintptr_t) VM_PAGE_TO_PHYS(pg); 2851 } 2852 2853 void 2854 pmap_pool_ufree(struct pool *pp, void *va) 2855 { 2856 struct pvo_page *pvop; 2857 #if 0 2858 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) { 2859 pmap_pool_mfree(va, size, tag); 2860 return; 2861 } 2862 #endif 2863 PMAP_LOCK(); 2864 pvop = va; 2865 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link); 2866 pmap_upvop_free++; 2867 if (pmap_upvop_free > pmap_upvop_maxfree) 2868 pmap_upvop_maxfree = pmap_upvop_free; 2869 PMAP_UNLOCK(); 2870 } 2871 2872 void 2873 pmap_pool_mfree(struct pool *pp, void *va) 2874 { 2875 struct pvo_page *pvop; 2876 2877 PMAP_LOCK(); 2878 pvop = va; 2879 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link); 2880 pmap_mpvop_free++; 2881 if (pmap_mpvop_free > pmap_mpvop_maxfree) 2882 pmap_mpvop_maxfree = pmap_mpvop_free; 2883 PMAP_UNLOCK(); 2884 #if 0 2885 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va)); 2886 #endif 2887 } 2888 2889 /* 2890 * This routine is used during bootstrap to steal to-be-managed memory 2891 * (which will then be unmanaged). We use it to grab from the first 256MB 2892 * for our pmap needs and above 256MB for other stuff. 2893 */ 2894 vaddr_t 2895 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp) 2896 { 2897 vsize_t size; 2898 vaddr_t va; 2899 paddr_t pa = 0; 2900 int npgs, bank; 2901 struct vm_physseg *ps; 2902 2903 if (uvm.page_init_done == true) 2904 panic("pmap_steal_memory: called _after_ bootstrap"); 2905 2906 *vstartp = VM_MIN_KERNEL_ADDRESS; 2907 *vendp = VM_MAX_KERNEL_ADDRESS; 2908 2909 size = round_page(vsize); 2910 npgs = atop(size); 2911 2912 /* 2913 * PA 0 will never be among those given to UVM so we can use it 2914 * to indicate we couldn't steal any memory. 2915 */ 2916 for (bank = 0; bank < vm_nphysseg; bank++) { 2917 ps = VM_PHYSMEM_PTR(bank); 2918 if (ps->free_list == VM_FREELIST_FIRST256 && 2919 ps->avail_end - ps->avail_start >= npgs) { 2920 pa = ptoa(ps->avail_start); 2921 break; 2922 } 2923 } 2924 2925 if (pa == 0) 2926 panic("pmap_steal_memory: no appropriate memory to steal!"); 2927 2928 ps->avail_start += npgs; 2929 ps->start += npgs; 2930 2931 /* 2932 * If we've used up all the pages in the segment, remove it and 2933 * compact the list. 2934 */ 2935 if (ps->avail_start == ps->end) { 2936 /* 2937 * If this was the last one, then a very bad thing has occurred. 2938 */ 2939 if (--vm_nphysseg == 0) 2940 panic("pmap_steal_memory: out of memory!"); 2941 2942 printf("pmap_steal_memory: consumed bank %d\n", bank); 2943 for (; bank < vm_nphysseg; bank++, ps++) { 2944 ps[0] = ps[1]; 2945 } 2946 } 2947 2948 va = (vaddr_t) pa; 2949 memset((void *) va, 0, size); 2950 pmap_pages_stolen += npgs; 2951 #ifdef DEBUG 2952 if (pmapdebug && npgs > 1) { 2953 u_int cnt = 0; 2954 for (bank = 0; bank < vm_nphysseg; bank++) { 2955 ps = VM_PHYSMEM_PTR(bank); 2956 cnt += ps->avail_end - ps->avail_start; 2957 } 2958 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n", 2959 npgs, pmap_pages_stolen, cnt); 2960 } 2961 #endif 2962 2963 return va; 2964 } 2965 2966 /* 2967 * Find a chunk of memory with the right size and alignment.
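 *
 * (The alignment must be at least PAGE_SIZE and a power of two, which is
 * checked below; candidate start addresses are then rounded up with the
 * usual mask trick.  For example, with alignment 0x10000 and an avail
 * region starting at 0x00123456,
 *
 *	s = (0x00123456 + 0x10000 - 1) & ~(0x10000 - 1) = 0x00130000
 *
 * and the region is only used if [s, s + size) still fits inside it.)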
2968 */ 2969 paddr_t 2970 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end) 2971 { 2972 struct mem_region *mp; 2973 paddr_t s, e; 2974 int i, j; 2975 2976 size = round_page(size); 2977 2978 DPRINTFN(BOOT, 2979 ("pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d", 2980 size, alignment, at_end)); 2981 2982 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0) 2983 panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa, 2984 alignment); 2985 2986 if (at_end) { 2987 if (alignment != PAGE_SIZE) 2988 panic("pmap_boot_find_memory: invalid ending " 2989 "alignment %#" _PRIxpa, alignment); 2990 2991 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) { 2992 s = mp->start + mp->size - size; 2993 if (s >= mp->start && mp->size >= size) { 2994 DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s)); 2995 DPRINTFN(BOOT, 2996 ("pmap_boot_find_memory: b-avail[%d] start " 2997 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 2998 mp->start, mp->size)); 2999 mp->size -= size; 3000 DPRINTFN(BOOT, 3001 ("pmap_boot_find_memory: a-avail[%d] start " 3002 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3003 mp->start, mp->size)); 3004 return s; 3005 } 3006 } 3007 panic("pmap_boot_find_memory: no available memory"); 3008 } 3009 3010 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3011 s = (mp->start + alignment - 1) & ~(alignment-1); 3012 e = s + size; 3013 3014 /* 3015 * Is the calculated region entirely within this available region? 3016 */ 3017 if (s < mp->start || e > mp->start + mp->size) 3018 continue; 3019 3020 DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s)); 3021 if (s == mp->start) { 3022 /* 3023 * If the block starts at the beginning of the region, 3024 * adjust the size & start. (the region may now be 3025 * zero in length) 3026 */ 3027 DPRINTFN(BOOT, 3028 ("pmap_boot_find_memory: b-avail[%d] start " 3029 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3030 mp->start += size; 3031 mp->size -= size; 3032 DPRINTFN(BOOT, 3033 ("pmap_boot_find_memory: a-avail[%d] start " 3034 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3035 } else if (e == mp->start + mp->size) { 3036 /* 3037 * If the block ends at the end of the region, 3038 * adjust only the size. 3039 */ 3040 DPRINTFN(BOOT, 3041 ("pmap_boot_find_memory: b-avail[%d] start " 3042 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3043 mp->size -= size; 3044 DPRINTFN(BOOT, 3045 ("pmap_boot_find_memory: a-avail[%d] start " 3046 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3047 } else { 3048 /* 3049 * Block is in the middle of the region, so we 3050 * have to split it in two.
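 *
 * For example, carving [s, e) = [0x00200000, 0x00280000) out of an
 * avail entry covering [0x00100000, 0x00400000) leaves
 *
 *	avail[i]   = { start 0x00100000, size 0x00100000 }	below s
 *	avail[i+1] = { start 0x00280000, size 0x00180000 }	above e
 *
 * with the later entries shifted up one slot to make room.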
3051 */ 3052 for (j = avail_cnt; j > i + 1; j--) { 3053 avail[j] = avail[j-1]; 3054 } 3055 DPRINTFN(BOOT, 3056 ("pmap_boot_find_memory: b-avail[%d] start " 3057 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3058 mp[1].start = e; 3059 mp[1].size = mp[0].start + mp[0].size - e; 3060 mp[0].size = s - mp[0].start; 3061 avail_cnt++; 3062 for (; i < avail_cnt; i++) { 3063 DPRINTFN(BOOT, 3064 ("pmap_boot_find_memory: a-avail[%d] " 3065 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i, 3066 avail[i].start, avail[i].size)); 3067 } 3068 } 3069 KASSERT(s == (uintptr_t) s); 3070 return s; 3071 } 3072 panic("pmap_boot_find_memory: not enough memory for " 3073 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment); 3074 } 3075 3076 /* XXXSL: we dont have any BATs to do this, map in Segment 0 1:1 using page tables */ 3077 #if defined (PMAP_OEA64_BRIDGE) 3078 int 3079 pmap_setup_segment0_map(int use_large_pages, ...) 3080 { 3081 vaddr_t va; 3082 3083 register_t pte_lo = 0x0; 3084 int ptegidx = 0, i = 0; 3085 struct pte pte; 3086 va_list ap; 3087 3088 /* Coherent + Supervisor RW, no user access */ 3089 pte_lo = PTE_M; 3090 3091 /* XXXSL 3092 * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later, 3093 * these have to take priority. 3094 */ 3095 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) { 3096 ptegidx = va_to_pteg(pmap_kernel(), va); 3097 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo); 3098 i = pmap_pte_insert(ptegidx, &pte); 3099 } 3100 3101 va_start(ap, use_large_pages); 3102 while (1) { 3103 paddr_t pa; 3104 size_t size; 3105 3106 va = va_arg(ap, vaddr_t); 3107 3108 if (va == 0) 3109 break; 3110 3111 pa = va_arg(ap, paddr_t); 3112 size = va_arg(ap, size_t); 3113 3114 for (; va < (va + size); va += 0x1000, pa += 0x1000) { 3115 #if 0 3116 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa); 3117 #endif 3118 ptegidx = va_to_pteg(pmap_kernel(), va); 3119 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo); 3120 i = pmap_pte_insert(ptegidx, &pte); 3121 } 3122 } 3123 3124 TLBSYNC(); 3125 SYNC(); 3126 return (0); 3127 } 3128 #endif /* PMAP_OEA64_BRIDGE */ 3129 3130 /* 3131 * This is not part of the defined PMAP interface and is specific to the 3132 * PowerPC architecture. This is called during initppc, before the system 3133 * is really initialized. 3134 */ 3135 void 3136 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3137 { 3138 struct mem_region *mp, tmp; 3139 paddr_t s, e; 3140 psize_t size; 3141 int i, j; 3142 3143 /* 3144 * Get memory. 3145 */ 3146 mem_regions(&mem, &avail); 3147 #if defined(DEBUG) 3148 if (pmapdebug & PMAPDEBUG_BOOT) { 3149 printf("pmap_bootstrap: memory configuration:\n"); 3150 for (mp = mem; mp->size; mp++) { 3151 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n", 3152 mp->start, mp->size); 3153 } 3154 for (mp = avail; mp->size; mp++) { 3155 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n", 3156 mp->start, mp->size); 3157 } 3158 } 3159 #endif 3160 3161 /* 3162 * Find out how much physical memory we have and in how many chunks. 3163 */ 3164 for (mem_cnt = 0, mp = mem; mp->size; mp++) { 3165 if (mp->start >= pmap_memlimit) 3166 continue; 3167 if (mp->start + mp->size > pmap_memlimit) { 3168 size = pmap_memlimit - mp->start; 3169 physmem += btoc(size); 3170 } else { 3171 physmem += btoc(mp->size); 3172 } 3173 mem_cnt++; 3174 } 3175 3176 /* 3177 * Count the number of available entries. 
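 *
 * (The physmem count made just above already honours pmap_memlimit:
 * regions starting at or beyond the limit are skipped outright, and a
 * region straddling it only contributes the part below it.  E.g. with
 * pmap_memlimit at 0x10000000, a region spanning 0x0fff0000..0x10010000
 * adds just btoc(0x10000) pages to physmem.)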
3178 */ 3179 for (avail_cnt = 0, mp = avail; mp->size; mp++) 3180 avail_cnt++; 3181 3182 /* 3183 * Page align all regions. 3184 */ 3185 kernelstart = trunc_page(kernelstart); 3186 kernelend = round_page(kernelend); 3187 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3188 s = round_page(mp->start); 3189 mp->size -= (s - mp->start); 3190 mp->size = trunc_page(mp->size); 3191 mp->start = s; 3192 e = mp->start + mp->size; 3193 3194 DPRINTFN(BOOT, 3195 ("pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3196 i, mp->start, mp->size)); 3197 3198 /* 3199 * Don't allow the end to run beyond our artificial limit 3200 */ 3201 if (e > pmap_memlimit) 3202 e = pmap_memlimit; 3203 3204 /* 3205 * Is this region empty or strange? skip it. 3206 */ 3207 if (e <= s) { 3208 mp->start = 0; 3209 mp->size = 0; 3210 continue; 3211 } 3212 3213 /* 3214 * Does this overlap the beginning of kernel? 3215 * Does extend past the end of the kernel? 3216 */ 3217 else if (s < kernelstart && e > kernelstart) { 3218 if (e > kernelend) { 3219 avail[avail_cnt].start = kernelend; 3220 avail[avail_cnt].size = e - kernelend; 3221 avail_cnt++; 3222 } 3223 mp->size = kernelstart - s; 3224 } 3225 /* 3226 * Check whether this region overlaps the end of the kernel. 3227 */ 3228 else if (s < kernelend && e > kernelend) { 3229 mp->start = kernelend; 3230 mp->size = e - kernelend; 3231 } 3232 /* 3233 * Look whether this regions is completely inside the kernel. 3234 * Nuke it if it does. 3235 */ 3236 else if (s >= kernelstart && e <= kernelend) { 3237 mp->start = 0; 3238 mp->size = 0; 3239 } 3240 /* 3241 * If the user imposed a memory limit, enforce it. 3242 */ 3243 else if (s >= pmap_memlimit) { 3244 mp->start = -PAGE_SIZE; /* let's know why */ 3245 mp->size = 0; 3246 } 3247 else { 3248 mp->start = s; 3249 mp->size = e - s; 3250 } 3251 DPRINTFN(BOOT, 3252 ("pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3253 i, mp->start, mp->size)); 3254 } 3255 3256 /* 3257 * Move (and uncount) all the null return to the end. 3258 */ 3259 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3260 if (mp->size == 0) { 3261 tmp = avail[i]; 3262 avail[i] = avail[--avail_cnt]; 3263 avail[avail_cnt] = avail[i]; 3264 } 3265 } 3266 3267 /* 3268 * (Bubble)sort them into ascending order. 3269 */ 3270 for (i = 0; i < avail_cnt; i++) { 3271 for (j = i + 1; j < avail_cnt; j++) { 3272 if (avail[i].start > avail[j].start) { 3273 tmp = avail[i]; 3274 avail[i] = avail[j]; 3275 avail[j] = tmp; 3276 } 3277 } 3278 } 3279 3280 /* 3281 * Make sure they don't overlap. 3282 */ 3283 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 3284 if (mp[0].start + mp[0].size > mp[1].start) { 3285 mp[0].size = mp[1].start - mp[0].start; 3286 } 3287 DPRINTFN(BOOT, 3288 ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3289 i, mp->start, mp->size)); 3290 } 3291 DPRINTFN(BOOT, 3292 ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3293 i, mp->start, mp->size)); 3294 3295 #ifdef PTEGCOUNT 3296 pmap_pteg_cnt = PTEGCOUNT; 3297 #else /* PTEGCOUNT */ 3298 3299 pmap_pteg_cnt = 0x1000; 3300 3301 while (pmap_pteg_cnt < physmem) 3302 pmap_pteg_cnt <<= 1; 3303 3304 pmap_pteg_cnt >>= 1; 3305 #endif /* PTEGCOUNT */ 3306 3307 #ifdef DEBUG 3308 DPRINTFN(BOOT, 3309 ("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt)); 3310 #endif 3311 3312 /* 3313 * Find suitably aligned memory for PTEG hash table. 
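 *
 * (pmap_pteg_cnt, computed above, is a power of two giving roughly one
 * PTEG for every two pages of physical memory.  As an illustration,
 * assuming the 32-bit OEA layout of 64-byte PTEGs (8 PTEs of 8 bytes
 * each): a 128MB machine has physmem = 0x8000 pages, so pmap_pteg_cnt
 * ends up as 0x4000 and the hash table below is 0x4000 * 64 = 1MB,
 * allocated at its natural alignment (alignment == size) as the hashed
 * page table requires.)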
3314 */ 3315 size = pmap_pteg_cnt * sizeof(struct pteg); 3316 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3317 3318 #ifdef DEBUG 3319 DPRINTFN(BOOT, 3320 ("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table)); 3321 #endif 3322 3323 3324 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3325 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 3326 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB", 3327 pmap_pteg_table, size); 3328 #endif 3329 3330 memset(__UNVOLATILE(pmap_pteg_table), 0, 3331 pmap_pteg_cnt * sizeof(struct pteg)); 3332 pmap_pteg_mask = pmap_pteg_cnt - 1; 3333 3334 /* 3335 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3336 * with pages. So we just steal them before giving them to UVM. 3337 */ 3338 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3339 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3340 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3341 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 3342 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB", 3343 pmap_pvo_table, size); 3344 #endif 3345 3346 for (i = 0; i < pmap_pteg_cnt; i++) 3347 TAILQ_INIT(&pmap_pvo_table[i]); 3348 3349 #ifndef MSGBUFADDR 3350 /* 3351 * Allocate msgbuf in high memory. 3352 */ 3353 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3354 #endif 3355 3356 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3357 paddr_t pfstart = atop(mp->start); 3358 paddr_t pfend = atop(mp->start + mp->size); 3359 if (mp->size == 0) 3360 continue; 3361 if (mp->start + mp->size <= SEGMENT_LENGTH) { 3362 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3363 VM_FREELIST_FIRST256); 3364 } else if (mp->start >= SEGMENT_LENGTH) { 3365 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3366 VM_FREELIST_DEFAULT); 3367 } else { 3368 pfend = atop(SEGMENT_LENGTH); 3369 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3370 VM_FREELIST_FIRST256); 3371 pfstart = atop(SEGMENT_LENGTH); 3372 pfend = atop(mp->start + mp->size); 3373 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3374 VM_FREELIST_DEFAULT); 3375 } 3376 } 3377 3378 /* 3379 * Make sure kernel vsid is allocated as well as VSID 0. 3380 */ 3381 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3382 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3383 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3384 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3385 pmap_vsid_bitmap[0] |= 1; 3386 3387 /* 3388 * Initialize kernel pmap and hardware. 
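 *
 * (For the 32-bit case the mtsdr1 further down hands the MMU the table
 * in the usual HTABORG | HTABMASK form; e.g. with pmap_pteg_cnt = 0x4000,
 * pmap_pteg_mask is 0x3fff and
 *
 *	SDR1 = (uintptr_t)pmap_pteg_table | (0x3fff >> 10)
 *	     = table base | 0xf
 *
 * so the low bits tell the hardware how many hash bits to use when
 * indexing the table.)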
3389 */ 3390 3391 /* PMAP_OEA64_BRIDGE does support these instructions */ 3392 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 3393 for (i = 0; i < 16; i++) { 3394 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3395 __asm volatile ("mtsrin %0,%1" 3396 :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT)); 3397 } 3398 3399 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3400 __asm volatile ("mtsr %0,%1" 3401 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 3402 #ifdef KERNEL2_SR 3403 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3404 __asm volatile ("mtsr %0,%1" 3405 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 3406 #endif 3407 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3408 #if defined (PMAP_OEA) 3409 for (i = 0; i < 16; i++) { 3410 if (iosrtable[i] & SR601_T) { 3411 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3412 __asm volatile ("mtsrin %0,%1" 3413 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT)); 3414 } 3415 } 3416 __asm volatile ("sync; mtsdr1 %0; isync" 3417 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))); 3418 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 3419 __asm __volatile ("sync; mtsdr1 %0; isync" 3420 :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11)))); 3421 #endif 3422 tlbia(); 3423 3424 #ifdef ALTIVEC 3425 pmap_use_altivec = cpu_altivec; 3426 #endif 3427 3428 #ifdef DEBUG 3429 if (pmapdebug & PMAPDEBUG_BOOT) { 3430 u_int cnt; 3431 int bank; 3432 char pbuf[9]; 3433 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) { 3434 cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start; 3435 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3436 bank, 3437 ptoa(VM_PHYSMEM_PTR(bank)->avail_start), 3438 ptoa(VM_PHYSMEM_PTR(bank)->avail_end), 3439 ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)); 3440 } 3441 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3442 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3443 pbuf, cnt); 3444 } 3445 #endif 3446 3447 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry), 3448 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl", 3449 &pmap_pool_uallocator, IPL_VM); 3450 3451 pool_setlowat(&pmap_upvo_pool, 252); 3452 3453 pool_init(&pmap_pool, sizeof(struct pmap), 3454 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator, 3455 IPL_NONE); 3456 3457 #if defined(PMAP_NEED_MAPKERNEL) || 1 3458 { 3459 struct pmap *pm = pmap_kernel(); 3460 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3461 extern int etext[], kernel_text[]; 3462 vaddr_t va, va_etext = (paddr_t) etext; 3463 #endif 3464 paddr_t pa, pa_end; 3465 register_t sr; 3466 struct pte pt; 3467 unsigned int ptegidx; 3468 int bank; 3469 3470 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3471 pm->pm_sr[0] = sr; 3472 3473 for (bank = 0; bank < vm_nphysseg; bank++) { 3474 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3475 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3476 for (; pa < pa_end; pa += PAGE_SIZE) { 3477 ptegidx = va_to_pteg(pm, pa); 3478 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3479 pmap_pte_insert(ptegidx, &pt); 3480 } 3481 } 3482 3483 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3484 va = (vaddr_t) kernel_text; 3485 3486 for (pa = kernelstart; va < va_etext; 3487 pa += PAGE_SIZE, va += PAGE_SIZE) { 3488 ptegidx = va_to_pteg(pm, va); 3489 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3490 pmap_pte_insert(ptegidx, &pt); 3491 } 3492 3493 for (; pa < kernelend; 3494 pa += PAGE_SIZE, va += PAGE_SIZE) { 3495 ptegidx 
= va_to_pteg(pm, va); 3496 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3497 pmap_pte_insert(ptegidx, &pt); 3498 } 3499 3500 for (va = 0, pa = 0; va < kernelstart; 3501 pa += PAGE_SIZE, va += PAGE_SIZE) { 3502 ptegidx = va_to_pteg(pm, va); 3503 if (va < 0x3000) 3504 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3505 else 3506 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3507 pmap_pte_insert(ptegidx, &pt); 3508 } 3509 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3510 pa += PAGE_SIZE, va += PAGE_SIZE) { 3511 ptegidx = va_to_pteg(pm, va); 3512 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3513 pmap_pte_insert(ptegidx, &pt); 3514 } 3515 #endif 3516 3517 __asm volatile ("mtsrin %0,%1" 3518 :: "r"(sr), "r"(kernelstart)); 3519 } 3520 #endif 3521 } 3522